Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 6
-rw-r--r--  drivers/acpi/acpi_processor.c | 25
-rw-r--r--  drivers/acpi/device_pm.c | 2
-rw-r--r--  drivers/acpi/int340x_thermal.c | 11
-rw-r--r--  drivers/acpi/pci_irq.c | 1
-rw-r--r--  drivers/acpi/processor_core.c | 56
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/acpi/scan.c | 13
-rw-r--r--  drivers/acpi/video.c | 27
-rw-r--r--  drivers/ata/Kconfig | 1
-rw-r--r--  drivers/ata/ahci.c | 1
-rw-r--r--  drivers/ata/ahci_xgene.c | 14
-rw-r--r--  drivers/ata/libahci.c | 2
-rw-r--r--  drivers/ata/libata-core.c | 36
-rw-r--r--  drivers/ata/libata-eh.c | 1
-rw-r--r--  drivers/ata/libata-scsi.c | 10
-rw-r--r--  drivers/ata/libata-sff.c | 12
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 26
-rw-r--r--  drivers/ata/sata_sil24.c | 2
-rw-r--r--  drivers/base/power/domain.c | 3
-rw-r--r--  drivers/base/power/opp.c | 39
-rw-r--r--  drivers/block/null_blk.c | 2
-rw-r--r--  drivers/block/nvme-core.c | 177
-rw-r--r--  drivers/block/rbd.c | 25
-rw-r--r--  drivers/block/virtio_blk.c | 2
-rw-r--r--  drivers/bus/arm-cci.c | 3
-rw-r--r--  drivers/bus/mvebu-mbus.c | 13
-rw-r--r--  drivers/char/agp/ali-agp.c | 2
-rw-r--r--  drivers/char/agp/amd64-agp.c | 2
-rw-r--r--  drivers/char/agp/ati-agp.c | 2
-rw-r--r--  drivers/char/agp/backend.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 2
-rw-r--r--  drivers/char/agp/intel-gtt.c | 2
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 2
-rw-r--r--  drivers/char/agp/via-agp.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 46
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 4
-rw-r--r--  drivers/clk/at91/clk-slow.c | 27
-rw-r--r--  drivers/clk/berlin/bg2q.c | 1
-rw-r--r--  drivers/clk/clk-ppc-corenet.c | 2
-rw-r--r--  drivers/clk/clk.c | 2
-rw-r--r--  drivers/clk/rockchip/clk-cpu.c | 10
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 27
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 28
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r--  drivers/clocksource/bcm_kona_timer.c | 9
-rw-r--r--  drivers/clocksource/exynos_mct.c | 4
-rw-r--r--  drivers/clocksource/sh_tmu.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq.c | 6
-rw-r--r--  drivers/cpuidle/governors/ladder.c | 7
-rw-r--r--  drivers/cpuidle/governors/menu.c | 25
-rw-r--r--  drivers/dma/dw/core.c | 2
-rw-r--r--  drivers/dma/dw/platform.c | 5
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 2
-rw-r--r--  drivers/gpio/gpio-dln2.c | 156
-rw-r--r--  drivers/gpio/gpio-grgpio.c | 3
-rw-r--r--  drivers/gpio/gpiolib-of.c | 10
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 92
-rw-r--r--  drivers/gpio/gpiolib.c | 58
-rw-r--r--  drivers/gpio/gpiolib.h | 1
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 320
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 176
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 50
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 10
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 17
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 38
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 60
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 11
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 27
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 53
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.c | 9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 69
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/event.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/notify.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 78
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 18
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 48
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 16
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 52
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r--  drivers/hid/Kconfig | 3
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-input.c | 3
-rw-r--r--  drivers/hid/hid-kye.c | 4
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 16
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 41
-rw-r--r--  drivers/hid/hid-roccat-pyra.c | 8
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 5
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hwmon/Kconfig | 10
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/i5500_temp.c | 149
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 12
-rw-r--r--  drivers/i2c/i2c-core.c | 2
-rw-r--r--  drivers/i2c/i2c-slave-eeprom.c | 4
-rw-r--r--  drivers/iio/adc/ad799x.c | 15
-rw-r--r--  drivers/iio/inkern.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r--  drivers/input/evdev.c | 60
-rw-r--r--  drivers/input/input.c | 22
-rw-r--r--  drivers/input/keyboard/Kconfig | 1
-rw-r--r--  drivers/input/keyboard/gpio_keys.c | 114
-rw-r--r--  drivers/input/keyboard/hil_kbd.c | 6
-rw-r--r--  drivers/input/keyboard/stmpe-keypad.c | 141
-rw-r--r--  drivers/input/mouse/alps.c | 84
-rw-r--r--  drivers/input/mouse/elantech.c | 18
-rw-r--r--  drivers/input/mouse/synaptics.c | 7
-rw-r--r--  drivers/input/mouse/trackpoint.c | 4
-rw-r--r--  drivers/input/mouse/trackpoint.h | 5
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 47
-rw-r--r--  drivers/input/serio/i8042.c | 14
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 99
-rw-r--r--  drivers/input/touchscreen/edt-ft5x06.c | 4
-rw-r--r--  drivers/iommu/intel-iommu.c | 12
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 6
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 1
-rw-r--r--  drivers/iommu/tegra-gart.c | 3
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r--  drivers/irqchip/irq-hip04.c | 2
-rw-r--r--  drivers/irqchip/irq-mtk-sysirq.c | 4
-rw-r--r--  drivers/irqchip/irq-omap-intc.c | 26
-rw-r--r--  drivers/isdn/hardware/eicon/message.c | 2
-rw-r--r--  drivers/leds/leds-netxbig.c | 12
-rw-r--r--  drivers/mcb/mcb-internal.h | 1
-rw-r--r--  drivers/mcb/mcb-pci.c | 27
-rw-r--r--  drivers/md/dm-cache-metadata.c | 104
-rw-r--r--  drivers/md/dm-cache-target.c | 89
-rw-r--r--  drivers/md/dm-thin.c | 35
-rw-r--r--  drivers/md/dm.c | 11
-rw-r--r--  drivers/media/pci/cx23885/cx23885-cards.c | 23
-rw-r--r--  drivers/media/pci/cx23885/cx23885-core.c | 4
-rw-r--r--  drivers/media/pci/cx23885/cx23885-dvb.c | 11
-rw-r--r--  drivers/media/pci/cx23885/cx23885.h | 1
-rw-r--r--  drivers/media/platform/omap3isp/ispvideo.c | 7
-rw-r--r--  drivers/media/platform/soc_camera/atmel-isi.c | 5
-rw-r--r--  drivers/media/platform/soc_camera/mx2_camera.c | 3
-rw-r--r--  drivers/media/platform/soc_camera/mx3_camera.c | 3
-rw-r--r--  drivers/media/platform/soc_camera/omap1_camera.c | 3
-rw-r--r--  drivers/media/platform/soc_camera/pxa_camera.c | 3
-rw-r--r--  drivers/media/platform/soc_camera/rcar_vin.c | 4
-rw-r--r--  drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | 4
-rw-r--r--  drivers/media/usb/dvb-usb/cxusb.c | 2
-rw-r--r--  drivers/media/usb/pvrusb2/pvrusb2-v4l2.c | 24
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 19
-rw-r--r--  drivers/mfd/da9052-core.c | 3
-rw-r--r--  drivers/mfd/rtsx_usb.c | 12
-rw-r--r--  drivers/mfd/stmpe.c | 4
-rw-r--r--  drivers/mfd/stmpe.h | 3
-rw-r--r--  drivers/mfd/tps65218.c | 12
-rw-r--r--  drivers/misc/cxl/context.c | 82
-rw-r--r--  drivers/misc/cxl/file.c | 14
-rw-r--r--  drivers/misc/mei/hw-me.c | 12
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 25
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 3
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c | 15
-rw-r--r--  drivers/mmc/host/sdhci.c | 80
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/caif/caif_virtio.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can.c | 3
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 29
-rw-r--r--  drivers/net/can/dev.c | 8
-rw-r--r--  drivers/net/can/m_can/m_can.c | 5
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 57
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 6
-rw-r--r--  drivers/net/ethernet/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 4
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 15
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 9
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 57
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 144
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 56
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 20
-rw-r--r--  drivers/net/ethernet/dnet.c | 18
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 47
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 104
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 9
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 6
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 11
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 9
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 4
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 201
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 6
-rw-r--r--  drivers/net/ethernet/s6gmac.c | 1058
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 21
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 79
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 10
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 96
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 1
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 15
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r--  drivers/net/phy/micrel.c | 18
-rw-r--r--  drivers/net/team/team.c | 16
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 10
-rw-r--r--  drivers/net/usb/r8152.c | 47
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vxlan.c | 34
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 4
-rw-r--r--  drivers/net/wireless/ipw2x00/Kconfig | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-file.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 35
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 72
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 17
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 32
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 1
-rw-r--r--  drivers/net/xen-netfront.c | 71
-rw-r--r--  drivers/of/overlay.c | 11
-rw-r--r--  drivers/of/platform.c | 11
-rw-r--r--  drivers/of/unittest-data/tests-overlay.dtsi | 55
-rw-r--r--  drivers/of/unittest.c | 39
-rw-r--r--  drivers/parisc/lba_pci.c | 5
-rw-r--r--  drivers/pci/bus.c | 43
-rw-r--r--  drivers/pci/pci.c | 40
-rw-r--r--  drivers/pci/pci.h | 1
-rw-r--r--  drivers/pci/quirks.c | 14
-rw-r--r--  drivers/pci/setup-bus.c | 56
-rw-r--r--  drivers/phy/phy-miphy28lp.c | 3
-rw-r--r--  drivers/phy/phy-omap-control.c | 7
-rw-r--r--  drivers/phy/phy-sun4i-usb.c | 3
-rw-r--r--  drivers/phy/phy-ti-pipe3.c | 10
-rw-r--r--  drivers/pinctrl/core.c | 5
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 108
-rw-r--r--  drivers/pinctrl/pinctrl-rockchip.c | 102
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 5
-rw-r--r--  drivers/pinctrl/pinctrl-xway.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 4
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 1055
-rw-r--r--  drivers/powercap/intel_rapl.c | 1
-rw-r--r--  drivers/regulator/core.c | 4
-rw-r--r--  drivers/regulator/s2mps11.c | 61
-rw-r--r--  drivers/reset/reset-sunxi.c | 4
-rw-r--r--  drivers/rtc/rtc-s5m.c | 1
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 10
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 117
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 220
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 50
-rw-r--r--  drivers/scsi/fnic/fnic.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 15
-rw-r--r--  drivers/scsi/ipr.c | 92
-rw-r--r--  drivers/scsi/ipr.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/scsi/scsi.c | 13
-rw-r--r--  drivers/scsi/scsi_debug.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 15
-rw-r--r--  drivers/scsi/sd.c | 5
-rw-r--r--  drivers/spi/spi-dw-mid.c | 1
-rw-r--r--  drivers/spi/spi-dw.c | 6
-rw-r--r--  drivers/spi/spi-img-spfi.c | 8
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 2
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 7
-rw-r--r--  drivers/staging/lustre/lustre/llite/namei.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_io.c | 2
-rw-r--r--  drivers/staging/media/tlg2300/Kconfig | 1
-rw-r--r--  drivers/staging/nvec/nvec.c | 9
-rw-r--r--  drivers/staging/vt6655/baseband.c | 2
-rw-r--r--  drivers/staging/vt6655/channel.c | 8
-rw-r--r--  drivers/staging/vt6655/device_main.c | 13
-rw-r--r--  drivers/staging/vt6655/rxtx.c | 5
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 4
-rw-r--r--  drivers/target/target_core_device.c | 54
-rw-r--r--  drivers/target/target_core_file.c | 12
-rw-r--r--  drivers/target/target_core_iblock.c | 3
-rw-r--r--  drivers/target/target_core_pr.c | 12
-rw-r--r--  drivers/target/target_core_rd.c | 1
-rw-r--r--  drivers/target/target_core_sbc.c | 15
-rw-r--r--  drivers/target/target_core_spc.c | 5
-rw-r--r--  drivers/target/target_core_user.c | 1
-rw-r--r--  drivers/thermal/cpu_cooling.c | 360
-rw-r--r--  drivers/thermal/db8500_cpufreq_cooling.c | 20
-rw-r--r--  drivers/thermal/imx_thermal.c | 17
-rw-r--r--  drivers/thermal/int340x_thermal/Makefile | 1
-rw-r--r--  drivers/thermal/int340x_thermal/acpi_thermal_rel.c | 24
-rw-r--r--  drivers/thermal/int340x_thermal/int3400_thermal.c | 1
-rw-r--r--  drivers/thermal/int340x_thermal/int3402_thermal.c | 1
-rw-r--r--  drivers/thermal/int340x_thermal/int3403_thermal.c | 4
-rw-r--r--  drivers/thermal/int340x_thermal/processor_thermal_device.c | 311
-rw-r--r--  drivers/thermal/intel_powerclamp.c | 1
-rw-r--r--  drivers/thermal/of-thermal.c | 2
-rw-r--r--  drivers/thermal/rcar_thermal.c | 17
-rw-r--r--  drivers/thermal/rockchip_thermal.c | 1
-rw-r--r--  drivers/thermal/samsung/Kconfig | 2
-rw-r--r--  drivers/thermal/samsung/exynos_thermal_common.c | 12
-rw-r--r--  drivers/thermal/samsung/exynos_tmu.c | 5
-rw-r--r--  drivers/thermal/thermal_core.c | 6
-rw-r--r--  drivers/thermal/thermal_core.h | 4
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 17
-rw-r--r--  drivers/tty/n_tty.c | 9
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c | 29
-rw-r--r--  drivers/tty/serial/samsung.c | 56
-rw-r--r--  drivers/tty/serial/serial_core.c | 4
-rw-r--r--  drivers/tty/tty_io.c | 7
-rw-r--r--  drivers/usb/chipidea/core.c | 2
-rw-r--r--  drivers/usb/chipidea/host.c | 1
-rw-r--r--  drivers/usb/core/otg_whitelist.h | 5
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc2/core_intr.c | 6
-rw-r--r--  drivers/usb/dwc2/gadget.c | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 5
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_uac1.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 2
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 19
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.c | 3
-rw-r--r--  drivers/usb/host/ehci-sched.c | 14
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 2
-rw-r--r--  drivers/usb/host/pci-quirks.c | 18
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 9
-rw-r--r--  drivers/usb/musb/Kconfig | 4
-rw-r--r--  drivers/usb/musb/blackfin.c | 2
-rw-r--r--  drivers/usb/musb/musb_cppi41.c | 4
-rw-r--r--  drivers/usb/musb/musb_debugfs.c | 34
-rw-r--r--  drivers/usb/musb/musb_host.c | 1
-rw-r--r--  drivers/usb/phy/phy-mv-usb.c | 5
-rw-r--r--  drivers/usb/phy/phy.c | 14
-rw-r--r--  drivers/usb/serial/console.c | 16
-rw-r--r--  drivers/usb/serial/cp210x.c | 4
-rw-r--r--  drivers/usb/serial/generic.c | 4
-rw-r--r--  drivers/usb/serial/keyspan.c | 20
-rw-r--r--  drivers/usb/serial/option.c | 11
-rw-r--r--  drivers/usb/serial/qcserial.c | 1
-rw-r--r--  drivers/usb/storage/uas-detect.h | 33
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 9
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 53
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 4
-rw-r--r--  drivers/vhost/net.c | 2
-rw-r--r--  drivers/vhost/scsi.c | 24
-rw-r--r--  drivers/vhost/vhost.c | 10
-rw-r--r--  drivers/video/fbdev/broadsheetfb.c | 8
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 5
-rw-r--r--  drivers/video/fbdev/omap2/dss/hdmi_pll.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/dss/pll.c | 3
-rw-r--r--  drivers/video/fbdev/omap2/dss/sdi.c | 2
-rw-r--r--  drivers/video/fbdev/simplefb.c | 2
-rw-r--r--  drivers/video/logo/logo.c | 17
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 10
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 1
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 12
-rw-r--r--  drivers/watchdog/cadence_wdt.c | 1
-rw-r--r--  drivers/watchdog/imx2_wdt.c | 40
-rw-r--r--  drivers/watchdog/meson_wdt.c | 1
474 files changed, 6300 insertions, 5359 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 694d5a70d6ce..c70d6e45dc10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
-source "drivers/soc/Kconfig"
-
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 67d2334dc41e..527a6da8d539 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/
obj-y += tty/
obj-y += char/
-# gpu/ comes after char for AGP vs DRM startup
+# iommu/ comes before gpu as gpu are using iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
obj-y += gpu/
obj-$(CONFIG_CONNECTOR) += connector/
@@ -141,7 +144,6 @@ obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
-obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1fdf5e07a1c7..1020b1b53a17 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
- if (pr->apic_id == -1)
+ if (pr->phys_id == -1)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
+ ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
if (ret)
goto out;
ret = arch_register_cpu(pr->id);
if (ret) {
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
goto out;
}
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int apic_id, cpu_index, device_declaration = 0;
+ int phys_id, cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
- apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
- if (apic_id < 0)
- acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
- pr->apic_id = apic_id;
+ phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
+ if (phys_id < 0)
+ acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
+ pr->phys_id = phys_id;
- cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+ cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
cpu0_initialized = 1;
- /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+ /*
+ * Handle UP system running SMP kernel, with no CPU
+ * entry in MADT
+ */
if ((cpu_index == -1) && (num_online_cpus() == 1))
cpu_index = 0;
}
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Remove the CPU. */
arch_unregister_cpu(pr->id);
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
cpu_hotplug_done();
cpu_maps_update_done();
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c2daa85fc9f7..c0d44d394ca3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device))
- return 0;
+ return -ENXIO;
result = acpi_device_get_power(device, &state);
if (result)
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index a27d31d1ba24..9dcf83682e36 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -14,10 +14,10 @@
#include "internal.h"
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT3400", DO_ENUMERATION },
- {"INT3401"},
+ {"INT3400"},
+ {"INT3401", INT3401_DEVICE},
{"INT3402"},
{"INT3403"},
{"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
- if (id->driver_data == DO_ENUMERATION)
+ acpi_create_platform_device(adev);
+#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
+ /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+ if (id->driver_data == INT3401_DEVICE)
acpi_create_platform_device(adev);
#endif
return 1;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 5277a0ee5704..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- dev->irq = 0;
dev->irq_managed = 0;
}
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 342942f90a10..02e48394276c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
unsigned long madt_end, entry;
static struct acpi_table_madt *madt;
static int read_madt;
- int apic_id = -1;
+ int phys_id = -1; /* CPU hardware ID */
if (!read_madt) {
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
}
if (!madt)
- return apic_id;
+ return phys_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (!map_lapic_id(header, acpi_id, &apic_id))
+ if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (!map_x2apic_id(header, type, acpi_id, &apic_id))
+ if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (!map_lsapic_id(header, type, acpi_id, &apic_id))
+ if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
}
- return apic_id;
+ return phys_id;
}
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
- int apic_id = -1;
+ int phys_id = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
- map_lapic_id(header, acpi_id, &apic_id);
+ map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
- map_lsapic_id(header, type, acpi_id, &apic_id);
+ map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
- map_x2apic_id(header, type, acpi_id, &apic_id);
+ map_x2apic_id(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
- return apic_id;
+ return phys_id;
}
-int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = map_mat_entry(handle, type, acpi_id);
- if (apic_id == -1)
- apic_id = map_madt_entry(type, acpi_id);
+ phys_id = map_mat_entry(handle, type, acpi_id);
+ if (phys_id == -1)
+ phys_id = map_madt_entry(type, acpi_id);
- return apic_id;
+ return phys_id;
}
-int acpi_map_cpuid(int apic_id, u32 acpi_id)
+int acpi_map_cpuid(int phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
- if (apic_id == -1) {
+ if (phys_id == -1) {
/*
* On UP processor, there is no _MAT or MADT table.
- * So above apic_id is always set to -1.
+ * So above phys_id is always set to -1.
*
* BIOS may define multiple CPU handles even for UP processor.
* For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always returns 0 for the processor
+ * Ignores phys_id and always returns 0 for the processor
* handle with acpi id 0 if nr_cpu_ids is 1.
* This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
- return apic_id;
+ return phys_id;
}
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
- if (cpu_physical_id(i) == apic_id)
+ if (cpu_physical_id(i) == phys_id)
return i;
}
#else
/* In UP kernel, only processor 0 is valid */
- if (apic_id == 0)
- return apic_id;
+ if (phys_id == 0)
+ return phys_id;
#endif
return -1;
}
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = acpi_get_apicid(handle, type, acpi_id);
+ phys_id = acpi_get_phys_id(handle, type, acpi_id);
- return acpi_map_cpuid(apic_id, acpi_id);
+ return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 499536504698..87b704e41877 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
- if (cx->entry_method != ACPI_CSTATE_FFH)
- state->flags |= CPUIDLE_FLAG_TIME_INVALID;
state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 16914cc30882..dc4d8960684a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
if (device->wakeup.flags.valid)
acpi_power_resources_list_free(&device->wakeup.resources);
- if (!device->flags.power_manageable)
+ if (!device->power.flags.power_resources)
return;
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
- if (acpi_bus_init_power(device)) {
- acpi_free_power_resources_lists(device);
+ if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
- }
}
static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
device->flags.visited = false;
+ device->flags.power_manageable = 0;
return;
}
if (device->handler)
goto ok;
if (!device->flags.initialized) {
- acpi_bus_update_power(device, NULL);
+ device->flags.power_manageable =
+ device->power.states[ACPI_STATE_D0].flags.valid;
+ if (acpi_bus_init_power(device))
+ device->flags.power_manageable = 0;
+
device->flags.initialized = true;
}
device->flags.visited = false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1eaadff2e198..032db459370f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -505,6 +505,33 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
},
},
+
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
+ },
+ },
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
+ },
+ },
+
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
+ .callback = video_disable_native_backlight,
+ .ident = "Dell XPS15 L521X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
+ },
+ },
{}
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a3a13605a9c4..5f601553b9b0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -835,6 +835,7 @@ config PATA_AT32
config PATA_AT91
tristate "PATA support for AT91SAM9260"
depends on ARM && SOC_AT91SAM9
+ depends on !ARCH_MULTIPLATFORM
help
This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 49f1e6890587..33bb06e006c9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index feeb8f1e2fe8..cbcd20810355 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -125,10 +125,11 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
* xgene_ahci_qc_issue - Issue commands to the device
* @qc: Command to issue
*
- * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot
- * clear the BSY bit after receiving the PIO setup FIS. This results in the dma
- * state machine goes into the CMFatalErrorUpdate state and locks up. By
- * restarting the dma engine, it removes the controller out of lock up state.
+ * Due to Hardware errata for IDENTIFY DEVICE command and PACKET
+ * command of ATAPI protocol set, the controller cannot clear the BSY bit
+ * after receiving the PIO setup FIS. This results in the DMA state machine
+ * going into the CMFatalErrorUpdate state and locks up. By restarting the
+ * DMA engine, it removes the controller out of lock up state.
*/
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
@@ -137,7 +138,8 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
struct xgene_ahci_context *ctx = hpriv->plat_data;
int rc = 0;
- if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+ if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
+ (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET)))
xgene_ahci_restart_engine(ap);
rc = ahci_qc_issue(qc);
@@ -188,7 +190,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
*
* Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
*/
- id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 97683e45ab04..61a9c07e0dff 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
devslp = readl(port_mmio + PORT_DEVSLP);
if (!(devslp & PORT_DEVSLP_DSP)) {
- dev_err(ap->host->dev, "port does not support device sleep\n");
+ dev_info(ap->host->dev, "port does not support device sleep\n");
return;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5c84fb5c3372..d1a05f9bb91f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4233,10 +4233,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+ * (Return Zero After Trim) flags in the ATA Command Set are
+ * unreliable in the sense that they only define what happens if
+ * the device successfully executed the DSM TRIM command. TRIM
+ * is only advisory, however, and the device is free to silently
+ * ignore all or parts of the request.
+ *
+ * Whitelist drives that are known to reliably return zeroes
+ * after TRIM.
+ */
+
+ /*
+ * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
+ * that model before whitelisting all other intel SSDs.
+ */
+ { "INTEL*SSDSC2MH*", NULL, 0, },
+
+ { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4748,7 +4771,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
return NULL;
for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
- tag = tag < max_queue ? tag : 0;
+ if (ap->flags & ATA_FLAG_LOWTAG)
+ tag = i;
+ else
+ tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3dbec8954c86..8d00c2638bed 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2389,6 +2389,7 @@ const char *ata_get_cmd_descript(u8 command)
return NULL;
}
+EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
/**
* ata_eh_link_report - report error handling to user
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e364e86e84d7..6abd17a85b13 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id)) {
- rbuf[14] |= 0x80; /* TPE */
+ rbuf[14] |= 0x80; /* LBPME */
- if (ata_id_has_zero_after_trim(args->id))
- rbuf[14] |= 0x40; /* TPRZ */
+ if (ata_id_has_zero_after_trim(args->id) &&
+ dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
+ }
}
}
-
return 0;
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index db90aa35cb71..2e86e3b85266 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
DPRINTK("ENTER\n");
cancel_delayed_work_sync(&ap->sff_pio_task);
+
+ /*
+ * We wanna reset the HSM state to IDLE. If we do so without
+ * grabbing the port lock, critical sections protected by it which
+ * expect the HSM state to stay stable may get surprised. For
+ * example, we may set IDLE in between the time
+ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+ */
+ spin_lock_irq(ap->lock);
ap->hsm_task_state = HSM_ST_IDLE;
+ spin_unlock_irq(ap->lock);
+
ap->sff_pio_task_link = NULL;
if (ata_msg_ctl(ap))
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index c7ddef89e7b0..8e8248179d20 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
if (err) {
dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
" %d\n", __func__, err);
- goto error_out;
+ return err;
}
/* Enabe DMA */
@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
sata_dma_regs);
return 0;
-
-error_out:
- dma_dwc_exit(hsdev);
-
- return err;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
char *ver = (char *)&versionr;
u8 *base = NULL;
int err = 0;
- int irq, rc;
+ int irq;
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
if (irq == NO_IRQ) {
dev_err(&ofdev->dev, "no SATA DMA irq\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Get physical SATA DMA register base address */
@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
" address\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Save dev for later use in dev_xxx() routines */
host_pvt.dwc_dev = &ofdev->dev;
/* Initialize AHB DMAC */
- dma_dwc_init(hsdev, irq);
+ err = dma_dwc_init(hsdev, irq);
+ if (err)
+ goto error_dma_iomap;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
* device discovery process, invoking our port_start() handler &
* error_handler() to execute a dummy Softreset EH session
*/
- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
-
- if (rc != 0)
+ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+ if (err)
dev_err(&ofdev->dev, "failed to activate host");
dev_set_drvdata(&ofdev->dev, host);
@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
error_out:
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
-
+error_dma_iomap:
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
error_iomap:
iounmap(base);
error_kmalloc:
@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
iounmap(hsdev->reg_base);
kfree(hsdev);
kfree(host);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index d81b20ddb527..ea655949023f 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -246,7 +246,7 @@ enum {
/* host flags */
SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
- ATA_FLAG_AN | ATA_FLAG_PMP,
+ ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
IRQ_STAT_4PORTS = 0xf,
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6a103a35ea9b..0d8780c04a5e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
* on failure.
*/
-static struct generic_pm_domain *of_genpd_get_from_provider(
+struct generic_pm_domain *of_genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
return genpd;
}
+EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d24dd614a0bd..106c69359306 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
+#define opp_rcu_lockdep_assert() \
+do { \
+ rcu_lockdep_assert(rcu_read_lock_held() || \
+ lockdep_is_held(&dev_opp_list_lock), \
+ "Missing rcu_read_lock() or " \
+ "dev_opp_list_lock protection"); \
+} while (0)
+
/**
* find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
* This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
*
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
struct dev_pm_opp *temp_opp;
int count = 0;
+ rcu_read_lock();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
- return r;
+ count = PTR_ERR(dev_opp);
+ dev_err(dev, "%s: device OPP not found (%d)\n",
+ __func__, count);
+ goto out_unlock;
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
count++;
}
+out_unlock:
+ rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
- if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
- PTR_ERR(dev_opp)))
+ if (IS_ERR(dev_opp)) {
+ int error = PTR_ERR(dev_opp);
+ if (error != -ENODEV)
+ WARN(1, "%s: dev_opp: %d\n",
+ IS_ERR_OR_NULL(dev) ?
+ "Invalid device" : dev_name(dev),
+ error);
return;
+ }
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ae9f615382f6..aa2224aa7caa 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -530,7 +530,7 @@ static int null_add_dev(void)
goto out_cleanup_queues;
nullb->q = blk_mq_init_queue(&nullb->tag_set);
- if (!nullb->q) {
+ if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
goto out_cleanup_tags;
}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b1d5d8797315..d826bf3e62c8 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -106,7 +106,7 @@ struct nvme_queue {
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
- u16 cq_vector;
+ s16 cq_vector;
u16 sq_head;
u16 sq_tail;
u16 cq_head;
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
cmd->fn = handler;
cmd->ctx = ctx;
cmd->aborted = 0;
+ blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
}
/* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
if (unlikely(status)) {
if (!(status & NVME_SC_DNR || blk_noretry_request(req))
&& (jiffies - req->start_time) < req->timeout) {
+ unsigned long flags;
+
blk_mq_requeue_request(req);
- blk_mq_kick_requeue_list(req->q);
+ spin_lock_irqsave(req->q->queue_lock, flags);
+ if (!blk_queue_stopped(req->q))
+ blk_mq_kick_requeue_list(req->q);
+ spin_unlock_irqrestore(req->q->queue_lock, flags);
return;
}
req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- blk_mq_start_request(req);
-
nvme_set_info(cmd, iod, req_completion);
spin_lock_irq(&nvmeq->q_lock);
if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
if (IS_ERR(req))
return PTR_ERR(req);
+ req->cmd_flags |= REQ_NO_TIMEOUT;
cmd_info = blk_mq_rq_to_pdu(req);
nvme_set_info(cmd_info, req, async_req_completion);
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
struct nvme_command cmd;
if (!nvmeq->qid || cmd_rq->aborted) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_list_lock, flags);
if (work_busy(&dev->reset_work))
- return;
+ goto out;
list_del_init(&dev->node);
dev_warn(&dev->pci_dev->dev,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
+ out:
+ spin_unlock_irqrestore(&dev_list_lock, flags);
return;
}
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
void *ctx;
nvme_completion_fn fn;
struct nvme_cmd_info *cmd;
- static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
- };
+ struct nvme_completion cqe;
+
+ if (!blk_mq_request_started(req))
+ return;
cmd = blk_mq_rq_to_pdu(req);
if (cmd->ctx == CMD_CTX_CANCELLED)
return;
+ if (blk_queue_dying(req->q))
+ cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+ else
+ cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
+
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
req->tag, nvmeq->qid);
ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = cmd->nvmeq;
- dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
- nvmeq->qid);
- if (nvmeq->dev->initialized)
- nvme_abort_req(req);
-
/*
* The aborted req will be completed on receiving the abort req.
* We enable the timer again. If hit twice, it'll cause a device reset,
* as the device then is in a faulty state.
*/
- return BLK_EH_RESET_TIMER;
+ int ret = BLK_EH_RESET_TIMER;
+
+ dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+ nvmeq->qid);
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (!nvmeq->dev->initialized) {
+ /*
+ * Force cancelled command frees the request, which requires we
+ * return BLK_EH_NOT_HANDLED.
+ */
+ nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+ ret = BLK_EH_NOT_HANDLED;
+ } else
+ nvme_abort_req(req);
+ spin_unlock_irq(&nvmeq->q_lock);
+
+ return ret;
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+ int vector;
spin_lock_irq(&nvmeq->q_lock);
+ if (nvmeq->cq_vector == -1) {
+ spin_unlock_irq(&nvmeq->q_lock);
+ return 1;
+ }
+ vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
nvmeq->dev->online_queues--;
+ nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
adapter_delete_sq(dev, qid);
adapter_delete_cq(dev, qid);
}
+ if (!qid && dev->admin_q)
+ blk_mq_freeze_queue_start(dev->admin_q);
nvme_clear_queue(nvmeq);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int vector)
+ int depth)
{
struct device *dmadev = &dev->pci_dev->dev;
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
- nvmeq->cq_vector = vector;
nvmeq->qid = qid;
dev->queue_count++;
dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
struct nvme_dev *dev = nvmeq->dev;
int result;
+ nvmeq->cq_vector = qid - 1;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
.timeout = nvme_timeout,
};
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+ if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+ blk_cleanup_queue(dev->admin_q);
+ blk_mq_free_tag_set(&dev->admin_tagset);
+ }
+}
+
static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
return -ENOMEM;
dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (!dev->admin_q) {
+ if (IS_ERR(dev->admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
return -ENOMEM;
}
- }
+ if (!blk_get_queue(dev->admin_q)) {
+ nvme_dev_remove_admin(dev);
+ return -ENODEV;
+ }
+ } else
+ blk_mq_unfreeze_queue(dev->admin_q);
return 0;
}
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
- if (dev->admin_q)
- blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
nvmeq = dev->queues[0];
if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+ nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
if (!nvmeq)
return -ENOMEM;
}
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
goto free_nvmeq;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto free_nvmeq;
-
+ nvmeq->cq_vector = 0;
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
- goto free_tags;
+ goto free_nvmeq;
return result;
- free_tags:
- nvme_free_admin_tags(dev);
free_nvmeq:
nvme_free_queues(dev, 0);
return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
unsigned i;
for (i = dev->queue_count; i <= dev->max_qid; i++)
- if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+ if (!nvme_alloc_queue(dev, i, dev->q_depth))
break;
for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
break;
if (!schedule_timeout(ADMIN_TIMEOUT) ||
fatal_signal_pending(current)) {
+ /*
+ * Disable the controller first since we can't trust it
+ * at this point, but leave the admin queue enabled
+ * until all queue deletion requests are flushed.
+ * FIXME: This may take a while if there are more h/w
+ * queues than admin tags.
+ */
set_current_state(TASK_RUNNING);
-
nvme_disable_ctrl(dev, readq(&dev->bar->cap));
- nvme_disable_queue(dev, 0);
-
- send_sig(SIGKILL, dq->worker->task, 1);
+ nvme_clear_queue(dev->queues[0]);
flush_kthread_worker(dq->worker);
+ nvme_disable_queue(dev, 0);
return;
}
}
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
{
struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
cmdinfo.work);
- allow_signal(SIGKILL);
if (nvme_delete_sq(nvmeq))
nvme_del_queue_end(nvmeq);
}
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
kthread_stop(tmp);
}
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ blk_mq_freeze_queue_start(ns->queue);
+
+ spin_lock(ns->queue->queue_lock);
+ queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+ spin_unlock(ns->queue->queue_lock);
+
+ blk_mq_cancel_requeue_work(ns->queue);
+ blk_mq_stop_hw_queues(ns->queue);
+ }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+ blk_mq_unfreeze_queue(ns->queue);
+ blk_mq_start_stopped_hw_queues(ns->queue, true);
+ blk_mq_kick_requeue_list(ns->queue);
+ }
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
dev->initialized = 0;
nvme_dev_list_remove(dev);
- if (dev->bar)
+ if (dev->bar) {
+ nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
+ }
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_unmap(dev);
}
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
- if (dev->admin_q && !blk_queue_dying(dev->admin_q))
- blk_cleanup_queue(dev->admin_q);
-}
-
static void nvme_dev_remove(struct nvme_dev *dev)
{
struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
list_for_each_entry(ns, &dev->namespaces, list) {
if (ns->disk->flags & GENHD_FL_UP)
del_gendisk(ns->disk);
- if (!blk_queue_dying(ns->queue))
+ if (!blk_queue_dying(ns->queue)) {
+ blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
+ }
}
}
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
nvme_free_namespaces(dev);
nvme_release_instance(dev);
blk_mq_free_tag_set(&dev->tagset);
+ blk_put_queue(dev->admin_q);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
}
nvme_init_queue(dev->queues[0], 0);
+ result = nvme_alloc_admin_tags(dev);
+ if (result)
+ goto disable;
result = nvme_setup_io_queues(dev);
if (result)
- goto disable;
+ goto free_tags;
nvme_set_irq_hints(dev);
return result;
+ free_tags:
+ nvme_dev_remove_admin(dev);
disable:
nvme_disable_queue(dev, 0);
nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
dev->reset_workfn = nvme_remove_disks;
queue_work(nvme_workq, &dev->reset_work);
spin_unlock(&dev_list_lock);
+ } else {
+ nvme_unfreeze_queues(dev);
+ nvme_set_irq_hints(dev);
}
dev->initialized = 1;
return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
- nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
+ nvme_dev_remove(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_free_admin_tags(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
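
The admin-queue setup above now treats blk_mq_init_queue() as returning an encoded error pointer rather than NULL on failure, hence the switch to IS_ERR(). A minimal userspace sketch of that error-pointer convention, with stand-in ERR_PTR/IS_ERR/PTR_ERR helpers modelled on (but not identical to) the kernel's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno value in an otherwise invalid pointer. */
static void *ERR_PTR(long error)      { return (void *)(intptr_t)error; }
static long  PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static int   IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for an init function that reports failures via error pointers. */
static void *init_queue(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);        /* failure: encoded errno, not NULL */
        return malloc(64);                      /* success: a real object */
}

int main(void)
{
        void *q = init_queue(1);

        if (IS_ERR(q)) {                        /* a plain NULL check would miss this */
                printf("init failed: %ld\n", PTR_ERR(q));
                return 1;
        }
        free(q);
        return 0;
}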
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened. We
- * drop it again if there is no overlap.
- *
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
- int counter;
+ int counter = 0;
if (!rbd_dev->parent_spec)
return false;
- counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- if (counter > 0 && rbd_dev->parent_overlap)
- return true;
-
- /* Image was flattened, but parent is not yet torn down */
+ down_read(&rbd_dev->header_rwsem);
+ if (rbd_dev->parent_overlap)
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+ up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
- return false;
+ return counter > 0;
}
/*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
- smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially.
*/
rbd_dev->parent_overlap = overlap;
- smp_mb();
if (!overlap) {
/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
- /* Drop parent reference unless it's already been done (or none) */
-
- if (rbd_dev->parent_overlap)
- rbd_dev_parent_put(rbd_dev);
+ rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
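
The reworked rbd_dev_parent_get() above takes header_rwsem for reading so that testing parent_overlap and bumping the parent reference happen atomically with respect to a writer flattening the image. A rough userspace sketch of that shape, using a pthread rwlock and a plain counter in place of the rbd structures and atomic_inc_return_safe():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
        pthread_rwlock_t header_rwsem;
        unsigned long parent_overlap;   /* cleared by a writer when flattened */
        int parent_ref;                 /* reference count on the parent */
};

/* Take a parent reference only while the overlap is still non-zero. */
static bool parent_get(struct dev *d)
{
        int counter = 0;

        pthread_rwlock_rdlock(&d->header_rwsem);
        if (d->parent_overlap)
                counter = ++d->parent_ref;
        pthread_rwlock_unlock(&d->header_rwsem);

        return counter > 0;
}

/* A writer flattening the image clears the overlap under the same lock. */
static void flatten(struct dev *d)
{
        pthread_rwlock_wrlock(&d->header_rwsem);
        d->parent_overlap = 0;
        pthread_rwlock_unlock(&d->header_rwsem);
}

int main(void)
{
        struct dev d = { PTHREAD_RWLOCK_INITIALIZER, 4096, 0 };

        printf("got ref: %d\n", parent_get(&d));        /* 1: overlap present */
        flatten(&d);
        printf("got ref: %d\n", parent_get(&d));        /* 0: image flattened */
        return 0;
}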
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ef7c098708f..cdfbd21e3597 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
- if (!q) {
+ if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 860da40b78ef..0ce5e2d65a06 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1312,6 +1312,9 @@ static int cci_probe(void)
if (!np)
return -ENODEV;
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
cci_config = of_match_node(arm_cci_matches, np)->data;
if (!cci_config)
return -ENODEV;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index eb7682dc123b..81bf297f1034 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
}
/* Checks whether the given window number is available */
+
+/* On Armada XP, 375 and 38x the MBus window 13 has the remap
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
+ * doesn't currently take this special case into account, which means
+ * that when window 13 is actually used, the remap registers are left
+ * at 0, making the device that uses this MBus window unavailable. The
+ * quick fix for stable is to not use window 13. A follow-up patch
+ * will handle this window correctly.
+ */
static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
const int win)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+ if (win == 13)
+ return false;
+
return !(ctrl & WIN_CTRL_ENABLE);
}
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 19db03667650..dcbbb4ea3cc1 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 3b47ed0310e1..0ef350010766 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 18a7a6baa304..75a9786a77e6 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 317c28ce8328..38ffb281df97 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
__setup("agp=", agp_setup);
#endif
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index f9b9ca5d31b7..0a21daed5b62 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index f3334829e55a..92aa43fa8d70 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
}
EXPORT_SYMBOL(intel_gmch_remove);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index a1861b75eb31..6c8d39cb566e 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
/*
* Nvidia AGPGART routines.
* Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@redhat.com>
+ * to work in 2.5 by Dave Jones.
*/
#include <linux/module.h>
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 228f20cddc05..a4961d35e940 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@ module_init(agp_via_init);
module_exit(agp_via_cleanup);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5fa83f751378..6b65fa4e0c55 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -199,18 +199,6 @@ struct bmc_device {
int guid_set;
char name[16];
struct kref usecount;
-
- /* bmc device attributes */
- struct device_attribute device_id_attr;
- struct device_attribute provides_dev_sdrs_attr;
- struct device_attribute revision_attr;
- struct device_attribute firmware_rev_attr;
- struct device_attribute version_attr;
- struct device_attribute add_dev_support_attr;
- struct device_attribute manufacturer_id_attr;
- struct device_attribute product_id_attr;
- struct device_attribute guid_attr;
- struct device_attribute aux_firmware_rev_attr;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
@@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
-DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
+static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
@@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
-DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
+static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
+ NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
-DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
+static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
@@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
-DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
+static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
@@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
-DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
+static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
@@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
-DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+ NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
-DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
+static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
-DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
+static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
@@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
-DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
-DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (bmc->guid_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
platform_device_unregister(&bmc->pdev);
}
@@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
int err;
if (bmc->id.aux_firmware_revision_set) {
- bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
err = device_create_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (err)
goto out;
}
if (bmc->guid_set) {
- bmc->guid_attr.attr.name = "guid";
err = device_create_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
if (err)
goto out_aux_firm;
}
@@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
out:
return err;
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index e178ac27e73c..982b96323f82 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -52,6 +52,7 @@
#include <linux/dmi.h>
#include <linux/kthread.h>
#include <linux/acpi.h>
+#include <linux/ctype.h>
#define PFX "ipmi_ssif: "
#define DEVICE_NAME "ipmi_ssif"
@@ -968,7 +969,8 @@ static void sender(void *send_info,
do_gettimeofday(&t);
pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
- msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+ msg->data[0], msg->data[1],
+ (long) t.tv_sec, (long) t.tv_usec);
}
}
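
The cast added above exists because time_t and suseconds_t are not the same width on every architecture, so passing them straight to a %ld conversion is not portable; casting to long keeps the format string honest. A userspace equivalent:

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
        struct timeval t;

        gettimeofday(&t, NULL);

        /* tv_sec and tv_usec may be different integer types depending on
         * the platform; the casts make them match the %ld conversions. */
        printf("%ld.%6.6ld\n", (long)t.tv_sec, (long)t.tv_usec);
        return 0;
}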
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 32f7c1b36204..2f13bd5246b5 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {
#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
+static struct clk *slow_clk;
static int clk_slow_osc_prepare(struct clk_hw *hw)
{
@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;
return clk;
}
@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;
return clk;
}
@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+
+/*
+ * FIXME: Not all slow clk users properly claim it (get + prepare +
+ * enable) before using it.
+ * If all the users that do claim this clock properly decide that they
+ * don't need it anymore (or are removed), it gets disabled while the
+ * faulty users still rely on it, and the system hangs.
+ * Prevent this clock from being disabled until all users properly
+ * request it.
+ * Once this is done we should remove this function and the slow_clk
+ * variable.
+ */
+static int __init of_at91_clk_slow_retain(void)
+{
+ if (!slow_clk)
+ return 0;
+
+ __clk_get(slow_clk);
+ clk_prepare_enable(slow_clk);
+
+ return 0;
+}
+arch_initcall(of_at91_clk_slow_retain);
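
The initcall above pins the slow clock by taking one reference and enable count that are never dropped, so well-behaved users releasing the clock can never gate it off underneath the faulty ones. The idea in miniature, with a plain counter standing in for the clk framework:

#include <stdio.h>

struct clk {
        const char *name;
        int enable_count;
};

static void clk_enable(struct clk *c)
{
        c->enable_count++;
}

static void clk_disable(struct clk *c)
{
        if (--c->enable_count == 0)
                printf("%s: gated off\n", c->name);
}

int main(void)
{
        struct clk slow = { "slowck", 0 };

        clk_enable(&slow);      /* the "retain" reference taken at boot */

        /* A well-behaved user claims and later releases the clock ... */
        clk_enable(&slow);
        clk_disable(&slow);

        /* ... but the retained count keeps it running for everyone else. */
        printf("%s: enable_count=%d, still running\n",
               slow.name, slow.enable_count);
        return 0;
}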
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 21784e4eb3f0..440ef81ab15c 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
{ "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
{ "sdio", "perif", 16, CLK_IGNORE_UNUSED },
{ "nfc", "perif", 18 },
- { "smemc", "perif", 19 },
{ "pcie", "perif", 22 },
};
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index b6e6c85507a5..0a47d6f49cd6 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
{}
};
-static struct platform_driver ppc_corenet_clk_driver __initdata = {
+static struct platform_driver ppc_corenet_clk_driver = {
.driver = {
.name = "ppc_corenet_clock",
.of_match_table = ppc_clk_ids,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f4963b7d4e17..d48ac71c6c8b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1366,7 +1366,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
new_rate = clk->ops->determine_rate(clk->hw, rate,
&best_parent_rate,
&parent_hw);
- parent = parent_hw->clk;
+ parent = parent_hw ? parent_hw->clk : NULL;
} else if (clk->ops->round_rate) {
new_rate = clk->ops->round_rate(clk->hw, rate,
&best_parent_rate);
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 75c8c45ef728..8539c4fd34cc 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -124,10 +124,11 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
unsigned long alt_prate, alt_div;
+ unsigned long flags;
alt_prate = clk_get_rate(cpuclk->alt_parent);
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
/*
* If the old parent clock speed is less than the clock speed
@@ -164,7 +165,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
cpuclk->reg_base + reg_data->core_reg);
}
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
@@ -173,6 +174,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
const struct rockchip_cpuclk_rate_table *rate;
+ unsigned long flags;
rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
if (!rate) {
@@ -181,7 +183,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
return -EINVAL;
}
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
if (ndata->old_rate < ndata->new_rate)
rockchip_cpuclk_set_dividers(cpuclk, rate);
@@ -201,7 +203,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
if (ndata->old_rate > ndata->new_rate)
rockchip_cpuclk_set_dividers(cpuclk, rate);
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index c54078960847..7eb684c50d42 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p) = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
PNAME(mux_mac_p) = { "gpll", "dpll" };
PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
+static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+ RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates),
+ [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+ RK2928_MODE_CON, 4, 4, 0, NULL),
+ [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+ RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+ [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+ RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+};
+
static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
@@ -427,11 +438,11 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/* hclk_peri gates */
GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
- GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS),
GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
- GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS),
- GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
@@ -592,7 +603,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
- GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(5), 14, GFLAGS),
GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
@@ -680,7 +692,8 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
- GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
@@ -735,8 +748,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
static void __init rk3066a_clk_init(struct device_node *np)
{
rk3188_common_clk_init(np);
- rockchip_clk_register_plls(rk3188_pll_clks,
- ARRAY_SIZE(rk3188_pll_clks),
+ rockchip_clk_register_plls(rk3066_pll_clks,
+ ARRAY_SIZE(rk3066_pll_clks),
RK3066_GRF_SOC_STATUS);
rockchip_clk_register_branches(rk3066a_clk_branches,
ARRAY_SIZE(rk3066a_clk_branches));
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index ac6be7c0132d..11194b8329fe 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -145,20 +145,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
}
static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
- RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4),
+ RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
};
static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 6a79fc4f900c..095c1774592c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0595dc6c453e..f1e33d08dd83 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base)
}
static void
-kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
+kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
- void __iomem *base = IOMEM(timer_base);
int loop_limit = 4;
/*
@@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
*/
while (--loop_limit) {
- *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
- *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
- if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
+ *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
+ *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
+ if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
}
if (!loop_limit) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 9403061a2acc..83564c9cfdbe 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
writel_relaxed(value, reg_base + offset);
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
- switch (offset & EXYNOS4_MCT_L_MASK) {
+ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+ switch (offset & ~EXYNOS4_MCT_L_MASK) {
case MCT_L_TCON_OFFSET:
mask = 1 << 3; /* L_TCON write status */
break;
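
The exynos_mct fix above swaps which half of the register offset is treated as the per-timer base and which as the register within it: EXYNOS4_MCT_L_MASK keeps the upper base bits, so the write-status address needs offset & mask while the switch needs offset & ~mask. A small sketch of that base/register split, with illustrative mask and offset values rather than the real Exynos ones:

#include <stdio.h>

#define L_MASK          0xffffff00u     /* upper bits: which timer block */
#define TCON_OFFSET     0x08u           /* register inside the block */

int main(void)
{
        unsigned int offset = 0x300 + TCON_OFFSET;      /* timer block at 0x300 */

        unsigned int base = offset & L_MASK;            /* 0x300 */
        unsigned int reg  = offset & ~L_MASK;           /* 0x08  */

        printf("base=0x%x reg=0x%x\n", base, reg);

        /* With the two masks swapped, as in the old code, the "base" is
         * 0x08 and the "register" is 0x300, both wrong. */
        printf("swapped: base=0x%x reg=0x%x\n", offset & ~L_MASK, offset & L_MASK);
        return 0;
}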
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 0f665b8f2461..f150ca82bfaf 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
- ced->cpumask = cpumask_of(0);
+ ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
ced->suspend = sh_tmu_clock_event_suspend;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f56147a1daed..fde97d6e31d6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
+ /*
+ * But we need the OPP table to function, so if it is not there, let's
+ * give the platform code a chance to provide it for us.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ pr_debug("OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
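
The new check above is the standard probe-deferral pattern: if a dependency (here the OPP table) has not been provided yet, return a distinguished error so the driver core retries the probe later. Reduced to a toy example, where EPROBE_DEFER is reused as the conventional value and the retry is done by the caller:

#include <stdio.h>

#define EPROBE_DEFER    517     /* same value the kernel uses for -EPROBE_DEFER */

static int opp_count;           /* filled in later by "platform code" */

static int cpufreq_probe(void)
{
        if (opp_count <= 0) {
                printf("OPP table is not ready, deferring probe\n");
                return -EPROBE_DEFER;   /* caller re-queues this probe */
        }
        printf("probed with %d OPPs\n", opp_count);
        return 0;
}

int main(void)
{
        if (cpufreq_probe() == -EPROBE_DEFER) {
                opp_count = 4;          /* the dependency shows up later */
                cpufreq_probe();        /* retried, as the driver core would */
        }
        return 0;
}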
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a09a29c312a9..46bed4f81cde 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;
+ /*
+ * The governor might not have been initialized here if an ACPI _PPC
+ * change notification happened, so check for it.
+ */
+ if (!policy->governor)
+ return -EINVAL;
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 37263d9a1051..401c0106ed34 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
- if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
- last_residency = cpuidle_get_last_residency(dev) - \
- drv->states[last_idx].exit_latency;
- }
- else
- last_residency = last_state->threshold.promotion_time + 1;
+ last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 659d7b0c9ebf..40580794e23d 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* power state and occurrence of the wakeup event.
*
* If the entered idle state didn't support residency measurements,
- * we are basically lost in the dark how much time passed.
- * As a compromise, assume we slept for the whole expected time.
+ * we still use the measured value if it is short; if it is long,
+ * we truncate it to the whole expected time.
*
* Any measured amount of time will include the exit latency.
* Since we are interested in when the wakeup begun, not when it
@@ -405,22 +405,17 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* the measured amount of time is less than the exit latency,
* assume the state was never reached and the exit latency is 0.
*/
- if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
- /* Use timer value as is */
- measured_us = data->next_timer_us;
- } else {
- /* Use measured value */
- measured_us = cpuidle_get_last_residency(dev);
+ /* measured value */
+ measured_us = cpuidle_get_last_residency(dev);
- /* Deduct exit latency */
- if (measured_us > target->exit_latency)
- measured_us -= target->exit_latency;
+ /* Deduct exit latency */
+ if (measured_us > target->exit_latency)
+ measured_us -= target->exit_latency;
- /* Make sure our coefficients do not exceed unity */
- if (measured_us > data->next_timer_us)
- measured_us = data->next_timer_us;
- }
+ /* Make sure our coefficients do not exceed unity */
+ if (measured_us > data->next_timer_us)
+ measured_us = data->next_timer_us;
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
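
The rewritten update path above always starts from the measured residency, subtracts the exit latency when the measurement is large enough to contain it, and clamps the result to the predicted timer interval so the correction ratio never exceeds unity. The same calculation as a standalone function, with invented example values:

#include <stdio.h>

/* Derive the "truly idle" time from a raw residency measurement. */
static unsigned int measured_idle_us(unsigned int residency_us,
                                     unsigned int exit_latency_us,
                                     unsigned int next_timer_us)
{
        unsigned int measured_us = residency_us;

        /* Deduct the exit latency; if the measurement is smaller than the
         * exit latency, assume the state was never really reached. */
        if (measured_us > exit_latency_us)
                measured_us -= exit_latency_us;

        /* Keep the correction ratio (measured / predicted) at or below 1. */
        if (measured_us > next_timer_us)
                measured_us = next_timer_us;

        return measured_us;
}

int main(void)
{
        printf("%u\n", measured_idle_us(250, 10, 500)); /* 240 */
        printf("%u\n", measured_idle_us(800, 10, 500)); /* clamped to 500 */
        printf("%u\n", measured_idle_us(5, 10, 500));   /* 5: below exit latency */
        return 0;
}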
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 380478562b7d..5c062548957c 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1505,7 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dw->regs = chip->regs;
chip->dw = dw;
- pm_runtime_enable(chip->dev);
pm_runtime_get_sync(chip->dev);
dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
@@ -1703,7 +1702,6 @@ int dw_dma_remove(struct dw_dma_chip *chip)
}
pm_runtime_put_sync_suspend(chip->dev);
- pm_runtime_disable(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index a630161473a4..32ea1aca7a0e 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -185,6 +186,8 @@ static int dw_probe(struct platform_device *pdev)
if (err)
return err;
+ pm_runtime_enable(&pdev->dev);
+
err = dw_dma_probe(chip, pdata);
if (err)
goto err_dw_dma_probe;
@@ -205,6 +208,7 @@ static int dw_probe(struct platform_device *pdev)
return 0;
err_dw_dma_probe:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
return err;
}
@@ -217,6 +221,7 @@ static int dw_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dw_dma_remove(chip);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
return 0;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 55d4803d71b0..3d9e08f7e823 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
if (pending & BIT(gpio)) {
virq = irq_find_mapping(cg->chip.irqdomain, gpio);
- generic_handle_irq(virq);
+ handle_nested_irq(virq);
}
}
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 978b51eae2ec..ce3c1558cb0a 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -47,13 +47,6 @@
#define DLN2_GPIO_MAX_PINS 32
-struct dln2_irq_work {
- struct work_struct work;
- struct dln2_gpio *dln2;
- int pin;
- int type;
-};
-
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
*/
DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
- struct dln2_irq_work *irq_work;
+ /* active IRQs - not synced to hardware */
+ DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+ /* active IRQs - synced to hardware */
+ DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+ int irq_type[DLN2_GPIO_MAX_PINS];
+ struct mutex irq_lock;
};
struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
return !!ret;
}
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
- unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+ unsigned int pin, int value)
{
struct dln2_gpio_pin_val req = {
.pin = cpu_to_le16(pin),
.value = value,
};
- dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
- sizeof(req));
+ return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+ sizeof(req));
}
#define DLN2_GPIO_DIRECTION_IN 0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
+ struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+ int ret;
+
+ ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+ if (ret < 0)
+ return ret;
+
return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
}
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
&req, sizeof(req));
}
-static void dln2_irq_work(struct work_struct *w)
-{
- struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
- struct dln2_gpio *dln2 = iw->dln2;
- u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
- if (test_bit(iw->pin, dln2->irqs_enabled))
- dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
- else
- dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- int pin = irqd_to_hwirq(irqd);
-
- set_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- clear_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
+ set_bit(pin, dln2->unmasked_irqs);
}
static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- struct device *dev = dln2->gpio.dev;
- int pin = irqd_to_hwirq(irqd);
-
- if (test_and_clear_bit(pin, dln2->irqs_pending)) {
- int irq;
-
- irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
- if (!irq) {
- dev_err(dev, "pin %d not mapped to IRQ\n", pin);
- return;
- }
-
- generic_handle_irq(irq);
- }
+ clear_bit(pin, dln2->unmasked_irqs);
}
static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
break;
case IRQ_TYPE_LEVEL_LOW:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
break;
case IRQ_TYPE_EDGE_BOTH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
break;
case IRQ_TYPE_EDGE_RISING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
break;
default:
return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
return 0;
}
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+ mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+ int pin = irqd_to_hwirq(irqd);
+ int enabled, unmasked;
+ unsigned type;
+ int ret;
+
+ enabled = test_bit(pin, dln2->enabled_irqs);
+ unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+ if (enabled != unmasked) {
+ if (unmasked) {
+ type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+ set_bit(pin, dln2->enabled_irqs);
+ } else {
+ type = DLN2_GPIO_EVENT_NONE;
+ clear_bit(pin, dln2->enabled_irqs);
+ }
+
+ ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+ if (ret)
+ dev_err(dln2->gpio.dev, "failed to set event\n");
+ }
+
+ mutex_unlock(&dln2->irq_lock);
+}
+
static struct irq_chip dln2_gpio_irqchip = {
.name = "dln2-irq",
- .irq_enable = dln2_irq_enable,
- .irq_disable = dln2_irq_disable,
.irq_mask = dln2_irq_mask,
.irq_unmask = dln2_irq_unmask,
.irq_set_type = dln2_irq_set_type,
+ .irq_bus_lock = dln2_irq_bus_lock,
+ .irq_bus_sync_unlock = dln2_irq_bus_unlock,
};
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
return;
}
- if (!test_bit(pin, dln2->irqs_enabled))
- return;
- if (test_bit(pin, dln2->irqs_masked)) {
- set_bit(pin, dln2->irqs_pending);
- return;
- }
-
- switch (dln2->irq_work[pin].type) {
+ switch (dln2->irq_type[pin]) {
case DLN2_GPIO_EVENT_CHANGE_RISING:
if (event->value)
generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
struct dln2_gpio *dln2;
struct device *dev = &pdev->dev;
int pins;
- int i, ret;
+ int ret;
pins = dln2_gpio_get_pin_count(pdev);
if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
if (!dln2)
return -ENOMEM;
- dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
- sizeof(struct dln2_irq_work), GFP_KERNEL);
- if (!dln2->irq_work)
- return -ENOMEM;
- for (i = 0; i < pins; i++) {
- INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
- dln2->irq_work[i].pin = i;
- dln2->irq_work[i].dln2 = dln2;
- }
+ mutex_init(&dln2->irq_lock);
dln2->pdev = pdev;
@@ -529,11 +510,8 @@ out:
static int dln2_gpio_remove(struct platform_device *pdev)
{
struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
- int i;
dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
- for (i = 0; i < dln2->gpio.ngpio; i++)
- flush_work(&dln2->irq_work[i].work);
gpiochip_remove(&dln2->gpio);
return 0;
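
The rework above moves the driver to the irq bus-lock pattern: irq_mask()/irq_unmask() only flip a bit in unmasked_irqs under irq_lock, and irq_bus_sync_unlock() compares that wanted state with enabled_irqs and performs the single sleeping transfer needed to bring the hardware in line. A compact userspace sketch of the same bookkeeping, with the bitmaps reduced to one word and the USB transfer reduced to a printf:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct chip {
        pthread_mutex_t irq_lock;
        unsigned long unmasked;         /* wanted state, set by mask/unmask */
        unsigned long enabled;          /* state last pushed to the hardware */
};

static void irq_unmask(struct chip *c, int pin) { c->unmasked |=  1UL << pin; }
static void irq_mask(struct chip *c, int pin)   { c->unmasked &= ~(1UL << pin); }

static void irq_bus_lock(struct chip *c)
{
        pthread_mutex_lock(&c->irq_lock);
}

static void irq_bus_sync_unlock(struct chip *c, int pin)
{
        bool enabled = c->enabled & (1UL << pin);
        bool unmasked = c->unmasked & (1UL << pin);

        if (enabled != unmasked) {
                /* The only place that may sleep and talk to the device. */
                printf("pin %d: %s event reporting\n", pin,
                       unmasked ? "enable" : "disable");
                c->enabled ^= 1UL << pin;
        }

        pthread_mutex_unlock(&c->irq_lock);
}

int main(void)
{
        struct chip c = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        irq_bus_lock(&c);
        irq_unmask(&c, 3);
        irq_bus_sync_unlock(&c, 3);     /* one "transfer": enable pin 3 */

        irq_bus_lock(&c);
        irq_unmask(&c, 3);              /* already enabled: nothing written */
        irq_bus_sync_unlock(&c, 3);

        irq_bus_lock(&c);
        irq_mask(&c, 3);
        irq_bus_sync_unlock(&c, 3);     /* one "transfer": disable pin 3 */
        return 0;
}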
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 09daaf2aeb56..3a5a71050559 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
err = gpiochip_add(gc);
if (err) {
dev_err(&ofdev->dev, "Could not add gpiochip\n");
- irq_domain_remove(priv->domain);
+ if (priv->domain)
+ irq_domain_remove(priv->domain);
return err;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 604dbe60bdee..08261f2b3a82 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
return false;
ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
- if (ret < 0)
- return false;
+ if (ret < 0) {
+ /* We've found the gpio chip, but the translation failed.
+ * Return true to stop looking and return the translation
+ * error via out_gpio
+ */
+ gg_data->out_gpio = ERR_PTR(ret);
+ return true;
+ }
gg_data->out_gpio = gpiochip_get_desc(gc, ret);
return true;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 2ac1800b58bb..f62aa115d79a 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
-static const DEVICE_ATTR(value, 0644,
+static DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev,
return status ? : size;
}
-static const DEVICE_ATTR(active_low, 0644,
+static DEVICE_ATTR(active_low, 0644,
gpio_active_low_show, gpio_active_low_store);
-static const struct attribute *gpio_attrs[] = {
+static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+ bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags);
+
+ if (attr == &dev_attr_direction.attr) {
+ if (!show_direction)
+ mode = 0;
+ } else if (attr == &dev_attr_edge.attr) {
+ if (gpiod_to_irq(desc) < 0)
+ mode = 0;
+ if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute *gpio_attrs[] = {
+ &dev_attr_direction.attr,
+ &dev_attr_edge.attr,
&dev_attr_value.attr,
&dev_attr_active_low.attr,
NULL,
};
-static const struct attribute_group gpio_attr_group = {
- .attrs = (struct attribute **) gpio_attrs,
+static const struct attribute_group gpio_group = {
+ .attrs = gpio_attrs,
+ .is_visible = gpio_is_visible,
+};
+
+static const struct attribute_group *gpio_groups[] = {
+ &gpio_group,
+ NULL
};
/*
@@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
}
static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
-static const struct attribute *gpiochip_attrs[] = {
+static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
&dev_attr_ngpio.attr,
NULL,
};
-
-static const struct attribute_group gpiochip_attr_group = {
- .attrs = (struct attribute **) gpiochip_attrs,
-};
+ATTRIBUTE_GROUPS(gpiochip);
/*
* /sys/class/gpio/export ... write-only
@@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
goto fail_unlock;
}
- if (!desc->chip->direction_input || !desc->chip->direction_output)
- direction_may_change = false;
+ if (desc->chip->direction_input && desc->chip->direction_output &&
+ direction_may_change) {
+ set_bit(FLAG_SYSFS_DIR, &desc->flags);
+ }
+
spin_unlock_irqrestore(&gpio_lock, flags);
offset = gpio_chip_hwgpio(desc);
if (desc->chip->names && desc->chip->names[offset])
ioname = desc->chip->names[offset];
- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
+ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
+ MKDEV(0, 0), desc, gpio_groups,
+ ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto fail_unlock;
}
- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
- if (status)
- goto fail_unregister_device;
-
- if (direction_may_change) {
- status = device_create_file(dev, &dev_attr_direction);
- if (status)
- goto fail_unregister_device;
- }
-
- if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
- !test_bit(FLAG_IS_OUT, &desc->flags))) {
- status = device_create_file(dev, &dev_attr_edge);
- if (status)
- goto fail_unregister_device;
- }
-
set_bit(FLAG_EXPORT, &desc->flags);
mutex_unlock(&sysfs_lock);
return 0;
-fail_unregister_device:
- device_unregister(dev);
fail_unlock:
mutex_unlock(&sysfs_lock);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -718,6 +729,7 @@ void gpiod_unexport(struct gpio_desc *desc)
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev) {
gpio_setup_irq(desc, dev, 0);
+ clear_bit(FLAG_SYSFS_DIR, &desc->flags);
clear_bit(FLAG_EXPORT, &desc->flags);
} else
status = -ENODEV;
@@ -750,13 +762,13 @@ int gpiochip_export(struct gpio_chip *chip)
/* use chip->base for the ID; it's already known to be unique */
mutex_lock(&sysfs_lock);
- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
- "gpiochip%d", chip->base);
- if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
- &gpiochip_attr_group);
- } else
+ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
+ chip, gpiochip_groups,
+ "gpiochip%d", chip->base);
+ if (IS_ERR(dev))
status = PTR_ERR(dev);
+ else
+ status = 0;
chip->exported = (status == 0);
mutex_unlock(&sysfs_lock);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 487afe6f22fc..568aa2b6bdb0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip)
base = gpiochip_find_base(chip->ngpio);
if (base < 0) {
status = base;
- goto unlock;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
}
chip->base = base;
}
status = gpiochip_add_to_list(chip);
+ if (status) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
+ }
- if (status == 0) {
- for (id = 0; id < chip->ngpio; id++) {
- struct gpio_desc *desc = &descs[id];
- desc->chip = chip;
-
- /* REVISIT: most hardware initializes GPIOs as
- * inputs (often with pullups enabled) so power
- * usage is minimized. Linux code should set the
- * gpio direction first thing; but until it does,
- * and in case chip->get_direction is not set,
- * we may expose the wrong direction in sysfs.
- */
- desc->flags = !chip->direction_input
- ? (1 << FLAG_IS_OUT)
- : 0;
- }
+ for (id = 0; id < chip->ngpio; id++) {
+ struct gpio_desc *desc = &descs[id];
+
+ desc->chip = chip;
+
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
chip->desc = descs;
@@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip)
of_gpiochip_add(chip);
acpi_gpiochip_add(chip);
- if (status)
- goto fail;
-
status = gpiochip_export(chip);
if (status)
- goto fail;
+ goto err_remove_chip;
pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
@@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip)
return 0;
-unlock:
+err_remove_chip:
+ acpi_gpiochip_remove(chip);
+ of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
-fail:
- kfree(descs);
chip->desc = NULL;
+err_free_descs:
+ kfree(descs);
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
@@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip)
unsigned long flags;
unsigned id;
- acpi_gpiochip_remove(chip);
-
- spin_lock_irqsave(&gpio_lock, flags);
+ gpiochip_unexport(chip);
gpiochip_irqchip_remove(chip);
+
+ acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
for (id = 0; id < chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip)
list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
- gpiochip_unexport(chip);
kfree(chip->desc);
chip->desc = NULL;
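
The gpiochip_add() changes above straighten the failure handling into the usual reverse-order goto ladder, where each label undoes exactly the steps that had already succeeded. A generic sketch of that structure, with placeholder steps rather than the gpiolib calls:

#include <stdio.h>
#include <stdlib.h>

static int register_chip(int fail_at)
{
        int err;
        char *descs;

        descs = calloc(32, sizeof(*descs));
        if (!descs)
                return -1;

        if (fail_at == 1) {             /* e.g. no free base number found */
                err = -1;
                goto err_free_descs;
        }

        printf("added to list\n");

        if (fail_at == 2) {             /* e.g. sysfs export failed */
                err = -1;
                goto err_remove_chip;
        }

        printf("registered\n");
        return 0;                       /* on success the chip keeps descs */

err_remove_chip:
        printf("removed from list\n");  /* undo only what already succeeded */
err_free_descs:
        free(descs);
        return err;
}

int main(void)
{
        register_chip(2);
        return 0;
}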
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index e3a52113a541..550a5eafbd38 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -77,6 +77,7 @@ struct gpio_desc {
#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */
#define ID_SHIFT 16 /* add new flags before this one */
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 66e40398b3d3..e620807418ea 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index be6246de5091..307a309110e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -8,7 +8,6 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
kfd_process.o kfd_queue.o kfd_mqd_manager.o \
kfd_kernel_queue.o kfd_packet_manager.o \
- kfd_process_queue_manager.o kfd_device_queue_manager.o \
- kfd_interrupt.o
+ kfd_process_queue_manager.o kfd_device_queue_manager.o
obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 4f7b275f2f7b..fcfdf23e1913 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,6 @@
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
-#include <linux/uaccess.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
@@ -121,27 +120,20 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
- process->is_32bit_user_mode = is_32bit_user_mode;
-
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
- kfd_init_apertures(process);
-
return 0;
}
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_get_version_args args;
+ struct kfd_ioctl_get_version_args *args = data;
int err = 0;
- args.major_version = KFD_IOCTL_MAJOR_VERSION;
- args.minor_version = KFD_IOCTL_MINOR_VERSION;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- err = -EFAULT;
+ args->major_version = KFD_IOCTL_MAJOR_VERSION;
+ args->minor_version = KFD_IOCTL_MINOR_VERSION;
return err;
}
@@ -225,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return 0;
}
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_create_queue_args args;
+ struct kfd_ioctl_create_queue_args *args = data;
struct kfd_dev *dev;
int err = 0;
unsigned int queue_id;
@@ -237,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
pr_debug("kfd: creating queue ioctl\n");
- err = set_queue_properties_from_user(&q_properties, &args);
+ err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -254,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto err_bind_process;
}
@@ -267,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
if (err != 0)
goto err_create_queue;
- args.queue_id = queue_id;
+ args->queue_id = queue_id;
/* Return gpu_id as doorbell offset for mmap usage */
- args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
- if (copy_to_user(arg, &args, sizeof(args))) {
- err = -EFAULT;
- goto err_copy_args_out;
- }
+ args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
mutex_unlock(&p->mutex);
- pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+ pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
pr_debug("ring buffer address == 0x%016llX\n",
- args.ring_base_address);
+ args->ring_base_address);
pr_debug("read ptr address == 0x%016llX\n",
- args.read_pointer_address);
+ args->read_pointer_address);
pr_debug("write ptr address == 0x%016llX\n",
- args.write_pointer_address);
+ args->write_pointer_address);
return 0;
-err_copy_args_out:
- pqm_destroy_queue(&p->pqm, queue_id);
err_create_queue:
err_bind_process:
mutex_unlock(&p->mutex);
@@ -301,99 +283,90 @@ err_bind_process:
}
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_destroy_queue_args args;
-
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
+ struct kfd_ioctl_destroy_queue_args *args = data;
pr_debug("kfd: destroying queue id %d for PASID %d\n",
- args.queue_id,
+ args->queue_id,
p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+ retval = pqm_destroy_queue(&p->pqm, args->queue_id);
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_update_queue_args args;
+ struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
- if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
- if ((args.ring_base_address) &&
+ if ((args->ring_base_address) &&
(!access_ok(VERIFY_WRITE,
- (const void __user *) args.ring_base_address,
+ (const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("kfd: can't access ring base address\n");
return -EFAULT;
}
- if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+ if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
pr_err("kfd: ring size must be a power of 2 or 0\n");
return -EINVAL;
}
- properties.queue_address = args.ring_base_address;
- properties.queue_size = args.ring_size;
- properties.queue_percent = args.queue_percentage;
- properties.priority = args.queue_priority;
+ properties.queue_address = args->ring_base_address;
+ properties.queue_size = args->ring_size;
+ properties.queue_percent = args->queue_percentage;
+ properties.priority = args->queue_priority;
pr_debug("kfd: updating queue id %d for PASID %d\n",
- args.queue_id, p->pasid);
+ args->queue_id, p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+ retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
return retval;
}
-static long kfd_ioctl_set_memory_policy(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_set_memory_policy_args args;
+ struct kfd_ioctl_set_memory_policy_args *args = data;
struct kfd_dev *dev;
int err = 0;
struct kfd_process_device *pdd;
enum cache_policy default_policy, alternate_policy;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -401,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto out;
}
- default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
alternate_policy =
- (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
if (!dev->dqm->set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
- (void __user *)args.alternate_aperture_base,
- args.alternate_aperture_size))
+ (void __user *)args->alternate_aperture_base,
+ args->alternate_aperture_size))
err = -EINVAL;
out:
@@ -426,53 +399,44 @@ out:
return err;
}
-static long kfd_ioctl_get_clock_counters(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_clock_counters_args args;
+ struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
struct timespec time;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
/* Reading GPU clock counter from KGD */
- args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
/* No access to rdtsc. Using raw monotonic time */
getrawmonotonic(&time);
- args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
get_monotonic_boottime(&time);
- args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
/* Since the counter is in nano-seconds we use 1GHz frequency */
- args.system_clock_freq = 1000000000;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
+ args->system_clock_freq = 1000000000;
return 0;
}
static int kfd_ioctl_get_process_apertures(struct file *filp,
- struct kfd_process *p, void __user *arg)
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_process_apertures_args args;
+ struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- args.num_of_nodes = 0;
+ args->num_of_nodes = 0;
mutex_lock(&p->mutex);
@@ -481,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
/* Run over all pdd of the process */
pdd = kfd_get_first_process_device_data(p);
do {
- pAperture = &args.process_apertures[args.num_of_nodes];
+ pAperture =
+ &args->process_apertures[args->num_of_nodes];
pAperture->gpu_id = pdd->dev->id;
pAperture->lds_base = pdd->lds_base;
pAperture->lds_limit = pdd->lds_limit;
@@ -491,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
pAperture->scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
- "node id %u\n", args.num_of_nodes);
+ "node id %u\n", args->num_of_nodes);
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
@@ -507,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
- args.num_of_nodes++;
+ args->num_of_nodes++;
} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
- (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+ (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
-
return 0;
}
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+ kfd_ioctl_get_version, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
+ kfd_ioctl_create_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
+ kfd_ioctl_destroy_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
+ kfd_ioctl_set_memory_policy, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
+ kfd_ioctl_get_clock_counters, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
+ kfd_ioctl_get_process_apertures, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
+ kfd_ioctl_update_queue, 0),
+};
+
+#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kfd_process *process;
- long err = -EINVAL;
+ amdkfd_ioctl_t *func;
+ const struct amdkfd_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+ int retcode = -EINVAL;
+
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ goto err_i1;
+
+ if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
+ u32 amdkfd_size;
+
+ ioctl = &amdkfd_ioctls[nr];
- dev_dbg(kfd_device,
- "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
- cmd, _IOC_NR(cmd), arg);
+ amdkfd_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (amdkfd_size > asize)
+ asize = amdkfd_size;
+
+ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
- if (IS_ERR(process))
- return PTR_ERR(process);
+ if (IS_ERR(process)) {
+ dev_dbg(kfd_device, "no process\n");
+ goto err_i1;
+ }
- switch (cmd) {
- case KFD_IOC_GET_VERSION:
- err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
- break;
- case KFD_IOC_CREATE_QUEUE:
- err = kfd_ioctl_create_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_DESTROY_QUEUE:
- err = kfd_ioctl_destroy_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_SET_MEMORY_POLICY:
- err = kfd_ioctl_set_memory_policy(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_CLOCK_COUNTERS:
- err = kfd_ioctl_get_clock_counters(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_PROCESS_APERTURES:
- err = kfd_ioctl_get_process_apertures(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_UPDATE_QUEUE:
- err = kfd_ioctl_update_queue(filep, process,
- (void __user *)arg);
- break;
-
- default:
- dev_err(kfd_device,
- "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
- cmd, arg);
- err = -EINVAL;
- break;
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ dev_dbg(kfd_device, "no function\n");
+ retcode = -EINVAL;
+ goto err_i1;
}
- if (err < 0)
- dev_err(kfd_device,
- "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
- err, cmd, _IOC_NR(cmd));
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
- return err;
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ retcode = func(filep, process, kdata);
+
+ if (cmd & IOC_OUT)
+ if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+ retcode = -EFAULT;
+
+err_i1:
+ if (!ioctl)
+ dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current), cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+
+ if (retcode)
+ dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+ return retcode;
}
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
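The chardev rework above replaces the per-ioctl copy_from_user()/copy_to_user() calls with a single table-driven dispatcher that sizes and copies the argument buffer once, based on _IOC_SIZE() and the IOC_IN/IOC_OUT direction bits of the command. A minimal sketch of that dispatch pattern, with hypothetical names (my_ioctl_t, my_ioctl_desc, my_dispatch) and only the core copy logic, might look like this:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

typedef int my_ioctl_t(struct file *filep, void *data);

struct my_ioctl_desc {
	unsigned int cmd;
	my_ioctl_t *func;
};

#define MY_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = { .cmd = ioctl, .func = _func }

static long my_dispatch(struct file *filep, unsigned int cmd, unsigned long arg,
			const struct my_ioctl_desc *table, unsigned int count)
{
	unsigned int nr = _IOC_NR(cmd);
	unsigned int usize = _IOC_SIZE(cmd);
	char stack_kdata[128];
	char *kdata = stack_kdata;
	int ret;

	if (nr >= count || !table[nr].func)
		return -EINVAL;

	/* Fall back to the heap for argument structs larger than the stack buffer. */
	if (usize > sizeof(stack_kdata)) {
		kdata = kmalloc(usize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (cmd & IOC_IN) {
		/* Userspace passes data in: copy it into the kernel once. */
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			ret = -EFAULT;
			goto out;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	ret = table[nr].func(filep, kdata);

	/* Data flows back out: copy the (possibly updated) buffer to userspace. */
	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		ret = -EFAULT;
out:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}

Each table entry would then be declared with MY_IOCTL_DEF(MY_IOC_FOO, my_ioctl_foo), mirroring the AMDKFD_IOCTL_DEF() array in the hunk above; the handlers themselves only see a kernel pointer and never touch copy_from_user()/copy_to_user() directly.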
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 43884ebd4303..25bc47f3c1cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
/* calculate max size of mqds needed for queues */
- size = max_num_of_processes *
- max_num_of_queues_per_process *
- kfd->device_info->mqd_size_aligned;
+ size = max_num_of_queues_per_device *
+ kfd->device_info->mqd_size_aligned;
/* add another 512KB for all other allocations on gart */
size += 512 * 1024;
@@ -192,13 +192,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_topology_add_device_error;
}
- if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing interrupts for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
- goto kfd_interrupt_error;
- }
-
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
"Error initializing iommuv2 for device (%x:%x)\n",
@@ -237,8 +230,6 @@ dqm_start_error:
device_queue_manager_error:
amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
- kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
kfd2kgd->fini_sa_manager(kfd->kgd);
@@ -254,7 +245,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
if (kfd->init_complete) {
device_queue_manager_uninit(kfd->dqm);
amd_iommu_free_device(kfd->pdev);
- kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
}
@@ -296,13 +286,5 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- if (kfd->init_complete) {
- spin_lock(&kfd->interrupt_lock);
-
- if (kfd->interrupts_active
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
- schedule_work(&kfd->interrupt_work);
-
- spin_unlock(&kfd->interrupt_lock);
- }
+ /* Process interrupts / schedule work as necessary */
}
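For scale, with the default this series introduces (max_num_of_queues_per_device = 4096) and the MQD_SIZE_ALIGNED of 768 bytes defined earlier in this file, the GART reservation above works out roughly as follows; this is a back-of-the-envelope check, not text from the patch:

/*
 * Illustrative sizing with the new defaults:
 *   4096 queues * 768 bytes per MQD = 3,145,728 bytes  (3 MiB of MQDs)
 *   + 512 * 1024 bytes for other GART allocations =  524,288 bytes
 *   total reserved per device ~= 3,670,016 bytes (about 3.5 MiB)
 */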
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 924e90c072e5..0d8694f015c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
{
int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+ /* Release the vmid mapping */
+ set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+
set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
qpd->vmid = 0;
q->properties.vmid = 0;
@@ -180,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
if (retval != 0) {
@@ -204,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
list_add(&q->list, &qpd->queues_list);
dqm->queue_count++;
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
}
@@ -272,6 +290,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
+ pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
+ q->pipe,
+ q->queue);
+
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ q->queue, (uint32_t __user *) q->properties.write_ptr);
+ if (retval != 0) {
+ deallocate_hqd(dqm, q);
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ return retval;
+ }
+
return 0;
}
@@ -311,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
dqm->queue_count--;
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -320,6 +359,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
struct mqd_manager *mqd;
+ bool prev_active = false;
BUG_ON(!dqm || !q || !q->mqd);
@@ -330,10 +370,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
return -ENOMEM;
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if (q->properties.is_active == true)
+ prev_active = true;
+
+ /*
+ *
+ * check active state vs. the previous state
+ * and modify counter accordingly
+ */
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ if ((q->properties.is_active == true) && (prev_active == false))
dqm->queue_count++;
- else
+ else if ((q->properties.is_active == false) && (prev_active == true))
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -517,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
for (i = 0; i < pipes_num; i++) {
inx = i + first_pipe;
+ /*
+ * HPD buffer on GTT is allocated by amdkfd, no need to waste
+ * space in GTT for pipelines we don't initialize
+ */
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
- kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+ kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
@@ -536,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__);
- retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+ retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
if (retval != 0)
return retval;
@@ -728,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
@@ -751,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--;
qpd->is_debug = false;
execute_queues_cpsch(dqm, false);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type.
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
mutex_unlock(&dqm->lock);
}
@@ -769,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ retval = -EPERM;
+ goto out;
+ }
+
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
@@ -786,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false);
}
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -906,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
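All of the creation and destruction paths above follow the same accounting pattern: take dqm->lock, refuse new queues once the per-device cap is reached, and adjust a single counter that covers every queue type, active or not. Condensed into a standalone sketch with hypothetical names (struct my_dqm and the reserve/release helpers), the pattern is:

#include <linux/mutex.h>
#include <linux/printk.h>

extern int max_num_of_queues_per_device;	/* module parameter, as above */

struct my_dqm {
	struct mutex lock;
	unsigned int total_queue_count;		/* counts active and inactive queues alike */
};

static int my_reserve_queue_slot(struct my_dqm *dqm)
{
	int ret = 0;

	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("can't create queue: %d queues already exist\n",
			dqm->total_queue_count);
		ret = -EPERM;
	} else {
		dqm->total_queue_count++;
	}
	mutex_unlock(&dqm->lock);
	return ret;
}

static void my_release_queue_slot(struct my_dqm *dqm)
{
	mutex_lock(&dqm->lock);
	dqm->total_queue_count--;
	mutex_unlock(&dqm->lock);
}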
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..52035bf0c1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -130,6 +130,7 @@ struct device_queue_manager {
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
+ unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
unsigned int vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 66df4da01c29..e64aa99e5e41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
struct kfd_dev *dev;
struct kfd_process_device *pdd;
- mutex_lock(&process->mutex);
-
/*Iterating over all devices*/
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_get_process_device_data(dev, process, 1);
+ if (!pdd)
+ return -1;
/*
* For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
id++;
}
- mutex_unlock(&process->mutex);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
deleted file mode 100644
index 5b999095a1f7..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * KFD Interrupts.
- *
- * AMD GPUs deliver interrupts by pushing an interrupt description onto the
- * interrupt ring and then sending an interrupt. KGD receives the interrupt
- * in ISR and sends us a pointer to each new entry on the interrupt ring.
- *
- * We generally can't process interrupt-signaled events from ISR, so we call
- * out to each interrupt client module (currently only the scheduler) to ask if
- * each interrupt is interesting. If they return true, then it requires further
- * processing so we copy it to an internal interrupt ring and call each
- * interrupt client again from a work-queue.
- *
- * There's no acknowledgment for the interrupts we use. The hardware simply
- * queues a new interrupt each time without waiting.
- *
- * The fixed-size internal queue means that it's possible for us to lose
- * interrupts because we have no back-pressure to the hardware.
- */
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include "kfd_priv.h"
-
-#define KFD_INTERRUPT_RING_SIZE 256
-
-static void interrupt_wq(struct work_struct *);
-
-int kfd_interrupt_init(struct kfd_dev *kfd)
-{
- void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
- kfd->device_info->ih_ring_entry_size,
- GFP_KERNEL);
- if (!interrupt_ring)
- return -ENOMEM;
-
- kfd->interrupt_ring = interrupt_ring;
- kfd->interrupt_ring_size =
- KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
- atomic_set(&kfd->interrupt_ring_wptr, 0);
- atomic_set(&kfd->interrupt_ring_rptr, 0);
-
- spin_lock_init(&kfd->interrupt_lock);
-
- INIT_WORK(&kfd->interrupt_work, interrupt_wq);
-
- kfd->interrupts_active = true;
-
- /*
- * After this function returns, the interrupt will be enabled. This
- * barrier ensures that the interrupt running on a different processor
- * sees all the above writes.
- */
- smp_wmb();
-
- return 0;
-}
-
-void kfd_interrupt_exit(struct kfd_dev *kfd)
-{
- /*
- * Stop the interrupt handler from writing to the ring and scheduling
- * workqueue items. The spinlock ensures that any interrupt running
- * after we have unlocked sees interrupts_active = false.
- */
- unsigned long flags;
-
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
- kfd->interrupts_active = false;
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
-
- /*
- * Flush_scheduled_work ensures that there are no outstanding
- * work-queue items that will access interrupt_ring. New work items
- * can't be created because we stopped interrupt handling above.
- */
- flush_scheduled_work();
-
- kfree(kfd->interrupt_ring);
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with dequeue_ih_ring_entry.
- */
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
-
- if ((rptr - wptr) % kfd->interrupt_ring_size ==
- kfd->device_info->ih_ring_entry_size) {
- /* This is very bad, the system is likely to hang. */
- dev_err_ratelimited(kfd_chardev(),
- "Interrupt ring overflow, dropping interrupt.\n");
- return false;
- }
-
- memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
-
- wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
- smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
- atomic_set(&kfd->interrupt_ring_wptr, wptr);
-
- return true;
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with enqueue_ih_ring_entry.
- */
-static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
-{
- /*
- * Assume that wait queues have an implicit barrier, i.e. anything that
- * happened in the ISR before it queued work is visible.
- */
-
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
-
- if (rptr == wptr)
- return false;
-
- memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
- kfd->device_info->ih_ring_entry_size);
-
- rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
-
- /*
- * Ensure the rptr write update is not visible until
- * memcpy has finished reading.
- */
- smp_mb();
- atomic_set(&kfd->interrupt_ring_rptr, rptr);
-
- return true;
-}
-
-static void interrupt_wq(struct work_struct *work)
-{
- struct kfd_dev *dev = container_of(work, struct kfd_dev,
- interrupt_work);
-
- uint32_t ih_ring_entry[DIV_ROUND_UP(
- dev->device_info->ih_ring_entry_size,
- sizeof(uint32_t))];
-
- while (dequeue_ih_ring_entry(dev, ih_ring_entry))
- ;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..a8be6df85347 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Kernel cmdline parameter that defines the amdkfd scheduling policy");
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
}
/* Verify module parameters */
- if ((max_num_of_processes < 0) ||
- (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
- pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
- return -1;
- }
-
- if ((max_num_of_queues_per_process < 0) ||
- (max_num_of_queues_per_process >
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
- pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+ if ((max_num_of_queues_per_device < 0) ||
+ (max_num_of_queues_per_device >
+ KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+ pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
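The replacement parameter keeps the usual kernel pattern: a 0444 module parameter that can only be set on the kernel command line or at load time, validated once in the module init path. A minimal, self-contained sketch of that pattern with a hypothetical parameter name:

#include <linux/module.h>
#include <linux/printk.h>

static int my_queue_limit = 4096;
module_param(my_queue_limit, int, 0444);	/* readable in sysfs, not writable at runtime */
MODULE_PARM_DESC(my_queue_limit,
		 "Maximum number of queues per device (default 4096)");

static int __init my_mod_init(void)
{
	/* 512 * 1024 mirrors the KFD_MAX_NUM_OF_QUEUES_PER_DEVICE cap above */
	if (my_queue_limit < 1 || my_queue_limit > 512 * 1024) {
		pr_err("my_queue_limit out of range\n");
		return -EINVAL;
	}
	return 0;
}

static void __exit my_mod_exit(void)
{
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");

Loading such a module with an explicit my_queue_limit=1024 overrides the default; later writes are rejected because of the 0444 permissions.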
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index adc31474e786..4c3828cf45bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+ return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 71699ad97d74..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,9 +30,9 @@ static DEFINE_MUTEX(pasid_mutex);
int kfd_pasid_init(void)
{
- pasid_limit = max_num_of_processes;
+ pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
- pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f9fb81e3bb09..96dc10e8904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
#define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
*/
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
#define KFD_KERNEL_QUEUE_SIZE 2048
@@ -135,22 +134,10 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
- void *interrupt_ring;
- size_t interrupt_ring_size;
- atomic_t interrupt_ring_rptr;
- atomic_t interrupt_ring_wptr;
- struct work_struct interrupt_work;
- spinlock_t interrupt_lock;
-
/* QCM Device instance */
struct device_queue_manager *dqm;
bool init_complete;
- /*
- * Interrupts of interest to KFD are copied
- * from the HW ring into a SW ring.
- */
- bool interrupts_active;
};
/* KGD2KFD callbacks */
@@ -463,6 +450,24 @@ struct kfd_process {
bool is_32bit_user_mode;
};
+/**
+ * Ioctl function type.
+ *
+ * \param filep pointer to file structure.
+ * \param p amdkfd process pointer.
+ * \param data pointer to arg that was copied from user.
+ */
+typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
+ void *data);
+
+struct amdkfd_ioctl_desc {
+ unsigned int cmd;
+ int flags;
+ amdkfd_ioctl_t *func;
+ unsigned int cmd_drv;
+ const char *name;
+};
+
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
@@ -513,10 +518,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
/* Interrupts */
-int kfd_interrupt_init(struct kfd_dev *dev);
-void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b85eb0b830b4..3c76ef05cbcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
+#include <linux/compat.h>
+
struct mm_struct;
#include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_process_pqm_init;
+ /* init process apertures */
+ process->is_32bit_user_mode = is_compat_task();
+ if (kfd_init_apertures(process) != 0)
+ goto err_init_apertures;
+
return process;
+err_init_apertures:
+ pqm_uninit(&process->pqm);
err_process_pqm_init:
hash_del_rcu(&process->kfd_processes);
synchronize_rcu();
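The aperture setup now happens at process-creation time, keyed off is_compat_task() from <linux/compat.h>, which reports whether the current task entered the kernel through the 32-bit compat syscall path. A sketch of that init ordering with hypothetical types and helpers (my_process, my_pqm_init, my_init_apertures), assuming nothing beyond standard kernel APIs:

#include <linux/compat.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_process {
	bool is_32bit_user_mode;
	/* ... */
};

int my_pqm_init(struct my_process *p);		/* hypothetical, mirrors pqm_init() */
void my_pqm_uninit(struct my_process *p);
int my_init_apertures(struct my_process *p);	/* hypothetical, mirrors kfd_init_apertures() */

static struct my_process *my_create_process(void)
{
	struct my_process *p;
	int err;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	err = my_pqm_init(p);
	if (err)
		goto err_pqm;

	/* record the caller's ABI once, before building the apertures */
	p->is_32bit_user_mode = is_compat_task();

	err = my_init_apertures(p);
	if (err)
		goto err_apertures;

	return p;

err_apertures:
	my_pqm_uninit(p);
err_pqm:
	kfree(p);
	return ERR_PTR(err);
}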
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..f37cf5efe642 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap,
- max_num_of_queues_per_process);
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found);
- if (found >= max_num_of_queues_per_process) {
+ if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL)
return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->kq = NULL;
retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("kfd: error dqm create queue\n");
+ pr_debug("Error dqm create queue\n");
goto err_create_queue;
}
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue:
kfree(pqn);
err_allocate_pqn:
+ /* if the queues list is empty, unregister the process from the device */
clear_bit(*qid, pqm->queue_slot_bitmap);
+ if (list_empty(&pqm->queues))
+ dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
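With the per-process module parameter gone, queue ids come out of a fixed-size bitmap capped at the compile-time KFD_MAX_NUM_OF_QUEUES_PER_PROCESS. The allocator boils down to the following sketch (illustrative names; the real code keeps the bitmap in the process queue manager):

#include <linux/bitops.h>
#include <linux/errno.h>

#define MY_MAX_QUEUES_PER_PROCESS 1024	/* compile-time cap, as in kfd_priv.h */

static int my_alloc_queue_slot(unsigned long *bitmap, unsigned int *qid)
{
	unsigned long found = find_first_zero_bit(bitmap,
						  MY_MAX_QUEUES_PER_PROCESS);

	if (found >= MY_MAX_QUEUES_PER_PROCESS)
		return -ENOMEM;

	set_bit(found, bitmap);
	*qid = found;
	return 0;
}

static void my_free_queue_slot(unsigned long *bitmap, unsigned int qid)
{
	clear_bit(qid, bitmap);
}

The bitmap itself is sized as DIV_ROUND_UP(MY_MAX_QUEUES_PER_PROCESS, BITS_PER_BYTE) bytes, matching the kzalloc() in pqm_init() above.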
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5733e2859e8a..cca1708fd811 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
- sysfs_show_32bit_prop(buffer, "engine_id",
- dev->node_props.engine_id);
sysfs_show_32bit_prop(buffer, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+ sysfs_show_32bit_prop(buffer, "fw_version",
+ kfd2kgd->get_fw_version(
+ dev->gpu->kgd,
+ KGD_ENGINE_MEC1));
+
}
ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
@@ -917,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
uint32_t i = 0;
list_for_each_entry(dev, &topology_device_list, list) {
- ret = kfd_build_sysfs_node_entry(dev, 0);
+ ret = kfd_build_sysfs_node_entry(dev, i);
if (ret < 0)
return ret;
i++;
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9c729dd8dd50..96a512208fad 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -45,6 +45,17 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA,
+ KGD_ENGINE_MAX
+};
+
struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
+ * @get_fw_version: Returns FW versions from the header
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -170,12 +183,14 @@ struct kfd2kgd_calls {
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
- bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+ bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
+ uint16_t (*get_fw_version)(struct kgd_dev *kgd,
+ enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a78a773151c..bbdbe4721573 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
- crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+ crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
if (WARN_ON(!crtc_state))
return;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 52ce26d6b4fb..dc386ebe5193 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+static void remove_from_modeset(struct drm_mode_set *set,
+ struct drm_connector *connector)
+{
+ int i, j;
+
+ for (i = 0; i < set->num_connectors; i++) {
+ if (set->connectors[i] == connector)
+ break;
+ }
+
+ if (i == set->num_connectors)
+ return;
+
+ for (j = i + 1; j < set->num_connectors; j++) {
+ set->connectors[j - 1] = set->connectors[j];
+ }
+ set->num_connectors--;
+
+ /* because i915 is pissy about this..
+ * TODO maybe need to make sure we set it back to !=NULL somewhere?
+ */
+ if (set->num_connectors == 0)
+ set->fb = NULL;
+}
+
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
+
+ /* also cleanup dangling references to the connector: */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -741,7 +771,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
int i, j, rc = 0;
int start;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -915,7 +947,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
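The setcmap and pan_display changes above switch the fbdev entry points to a try-lock when called from the oops/panic path, where sleeping on modeset locks is not allowed. A sketch of that guard, assuming the __drm_modeset_lock_all(dev, trylock) helper present in this kernel generation and a hypothetical fb_ops callback my_fb_op():

#include <drm/drm_fb_helper.h>
#include <linux/fb.h>
#include <linux/kernel.h>

static int my_fb_op(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_device *dev = fb_helper->dev;

	/* From the oops path, fail with -EBUSY rather than block on the locks. */
	if (__drm_modeset_lock_all(dev, !!oops_in_progress))
		return -EBUSY;

	/* ... touch modeset state here ... */

	drm_modeset_unlock_all(dev);
	return 0;
}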
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f5a5f18efa5b..4d79dad9d44f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
+ * This is the legacy version of drm_crtc_vblank_count().
+ *
* Returns:
* The software vblank counter.
*/
@@ -844,6 +846,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * This is the native KMS version of drm_vblank_count().
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+ return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);
+
+/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value.
*
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
+ *
+ * This is the legacy version of drm_crtc_send_vblank_event().
*/
void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e)
@@ -923,6 +946,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
EXPORT_SYMBOL(drm_send_vblank_event);
/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ *
+ * This is the native KMS version of drm_send_vblank_event().
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);
+
+/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+ return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
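The three new wrappers let drivers work in terms of struct drm_crtc instead of the legacy (dev, pipe-index) pair. A hedged sketch of a driver-side vblank interrupt path using them; my_crtc_from_irq() and my_take_finished_pageflip() are hypothetical driver helpers, not DRM core API:

#include <drm/drmP.h>
#include <linux/interrupt.h>

static irqreturn_t my_vblank_irq(int irq, void *arg)
{
	struct drm_crtc *crtc = my_crtc_from_irq(arg);	/* hypothetical lookup */
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *e;
	unsigned long flags;

	/* bumps the software counter and fires any queued vblank events */
	drm_crtc_handle_vblank(crtc);

	/* hypothetical: returns the pending event if a page flip just completed */
	e = my_take_finished_pageflip(crtc);
	if (e) {
		spin_lock_irqsave(&dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, e);	/* caller must hold event_lock */
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return IRQ_HANDLED;
}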
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 121470a83d1a..1bcbe07cecfc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -645,18 +645,6 @@ static int exynos_drm_init(void)
if (!is_exynos)
return -ENODEV;
- /*
- * Register device object only in case of Exynos SoC.
- *
- * Below codes resolves temporarily infinite loop issue incurred
- * by Exynos drm driver when using multi-platform kernel.
- * So these codes will be replaced with more generic way later.
- */
- if (!of_machine_is_compatible("samsung,exynos3") &&
- !of_machine_is_compatible("samsung,exynos4") &&
- !of_machine_is_compatible("samsung,exynos5"))
- return -ENODEV;
-
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
if (IS_ERR(exynos_drm_pdev))
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5765a161abdd..98051e8e855a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
- u8 buffer[2];
u32 reg;
clk_disable_unprepare(hdata->res.sclk_hdmi);
@@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
clk_prepare_enable(hdata->res.sclk_hdmi);
/* operation mode */
- buffer[0] = 0x1f;
- buffer[1] = 0x00;
-
- if (hdata->hdmiphy_port)
- i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
if (hdata->type == HDMI_TYPE13)
reg = HDMI_V13_PHY_RSTOUT;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 820b76234ef4..064ed6597def 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
{
struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ int err;
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
@@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
}
mutex_unlock(&mixer_ctx->mixer_mutex);
- drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+ err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+ if (err < 0) {
+ DRM_DEBUG_KMS("failed to acquire vblank counter\n");
+ return;
+ }
atomic_set(&mixer_ctx->wait_vsync_event, 1);
@@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
return ret;
}
- pm_runtime_enable(dev);
-
return 0;
}
@@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data)
struct mixer_context *ctx = dev_get_drvdata(dev);
mixer_mgr_remove(&ctx->manager);
-
- pm_runtime_disable(dev);
}
static const struct component_ops mixer_component_ops = {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
struct tda998x_priv {
struct i2c_client *cec;
struct i2c_client *hdmi;
+ struct mutex mutex;
+ struct delayed_work dwork;
uint16_t rev;
uint8_t current_page;
int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
uint8_t addr = REG2ADDR(reg);
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return ret;
+ goto out;
ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
if (ret < 0)
goto fail;
- return ret;
+ goto out;
fail:
dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
uint8_t buf[] = {REG2ADDR(reg), val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
}
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tda998x_priv *priv =
+ container_of(dwork, struct tda998x_priv, dwork);
+
+ if (priv->encoder && priv->encoder->dev)
+ drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
/*
* only 2 interrupts may occur: screen plug/unplug and EDID read
*/
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
priv->wq_edid_wait = 0;
wake_up(&priv->wq_edid);
} else if (cec != 0) { /* HPD change */
- if (priv->encoder && priv->encoder->dev)
- drm_helper_hpd_irq_event(priv->encoder->dev);
+ schedule_delayed_work(&priv->dwork, HZ/10);
}
return IRQ_HANDLED;
}
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (priv->hdmi->irq)
+ if (priv->hdmi->irq) {
free_irq(priv->hdmi->irq, priv);
+ cancel_delayed_work_sync(&priv->dwork);
+ }
i2c_unregister_device(priv->cec);
}
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
+ unsigned short cec_addr;
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->current_page = 0xff;
priv->hdmi = client;
- priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ cec_addr = 0x34 + (client->addr & 0x03);
+ priv->cec = i2c_new_dummy(client->adapter, cec_addr);
if (!priv->cec)
return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
+ mutex_init(&priv->mutex); /* protect the page access */
+
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (client->irq) {
int irqf_trigger;
- /* init read EDID waitqueue */
+ /* init read EDID waitqueue and HPD work */
init_waitqueue_head(&priv->wq_edid);
+ INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
/* clear pending interrupts */
reg_read(priv, REG_INT_FLAGS_0);
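The tda998x changes above debounce hot-plug handling: the hard IRQ only schedules a delayed work item, and the work callback does the (potentially sleeping) hotplug processing roughly 100 ms later. A sketch of that pattern in isolation; everything except the workqueue and DRM helper calls is an illustrative name:

#include <drm/drm_crtc_helper.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_hdmi {
	struct delayed_work dwork;
	struct drm_encoder *encoder;
};

/* Work callback: runs in process context, safe to take mutexes and notify DRM. */
static void my_hpd_work(struct work_struct *work)
{
	struct my_hdmi *priv = container_of(to_delayed_work(work),
					    struct my_hdmi, dwork);

	if (priv->encoder && priv->encoder->dev)
		drm_kms_helper_hotplug_event(priv->encoder->dev);
}

/* Hard IRQ: coalesce bursts of HPD toggles into a single delayed event. */
static irqreturn_t my_hpd_irq(int irq, void *data)
{
	struct my_hdmi *priv = data;

	schedule_delayed_work(&priv->dwork, HZ / 10);
	return IRQ_HANDLED;
}

At probe time the work is prepared with INIT_DELAYED_WORK(&priv->dwork, my_hpd_work), and on teardown it is flushed with cancel_delayed_work_sync() after the IRQ has been freed, exactly as the hunks above do.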
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f990ab4c3efb..7643300828c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_HSW_ULT(dev));
- } else if (IS_BROADWELL(dev)) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->pch_id =
- INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("This is Broadwell, assuming "
- "LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
@@ -811,6 +805,8 @@ int i915_reset(struct drm_device *dev)
if (!i915.reset)
return 0;
+ intel_reset_gt_powersave(dev);
+
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
@@ -880,7 +876,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
- intel_reset_gt_powersave(dev);
+ intel_enable_gt_powersave(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1584,7 +1580,7 @@ static struct drm_driver driver = {
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_dumb_map_offset,
+ .dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 63bcda5541ec..9d7a7155bf02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
*/
struct workqueue_struct *dp_wq;
- uint32_t bios_vgacntr;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -2161,8 +2159,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
- (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
@@ -2501,9 +2498,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a9faea626db..5f614828d365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -401,7 +401,6 @@ static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
- bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
if (obj == NULL)
return -ENOMEM;
- obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, true, &args->handle);
+ args->size, &args->handle);
}
/**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, false, &args->handle);
+ args->size, &args->handle);
}
static inline int
@@ -1050,6 +1048,7 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1069,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -EFAULT;
}
+ intel_runtime_pm_get(dev_priv);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- return ret;
+ goto put_rpm;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
@@ -1123,6 +1124,9 @@ out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
+put_rpm:
+ intel_runtime_pm_put(dev_priv);
+
return ret;
}
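The pwrite path now takes a runtime-PM reference before trying to grab struct_mutex and releases it on every exit path through the new put_rpm label. A rough user-space sketch of the same bracketing pattern, with stub functions standing in for the intel_runtime_pm calls and the locked section:

#include <stdio.h>

static void rpm_get(void) { puts("rpm get"); }
static void rpm_put(void) { puts("rpm put"); }
static int  lock_interruptible(void) { return 0; /* pretend the lock was taken */ }
static void unlock(void) { puts("unlock"); }

static int do_pwrite(void)
{
	int ret;

	rpm_get();                 /* taken before any early return below */

	ret = lock_interruptible();
	if (ret)
		goto put_rpm;      /* no bare "return ret" any more */

	puts("write pages");
	unlock();
put_rpm:
	rpm_put();                 /* balanced on every path */
	return ret;
}

int main(void)
{
	return do_pwrite();
}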
@@ -1840,10 +1844,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
drm_gem_free_mmap_offset(&obj->base);
}
-static int
+int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle, bool dumb,
+ uint32_t handle,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1864,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1891,15 +1888,6 @@ unlock:
return ret;
}
-int
-i915_gem_dumb_map_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- return i915_gem_mmap_gtt(file, dev, handle, true, offset);
-}
-
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1921,7 +1909,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
static inline int
@@ -3160,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
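The new adjustment rounds the fence coverage down to a whole number of tile rows so the fence register never extends past the tiled area. A small standalone rerun of the arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the size adjustment above: a Y-tile row spans 32 lines,
 * an X-tile row spans 8, so row_size = stride * rows_per_tile. */
static uint32_t fence_size(uint32_t obj_size, uint32_t stride, int tiling_y)
{
	uint32_t row_size = stride * (tiling_y ? 32 : 8);

	return (obj_size / row_size) * row_size;   /* round down to full rows */
}

int main(void)
{
	printf("%u\n", fence_size(1 << 20, 512, 1));   /* exact multiple: 1048576 */
	printf("%u\n", fence_size(1050000, 512, 1));   /* ragged tail dropped: 1048576 */
	return 0;
}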
@@ -4896,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
- /*
- * XXX: Contexts should only be initialized once. Doing a switch to the
- * default context switch however is something we'd like to do after
- * reset or thaw (the latter may not actually be necessary for HW, but
- * goes with our code better). Context switching requires rings (for
- * the do_switch), but before enabling PPGTT. So don't move this.
- */
- ret = i915_gem_context_enable(dev_priv);
+ ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
-
- return ret;
}
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
+ DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
}
return ret;
@@ -5167,7 +5155,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d17ff435f276..d011ec82ef1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
u32 hw_flags)
{
u32 flags = hw_flags | MI_MM_SPACE_GTT;
- int ret;
+ const int num_rings =
+ /* Use an extended w/a on ivb+ if signalling from other rings */
+ i915_semaphore_is_enabled(ring->dev) ?
+ hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ 0;
+ int len, i, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring,
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
- ret = intel_ring_begin(ring, 6);
+
+ len = 4;
+ if (INTEL_INFO(ring->dev)->gen >= 7)
+ len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+ ret = intel_ring_begin(ring, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
+ }
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring,
*/
intel_ring_emit(ring, MI_NOOP);
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ }
intel_ring_advance(ring);
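The ring allocation is no longer a fixed 6 dwords: on gen7+ the sequence grows by the two MI_ARB_ON_OFF dwords plus, when other rings can signal, an MI_LOAD_REGISTER_IMM header and a register/value pair per signaller on each side of the context switch. A standalone tally of the same len formula (illustrative only):

#include <stdio.h>

static int set_context_len(int gen, int num_other_rings)
{
	int len = 4;    /* MI_NOOP, MI_SET_CONTEXT, context descriptor, MI_NOOP */

	if (gen >= 7)
		len += 2 + (num_other_rings ? 4 * num_other_rings + 2 : 0);
	return len;
}

int main(void)
{
	printf("gen6, no signallers: %d dwords\n", set_context_len(6, 0));
	printf("gen7, 2 signallers:  %d dwords\n", set_context_len(7, 2));
	return 0;
}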
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f06027ba3ee5..11738316394a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
- WARN_ONCE(obj->base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 981834b0f9b6..b051a238baf9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -281,13 +281,34 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
+
WARN_ON(dev_priv->rps.pm_iir);
WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
+ I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+ dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
spin_unlock_irq(&dev_priv->irq_lock);
}
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
+{
+ /*
+ * SNB and IVB are known to hard hang, and VLV and CHV may hard hang, on a
+ * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+ return mask;
+}
+
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
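gen6_sanitize_rps_pm_mask() centralises the two quirks that were previously open-coded in gen6_rps_pm_mask() and gen6_disable_rps_interrupts(). A standalone model of the masking, using placeholder bit positions rather than the real register layout:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, chosen only for illustration. */
#define PM_RP_UP_EI_EXPIRED       (1u << 2)
#define PM_REDIRECT_TO_NON_DISP   (1u << 31)

static uint32_t sanitize_rps_pm_mask(int gen, int is_haswell, uint32_t mask)
{
	/* SNB/IVB (and possibly VLV/CHV) must never mask UP_EI_EXPIRED. */
	if (gen <= 7 && !is_haswell)
		mask &= ~PM_RP_UP_EI_EXPIRED;

	/* gen8: the redirect bit must stay clear in the written value. */
	if (gen >= 8)
		mask &= ~PM_REDIRECT_TO_NON_DISP;

	return mask;
}

int main(void)
{
	printf("gen6 mask-all: 0x%08x\n", sanitize_rps_pm_mask(6, 0, ~0u));
	printf("gen8 mask-all: 0x%08x\n", sanitize_rps_pm_mask(8, 0, ~0u));
	return 0;
}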
@@ -300,8 +321,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
- ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
+ I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -3307,8 +3327,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_INFO(dev)->gen >= 6) {
- pm_irqs |= dev_priv->pm_rps_events;
-
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -3520,7 +3542,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3609,7 +3635,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
vlv_display_irq_reset(dev_priv);
- dev_priv->irq_mask = 0;
+ dev_priv->irq_mask = ~0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3715,8 +3741,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3726,6 +3750,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
@@ -3897,8 +3922,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3908,6 +3931,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index eefdc238f70b..172de3b3433b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -395,6 +395,7 @@
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fb3e3d429191..e7a16f119a29 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9815,7 +9815,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- /*
- * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
- * from S3 without preserving (some of?) the other bits.
- */
- I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_shared_dpll_init(dev);
- /* save the BIOS value before clobbering it */
- dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25fdbb16d4e0..3b40a17b8852 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -794,6 +794,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..dfb783a8f2c3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level == 0) {
+ if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1f4b56e273c8..bf814a64582a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4363,16 +4363,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
- /* IVB and SNB hard hangs on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- */
- if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
- mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (IS_GEN8(dev_priv->dev))
- mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return ~mask;
+ return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
/* gen6_set_rps is called to update the frequency request, but should also be
@@ -4441,7 +4432,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
return;
/* Mask turbo interrupt so that they will not come in between */
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
vlv_force_gfx_clock(dev_priv, true);
@@ -6191,6 +6183,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
valleyview_cleanup_gt_powersave(dev);
}
+static void gen6_suspend_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ /*
+ * TODO: disable RPS interrupts on GEN9+ too once RPS support
+ * is added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_disable_rps_interrupts(dev);
+}
+
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev: drm device
@@ -6206,14 +6212,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_suspend_rps(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -6316,8 +6315,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ gen6_suspend_rps(dev);
dev_priv->rps.enabled = false;
- intel_enable_gt_powersave(dev);
}
static void ibx_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9f445e9a75d1..c7bc93d28d84 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index f5a78d53e297..ac6da7102fbb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
vlv_power_sequencer_reset(dev_priv);
}
-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
-
- check_power_well_state(dev_priv, power_well);
}
power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
-
- check_power_well_state(dev_priv, power_well);
}
mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index aa873048308b..94a5bee69fe7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
drm_gem_object_unreference(gpu->memptrs_bo);
}
- if (gpu->pm4)
- release_firmware(gpu->pm4);
- if (gpu->pfp)
- release_firmware(gpu->pfp);
+ release_firmware(gpu->pm4);
+ release_firmware(gpu->pfp);
msm_gpu_cleanup(&gpu->base);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index fbebb0405d76..b4e70e0e3cfa 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
uint32_t hpd_ctrl;
int i, ret;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
ret = gpio_config(hdmi, true);
if (ret) {
dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
}
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_enable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
hdmi_set_mode(hdmi, false);
phy->funcs->reset(phy);
hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
return ret;
}
-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
hdmi_set_mode(hdmi, false);
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_disable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
for (i = 0; i < config->hpd_clk_cnt; i++)
clk_disable_unprepare(hdmi->hpd_clks[i]);
ret = gpio_config(hdmi, false);
- if (ret) {
- dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
- goto fail;
- }
-
- return 0;
+ if (ret)
+ dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
-fail:
- return ret;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ }
}
static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
- DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
- /* ack the irq: */
+ /* ack & disable (temporarily) HPD events: */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
- hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
/* detect disconnect if we are connected or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index a7672e100d8b..3449213f1e76 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_device *dev = crtc->dev;
-
DBG("%s: check", mdp4_crtc->name);
-
- if (mdp4_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
// TODO anything else to check?
-
return 0;
}
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp4_crtc->name);
+ DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
WARN_ON(mdp4_crtc->event);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 0e9a2e3a82d7..f021f960a8a2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", mdp5_crtc->name);
- if (mdp5_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
/* request a free CTL, if none is already allocated for this CRTC */
if (state->enable && !mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp5_crtc->name);
+ DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
- /* when called from modeset_init(), skip the rest until later: */
- if (!mdp5_kms)
- return;
+ mdp_irq_update(&mdp5_kms->base);
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index a11f1b80c488..9f01a4f21af2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
goto fail;
}
- /* NOTE: the vsync and error irq's are actually associated with
- * the INTF/encoder.. the easiest way to deal with this (ie. what
- * we do now) is assume a fixed relationship between crtc's and
- * encoders. I'm not sure if there is ever a need to more freely
- * assign crtcs to encoders, but if there is then we need to take
- * care of error and vblank irq's that the crtc has registered,
- * and also update user-requested vblank_mask.
- */
- encoder->possible_crtcs = BIT(0);
- mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 03455b64a245..2a731722d840 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
}
-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 99557b5ad4fd..b268ce95d394 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
-
+void mdp_irq_update(struct mdp_kms *mdp_kms);
/*
* pixel format helpers:
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0de412e13dc..191968256c58 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -23,10 +23,41 @@ struct msm_commit {
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
+ uint32_t crtc_mask;
};
static void fence_cb(struct msm_fence_cb *cb);
+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ int ret;
+
+ spin_lock(&priv->pending_crtcs_event.lock);
+ ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+ !(priv->pending_crtcs & crtc_mask));
+ if (ret == 0) {
+ DBG("start: %08x", crtc_mask);
+ priv->pending_crtcs |= crtc_mask;
+ }
+ spin_unlock(&priv->pending_crtcs_event.lock);
+
+ return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ spin_lock(&priv->pending_crtcs_event.lock);
+ DBG("end: %08x", crtc_mask);
+ priv->pending_crtcs &= ~crtc_mask;
+ wake_up_all_locked(&priv->pending_crtcs_event);
+ spin_unlock(&priv->pending_crtcs_event.lock);
+}
+
static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
drm_atomic_helper_commit_post_planes(dev, state);
+ /* NOTE: _wait_for_vblanks() only waits for vblank on
+ * enabled CRTCs. So we end up faulting when disabling
+ * due to (potentially) unref'ing the outgoing fb's
+ * before the vblank when the disable has latched.
+ *
+ * But if it did wait on disabled (or newly disabled)
+ * CRTCs, that would be racy (ie. we could have missed
+ * the irq). We need some way to poll for pipe shut
+ * down. Or just live with occasionally hitting the
+ * timeout in the CRTC disable path (which really should
+ * not be critical path)
+ */
+
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
+ end_atomic(dev->dev_private, c->crtc_mask);
+
kfree(c);
}
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
- struct msm_commit *c;
int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ struct msm_commit *c;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
return ret;
c = new_commit(state);
+ if (!c)
+ return -ENOMEM;
+
+ /*
+ * Figure out what crtcs we have:
+ */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ if (!crtc)
+ continue;
+ c->crtc_mask |= (1 << drm_crtc_index(crtc));
+ }
/*
* Figure out what fence to wait for:
@@ -122,6 +181,14 @@ int msm_atomic_commit(struct drm_device *dev,
}
/*
+ * Wait for pending updates on any of the same crtc's and then
+ * mark our set of crtc's as busy:
+ */
+ ret = start_atomic(dev->dev_private, c->crtc_mask);
+ if (ret)
+ return ret;
+
+ /*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
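start_atomic()/end_atomic() serialise asynchronous commits per CRTC: a commit blocks until none of its CRTCs are marked pending, marks them, and clears them again once the commit completes. A rough user-space model of the same handshake, using a mutex and condition variable in place of the kernel wait queue (illustration only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static uint32_t pending_crtcs;

static void start_atomic(uint32_t crtc_mask)
{
	pthread_mutex_lock(&lock);
	while (pending_crtcs & crtc_mask)          /* wait until our crtcs are free */
		pthread_cond_wait(&cond, &lock);
	pending_crtcs |= crtc_mask;                /* mark them busy */
	pthread_mutex_unlock(&lock);
}

static void end_atomic(uint32_t crtc_mask)
{
	pthread_mutex_lock(&lock);
	pending_crtcs &= ~crtc_mask;               /* release and wake waiters */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start_atomic(1 << 0);
	printf("crtc 0 marked busy: 0x%x\n", pending_crtcs);
	end_atomic(1 << 0);
	printf("released: 0x%x\n", pending_crtcs);
	return 0;
}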
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c795217e1bfc..9a61546a0b05 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("msm", 0);
init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 136303818436..b69ef2d5a26c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -96,6 +96,10 @@ struct msm_drm_private {
/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;
+ /* crtcs pending async atomic updates: */
+ uint32_t pending_crtcs;
+ wait_queue_head_t pending_crtcs_event;
+
/* registered MMUs: */
unsigned int num_mmus;
struct msm_mmu *mmus[NUM_DOMAINS];
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 94d55e526b4e..1f3af13ccede 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -190,8 +190,7 @@ fail_unlock:
fail:
if (ret) {
- if (fbi)
- framebuffer_release(fbi);
+ framebuffer_release(fbi);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4a6f0e49d5b5..49dea4fb55ac 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_free_large(msm_obj->pages);
} else {
- if (msm_obj->vaddr)
- vunmap(msm_obj->vaddr);
+ vunmap(msm_obj->vaddr);
put_pages(obj);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ff2b434b3db4..760947e380c9 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -26,7 +26,7 @@
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (++event->refs[index * event->types_nr + type] == 1) {
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
index d1bcde55e9d7..839a32577680 100644
--- a/drivers/gpu/drm/nouveau/core/core/notify.c
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
struct nvkm_event *event = notify->event;
unsigned long flags;
- BUG_ON(!spin_is_locked(&event->list_lock));
+ assert_spin_locked(&event->list_lock);
BUG_ON(size != notify->size);
spin_lock_irqsave(&event->refs_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 674da1f095b2..732922690653 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
break;
+ case 0x106:
+ device->cname = "GK208B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
case 0x108:
device->cname = "GK208";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
index 5e58bba0dd5c..a7a890fad1e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -44,8 +44,10 @@ static void
pramin_fini(void *data)
{
struct priv *priv = data;
- nv_wr32(priv->bios, 0x001700, priv->bar0);
- kfree(priv);
+ if (priv) {
+ nv_wr32(priv->bios, 0x001700, priv->bar0);
+ kfree(priv);
+ }
}
static void *
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
index 00f2ca7e44a5..033a8e999497 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -24,34 +24,71 @@
#include "nv50.h"
+struct nvaa_ram_priv {
+ struct nouveau_ram base;
+ u64 poller_base;
+};
+
static int
nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 datasize,
struct nouveau_object **pobject)
{
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ u32 rsvd_tail = (1024 * 1024); /* vbios etc */
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvaa_ram_priv *priv;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ ret = nouveau_ram_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- ram->size = nv_rd32(pfb, 0x10020c);
- ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+ priv->base.type = NV_MEM_TYPE_STOLEN;
+ priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
- (rsvd_head + rsvd_tail), 1);
+ rsvd_tail += 0x1000;
+ priv->poller_base = priv->base.size - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
+ (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
+ 1);
if (ret)
return ret;
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
- ram->get = nv50_ram_get;
- ram->put = nv50_ram_put;
+ priv->base.get = nv50_ram_get;
+ priv->base.put = nv50_ram_put;
+ return 0;
+}
+
+static int
+nvaa_ram_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvaa_ram_priv *priv = (void *)object;
+ int ret;
+ u64 dniso, hostnb, flush;
+
+ ret = nouveau_ram_init(&priv->base);
+ if (ret)
+ return ret;
+
+ dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
+ hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
+ flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+
+ /* Enable NISO poller for various clients and set their associated
+ * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+ */
+ nv_wr32(pfb, 0x100c18, dniso);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
+ nv_wr32(pfb, 0x100c1c, hostnb);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
+ nv_wr32(pfb, 0x100c24, flush);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+
return 0;
}
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvaa_ram_ctor,
.dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
+ .init = nvaa_ram_init,
.fini = _nouveau_ram_fini,
},
};
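nvaa_ram_init() carves a 4 KiB poller page out of the enlarged reserved tail and derives three values, one per 32-byte slot, as the distance from that slot to the end of VRAM in 32-byte units minus one. A standalone rerun of the arithmetic with made-up sizes (no claim is made here about what the hardware does with the values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up numbers: 256 MiB of VRAM, 1 MiB tail plus the 4 KiB poller page. */
	uint64_t size        = 256ull << 20;
	uint64_t rsvd_tail   = (1ull << 20) + 0x1000;
	uint64_t poller_base = size - rsvd_tail;

	uint64_t dniso  = ((size - (poller_base + 0x00)) >> 5) - 1;
	uint64_t hostnb = ((size - (poller_base + 0x20)) >> 5) - 1;
	uint64_t flush  = ((size - (poller_base + 0x40)) >> 5) - 1;

	printf("poller_base=0x%llx dniso=0x%llx hostnb=0x%llx flush=0x%llx\n",
	       (unsigned long long)poller_base, (unsigned long long)dniso,
	       (unsigned long long)hostnb, (unsigned long long)flush);
	return 0;
}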
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
index a75c35ccf25c..165401c4045c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -24,13 +24,6 @@
#include "nv04.h"
-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
- struct nv04_mc_priv *priv = (void *)pmc;
- nv_wr08(priv, 0x088050, 0xff);
-}
-
struct nouveau_oclass *
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.fini = _nouveau_mc_fini,
},
.intr = nv04_mc_intr,
- .msi_rearm = nv4c_mc_msi_rearm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 21ec561edc99..bba2960d3dfb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
* so use the DMA API for them.
*/
if (!nv_device_is_cpu_coherent(device) &&
- ttm->caching_state == tt_uncached)
+ ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
+ return;
+ }
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 5d93902a91ab..f8042433752b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
@@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(!(gem->dumb || gem->import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 28d51a22a4bf..bf0f9e21d714 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -36,7 +36,14 @@ void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
+ struct device *dev = drm->dev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0 && ret != -EACCES))
+ return;
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
/* reset filp so nouveau_bo_del_ttm() can test for it */
gem->filp = NULL;
ttm_bo_unref(&bo);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_vma *vma;
+ struct device *dev = drm->dev->dev;
int ret;
if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0 && ret != -EACCES)
+ goto out;
+
ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
- if (ret) {
+ if (ret)
kfree(vma);
- goto out;
- }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
} else {
vma->refcount++;
}
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret;
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
- if (--vma->refcount == 0)
- nouveau_gem_object_unmap(nvbo, vma);
+ if (--vma->refcount == 0) {
+ ret = pm_runtime_get_sync(dev);
+ if (!WARN_ON(ret < 0 && ret != -EACCES)) {
+ nouveau_gem_object_unmap(nvbo, vma);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+ }
}
ttm_bo_unreserve(&nvbo->bo);
}
@@ -444,9 +469,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- WARN_ONCE(nvbo->gem.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 753a6def61e7..3d1cfcb96b6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "drm_legacy.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
@@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ return drm_legacy_mmap(filp, vma);
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d59ec491dbb9..ed644a4f6f57 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return pll;
}
/* otherwise, pick one of the plls */
- if ((rdev->family == CHIP_KAVERI) ||
- (rdev->family == CHIP_KABINI) ||
+ if ((rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
- /* KB/KV/ML has PPLL1 and PPLL2 */
+ /* KB/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
- /* CI has PPLL0, PPLL1, and PPLL2 */
+ /* CI/KV has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
+ (rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_BONAIRE) ||
(rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 11ba9d21b89b..db42a670f995 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
+ if ((mode->clock > 340000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+ return MODE_CLOCK_HIGH;
+
if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dcde3798b45..64fdae558d36 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -6033,6 +6033,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+ WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* compute doesn't have PFP */
if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index dde5c7e29eb2..42cd0cffe210 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -903,6 +902,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
if (vm_id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
@@ -943,5 +945,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 1 << vm_id);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* reference */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ba85986febea..03003f8a6de6 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2156,4 +2156,6 @@
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+#define IH_VMID_0_LUT 0x3D40u
+
#endif
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 2fe8cfc966d9..bafdf92a5732 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9b42001295ba..e3e9c10cfba9 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (radeon_bapm == -1) {
- /* There are stability issues reported on with
- * bapm enabled on an asrock system.
- */
- if (rdev->pdev->subsystem_vendor == 0x1849)
- pi->bapm_enable = false;
- else
+ /* only enable bapm on KB, ML by default */
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
pi->bapm_enable = true;
+ else
+ pi->bapm_enable = false;
} else if (radeon_bapm == 0) {
pi->bapm_enable = false;
} else {
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 360de9f1f491..aea48c89b241 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 50f88611ff60..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -463,5 +462,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+ radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0); /* value */
}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2e12e4d69253..ad7125486894 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1133,6 +1133,23 @@
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
@@ -1272,6 +1289,13 @@
(1 << 21) | \
(((n) & 0xFFFFF) << 0))
+#define DMA_SRBM_POLL_PACKET ((9 << 28) | \
+ (1 << 27) | \
+ (1 << 26))
+
+#define DMA_SRBM_READ_PACKET ((9 << 28) | \
+ (1 << 27))
+
/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
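The new WAIT_REG_MEM field macros encode the compare function, the register/memory select and the engine into one control dword; the cik/cayman VM-flush hunks above use function 0 (always) on the ME. A trivial standalone composition of a couple of control words (macro values copied from the header, the second combination is purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define WAIT_REG_MEM_FUNCTION(x)   ((x) << 0)   /* 0 = always, 3 = equal, ... */
#define WAIT_REG_MEM_MEM_SPACE(x)  ((x) << 4)   /* 0 = register, 1 = memory */
#define WAIT_REG_MEM_ENGINE(x)     ((x) << 8)   /* 0 = ME, 1 = PFP */

int main(void)
{
	uint32_t vm_flush = WAIT_REG_MEM_FUNCTION(0) |   /* always */
			    WAIT_REG_MEM_MEM_SPACE(0) |  /* poll a register */
			    WAIT_REG_MEM_ENGINE(0);      /* on the ME */
	uint32_t wait_eq  = WAIT_REG_MEM_FUNCTION(3) |   /* wait for == */
			    WAIT_REG_MEM_MEM_SPACE(1) |  /* on a memory word */
			    WAIT_REG_MEM_ENGINE(1);      /* from the PFP */

	printf("vm_flush ctrl = 0x%08x\n", vm_flush);
	printf("wait_eq  ctrl = 0x%08x\n", wait_eq);
	return 0;
}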
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+ return addr;
+}
+
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+ uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = rdev->gart.ptr;
-
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
+ return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = rdev->gart.ptr;
+
/* on x86 we want this to be CPU endian; on powerpc
 * without HW swappers it'll get swapped on the way
 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
- writel(addr, ((void __iomem *)ptr) + (i * 4));
+ writel(entry, ((void __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
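
The rv370 change above shows the pattern repeated across the rest of the series: entry encoding (address plus R300_PTE_* flag bits) moves into a get_page_entry() callback so it can be computed once and cached, leaving set_page() to write an already-encoded value. A standalone sketch of that encoding step, mirroring the bit layout visible in the hunk; the flag constants standing in for RADEON_GART_PAGE_* are assumptions of the sketch, not driver definitions.

#include <stdint.h>
#include <stdio.h>

#define R300_PTE_UNSNOOPED	(1 << 0)
#define R300_PTE_WRITEABLE	(1 << 2)
#define R300_PTE_READABLE	(1 << 3)

/* stand-ins for RADEON_GART_PAGE_* in this sketch */
#define GART_PAGE_READ		(1 << 0)
#define GART_PAGE_WRITE		(1 << 1)
#define GART_PAGE_SNOOP		(1 << 2)

static uint64_t pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	uint32_t lo = (uint32_t)addr;
	uint32_t hi = (uint32_t)(addr >> 32);
	uint64_t entry = (lo >> 8) | ((hi & 0xff) << 24);

	if (flags & GART_PAGE_READ)
		entry |= R300_PTE_READABLE;
	if (flags & GART_PAGE_WRITE)
		entry |= R300_PTE_WRITEABLE;
	if (!(flags & GART_PAGE_SNOOP))
		entry |= R300_PTE_UNSNOOPED;
	return entry;
}

int main(void)
{
	uint64_t addr = 0x12345000ULL;	/* page-aligned DMA address */

	printf("entry for 0x%llx: 0x%llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)pcie_gart_get_page_entry(addr,
			GART_PAGE_READ | GART_PAGE_WRITE | GART_PAGE_SNOOP));
	return 0;
}
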
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..3f2a8d3febca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
+ uint64_t entry;
struct page *page;
dma_addr_t addr;
};
@@ -645,7 +646,7 @@ struct radeon_gart {
unsigned num_cpu_pages;
unsigned table_size;
struct page **pages;
- dma_addr_t *pages_addr;
+ uint64_t *pages_entry;
bool ready;
};
@@ -1847,8 +1848,9 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
+ uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 850de57069be..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -333,6 +337,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
.set_wptr = &r100_gfx_set_wptr,
};
+static struct radeon_asic_ring rv515_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
+};
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -345,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -411,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -477,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -543,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -609,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -677,6 +700,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -745,10 +769,11 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -811,10 +836,11 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -905,6 +931,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -990,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1081,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1185,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1303,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1395,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1486,6 +1518,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1621,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1724,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1857,6 +1892,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2018,6 +2054,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2125,6 +2162,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..8d787d115653 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
+ rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+ RADEON_GART_PAGE_DUMMY);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
+
+ if (!r) {
+ int i;
+
+ /* We might have dropped some GART table updates while it wasn't
+ * mapped, restore all entries
+ */
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
+
return r;
}
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
- u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
- page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base,
- RADEON_GART_PAGE_DUMMY);
+ radeon_gart_set_page(rdev, t,
+ rdev->dummy_page.entry);
}
- page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
{
unsigned t;
unsigned p;
- uint64_t page_base;
+ uint64_t page_base, page_entry;
int i, j;
if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
- if (rdev->gart.ptr) {
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base, flags);
- page_base += RADEON_GPU_PAGE_SIZE;
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ page_entry = radeon_gart_get_page_entry(page_base, flags);
+ rdev->gart.pages_entry[t] = page_entry;
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_entry);
}
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
- rdev->gart.num_cpu_pages);
- if (rdev->gart.pages_addr == NULL) {
+ rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+ rdev->gart.num_gpu_pages);
+ if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
- for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
- rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
- }
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
return 0;
}
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
- if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ if (rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
- vfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages_entry);
rdev->gart.pages = NULL;
- rdev->gart.pages_addr = NULL;
+ rdev->gart.pages_entry = NULL;
radeon_dummy_page_fini(rdev);
}
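
Taken together, the radeon_gart.c hunks replace pages_addr (one DMA address per CPU page) with pages_entry (one fully encoded entry per GPU page), so the table contents can simply be replayed after the VRAM table is re-pinned, as radeon_gart_table_vram_pin() now does. A toy user-space sketch of the bind path under that scheme; the page sizes, flag value and encode stub are assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CPU_PAGE_SIZE		4096u
#define GPU_PAGE_SIZE		4096u	/* equal here for simplicity */
#define ENTRIES_PER_CPU_PAGE	(CPU_PAGE_SIZE / GPU_PAGE_SIZE)

/* stand-in for the per-ASIC ->gart.get_page_entry() callback */
static uint64_t get_page_entry(uint64_t addr, uint32_t flags)
{
	return (addr & ~0xfffULL) | (flags & 0xfff);
}

int main(void)
{
	uint64_t dma_addr[2] = { 0x80001000, 0x80042000 };	/* fake DMA addresses */
	unsigned num_gpu_pages = 2 * ENTRIES_PER_CPU_PAGE;
	uint64_t *pages_entry = calloc(num_gpu_pages, sizeof(*pages_entry));
	unsigned t = 0;

	/* bind: encode once, cache the encoded entry per GPU page; in the
	 * driver, the entry would also be written to the table here when
	 * gart.ptr is mapped */
	for (unsigned i = 0; i < 2; i++) {
		uint64_t base = dma_addr[i];

		for (unsigned j = 0; j < ENTRIES_PER_CPU_PAGE; j++, t++) {
			pages_entry[t] = get_page_entry(base, 0x3);
			base += GPU_PAGE_SIZE;
		}
	}

	/* later, after the VRAM table is re-pinned: replay every cached entry */
	for (unsigned i = 0; i < num_gpu_pages; i++)
		printf("gtt[%u] = 0x%llx\n", i, (unsigned long long)pages_entry[i]);

	free(pages_entry);
	return 0;
}
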
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe48f229043e..d0b4f7d1140d 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-static int radeon_mode_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, bool dumb,
- uint64_t *offset_p)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
if (gobj == NULL) {
return -ENOENT;
}
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
- "Illegal dumb map of GPU buffer.\n");
-
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
return 0;
}
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- return radeon_mode_mmap(filp, dev, handle, true, offset_p);
-}
-
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
- return radeon_mode_mmap(filp, dev, args->handle, false,
- &args->addr_ptr);
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -593,7 +576,7 @@ error_unreserve:
error_free:
drm_free_large(vm_bos);
- if (r)
+ if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
@@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
- gobj->dumb = true;
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 065d02068ec3..bef9a0953284 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -28,6 +28,8 @@
#include "cikd.h"
#include "cik_reg.h"
#include "radeon_kfd.h"
+#include "radeon_ucode.h"
+#include <linux/firmware.h>
#define CIK_PIPE_PER_MEC (4)
@@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
/*
* Register access functions
@@ -69,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -89,14 +92,16 @@ static const struct kfd2kgd_calls kfd2kgd = {
.init_memory = kgd_init_memory,
.init_pipeline = kgd_init_pipeline,
.hqd_load = kgd_hqd_load,
- .hqd_is_occupies = kgd_hqd_is_occupies,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
+ .get_fw_version = get_fw_version
};
static const struct kgd2kfd_calls *kgd2kfd;
bool radeon_kfd_init(void)
{
+#if defined(CONFIG_HSA_AMD_MODULE)
bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
const struct kgd2kfd_calls**);
@@ -113,6 +118,17 @@ bool radeon_kfd_init(void)
}
return true;
+#elif defined(CONFIG_HSA_AMD)
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
}
void radeon_kfd_fini(void)
@@ -374,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
cpu_relax();
write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+ /* Mapping vmid to pasid also for IH block */
+ write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
+ pasid_mapping);
+
return 0;
}
@@ -416,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
lock_srbm(kgd, mec, pipe, 0, 0);
@@ -513,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
uint32_t act;
@@ -552,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
if (timeout == 0) {
pr_err("kfd: cp queue preemption time out (%dms)\n",
temp);
+ release_queue(kgd);
return -ETIME;
}
msleep(20);
@@ -561,3 +582,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
release_queue(kgd);
return 0;
}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct radeon_device *rdev = (struct radeon_device *) kgd;
+ const union radeon_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union radeon_firmware_header *)
+ rdev->mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA:
+ hdr = (const union radeon_firmware_header *)
+ rdev->sdma_fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+ /* Only 12 bits in use */
+ return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d68223eb469..86fc56434b28 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
- WARN_ONCE(bo->gem_base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 32522cc940a1..f7da8fe96a66 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1287,8 +1287,39 @@ dpm_failed:
return ret;
}
+struct radeon_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+};
+
+/* cards with dpm stability problems */
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
+ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
+ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
+ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
+ { 0, 0, 0, 0 },
+};
+
int radeon_pm_init(struct radeon_device *rdev)
{
+ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
+ bool disable_dpm = false;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ disable_dpm = true;
+ break;
+ }
+ ++p;
+ }
+
/* enable dpm on rv6xx+ */
switch (rdev->family) {
case CHIP_RV610:
@@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev)
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (disable_dpm && (radeon_dpm == -1))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
else if (radeon_dpm == 0)
rdev->pm.pm_method = PM_METHOD_PROFILE;
else
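
A standalone sketch of how the quirk-table walk above matches a card; the PCI IDs are taken from the hunk, while the device fields are plain variables here since this is not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_VENDOR_ID_ATI	0x1002

struct radeon_dpm_quirk {
	uint32_t chip_vendor, chip_device, subsys_vendor, subsys_device;
};

static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	{ 0, 0, 0, 0 },
};

int main(void)
{
	/* pretend this card is the second TURKS entry */
	uint32_t vendor = 0x1002, device = 0x6840;
	uint32_t sub_vendor = 0x1179, sub_device = 0xfb81;
	bool disable_dpm = false;

	for (struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; p->chip_device; p++) {
		if (vendor == p->chip_vendor && device == p->chip_device &&
		    sub_vendor == p->subsys_vendor && sub_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
	}
	printf("disable_dpm = %d\n", disable_dpm);
	return 0;
}
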
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 535403e0c8a2..15aee723db77 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
u32 format;
u32 *buffer;
const u8 __user *data;
- int size, dwords, tex_width, blit_width, spitch;
+ unsigned int size, dwords, tex_width, blit_width, spitch;
u32 height;
int i;
u32 texpitch, microtile;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..06d2246d07f1 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
uint64_t result;
/* page table offset */
- result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+ result &= ~RADEON_GPU_PAGE_MASK;
return result;
}
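
This is also why the explicit "& 0xFFFFFFFFFFFFF000ULL" disappears from ni_dma.c above and si_dma.c below: the cached entry carries its PTE flag bits in the low 12 bits, and radeon_vm_map_gart() now strips them itself. A tiny sketch of the mask arithmetic, assuming the usual 4 KiB RADEON_GPU_PAGE_SIZE.

#include <stdint.h>
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE	4096ULL
#define RADEON_GPU_PAGE_MASK	(RADEON_GPU_PAGE_SIZE - 1)

int main(void)
{
	uint64_t entry = 0x80042000ULL | 0x3;	/* page address | PTE flag bits */
	uint64_t result = entry & ~RADEON_GPU_PAGE_MASK;

	printf("entry  = 0x%llx\n", (unsigned long long)entry);
	printf("result = 0x%llx\n", (unsigned long long)result);	/* flags stripped */
	return 0;
}
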
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
uint32_t entry;
- u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
- entry = cpu_to_le32(entry);
- gtt[i] = entry;
+ return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ u32 *gtt = rdev->gart.ptr;
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..74bce91aecc1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
-
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
- writeq(addr, ptr + (i * 8));
+ return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+ writeq(entry, ptr + (i * 8));
}
int rs600_irq_set(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 60df444bd075..5d89b874a1a2 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index f5cc777e1c5f..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -206,6 +205,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+ radeon_ring_write(ring, 0xff << 16); /* retry */
+ radeon_ring_write(ring, 1 << vm_id); /* mask */
+ radeon_ring_write(ring, 0); /* value */
+ radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
/**
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 32e354b8b0ab..eff8a6444956 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
return ret;
}
+struct si_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 max_sclk;
+ u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { 0, 0, 0, 0 },
+};
+
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
u32 mclk, sclk;
u16 vddc, vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ u32 max_sclk = 0, max_mclk = 0;
int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ max_sclk = p->max_sclk;
+ max_mclk = p->max_mclk;
+ break;
+ }
+ ++p;
+ }
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
@@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
+ if (max_mclk) {
+ if (ps->performance_levels[i].mclk > max_mclk)
+ ps->performance_levels[i].mclk = max_mclk;
+ }
+ if (max_sclk) {
+ if (ps->performance_levels[i].sclk > max_sclk)
+ ps->performance_levels[i].sclk = max_sclk;
+ }
}
/* XXX validate the min clocks required for display */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 4069be89e585..84999242c747 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1632,6 +1632,23 @@
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
#define PACKET3_CP_DMA 0x41
@@ -1835,6 +1852,7 @@
#define DMA_PACKET_TRAP 0x7
#define DMA_PACKET_SRBM_WRITE 0x9
#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
#define VCE_STATUS 0x20004
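
The WAIT_REG_MEM_* field macros added here are what si_vm_flush() (hunk above) uses to build its wait-for-invalidate packet. A sketch of the six body dwords that follow the PACKET3(PACKET3_WAIT_REG_MEM, 5) header; the register offset value is illustrative and the PACKET3 header itself is left out.

#include <stdint.h>
#include <stdio.h>

#define WAIT_REG_MEM_FUNCTION(x)	((x) << 0)	/* 0 = always */
#define WAIT_REG_MEM_MEM_SPACE(x)	((x) << 4)	/* 0 = register */
#define WAIT_REG_MEM_ENGINE(x)		((x) << 8)	/* 0 = ME */
#define VM_INVALIDATE_REQUEST		0x1478		/* illustrative offset; see sid.h */

int main(void)
{
	uint32_t body[6] = {
		WAIT_REG_MEM_FUNCTION(0) | WAIT_REG_MEM_MEM_SPACE(0) | WAIT_REG_MEM_ENGINE(0),
		VM_INVALIDATE_REQUEST >> 2,	/* register dword index */
		0,				/* 0, as written in the hunk */
		0,				/* reference value */
		0,				/* mask */
		0x20,				/* poll interval */
	};

	for (int i = 0; i < 6; i++)
		printf("body dword %d: 0x%08x\n", i, body[i]);
	return 0;
}
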
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3367960286a6..978993fa3a36 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
- unsigned long value;
+ unsigned long value, flags;
bool yuv, planar;
/*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
else
bpp = planar ? 1 : 2;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_window_commit(dc, index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
{
struct tegra_dc *dc = to_tegra_dc(plane->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned long flags;
u32 value;
if (!plane->crtc)
return 0;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
tegra_dc_window_commit(dc, p->index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
struct tegra_bo_tiling tiling;
+ unsigned long value, flags;
unsigned int format, swap;
- unsigned long value;
int err;
err = tegra_fb_get_tiling(fb, &tiling);
if (err < 0)
return err;
+ spin_lock_irqsave(&dc->lock, flags);
+
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
unsigned long flags, base;
struct tegra_bo *bo;
- if (!dc->event)
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (!dc->event) {
+ spin_unlock_irqrestore(&drm->event_lock, flags);
return;
+ }
bo = tegra_fb_get_plane(crtc->primary->fb, 0);
+ spin_lock_irqsave(&dc->lock, flags);
+
/* check if new start address has been latched */
+ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
- spin_lock_irqsave(&drm->event_lock, flags);
- drm_send_vblank_event(drm, dc->pipe, dc->event);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_send_vblank_event(crtc, dc->event);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
- spin_unlock_irqrestore(&drm->event_lock, flags);
}
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
}
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
if (dc->event && dc->event->base.file_priv == file) {
dc->event->base.destroy(&dc->event->base);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
}
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
+ unsigned int pipe = drm_crtc_index(crtc);
struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_device *drm = crtc->dev;
if (dc->event)
return -EBUSY;
if (event) {
- event->pipe = dc->pipe;
+ event->pipe = pipe;
dc->event = event;
- drm_vblank_get(drm, dc->pipe);
+ drm_crtc_vblank_get(crtc);
}
tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
- drm_handle_vblank(dc->base.dev, dc->pipe);
+ drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e549afeece1f..d4f827593dfa 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
+ unsigned int pipe)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->pipe == pipe)
+ if (pipe == drm_crtc_index(crtc))
return crtc;
}
return NULL;
}
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+ if (!crtc)
+ return 0;
+
/* TODO: implement real hardware counter using syncpoints */
- return drm_vblank_count(dev, crtc);
+ return drm_crtc_vblank_count(crtc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086cbeaf..8777b7f75791 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
}
}
-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
+ struct scatterlist *s;
+ struct sg_table *sgt;
+ unsigned int i;
+
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
- bo->num_pages = size >> PAGE_SHIFT;
-
- bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
- if (IS_ERR(bo->sgt)) {
- drm_gem_put_pages(&bo->gem, bo->pages, false, false);
- return PTR_ERR(bo->sgt);
+ bo->num_pages = bo->gem.size >> PAGE_SHIFT;
+
+ sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ if (IS_ERR(sgt))
+ goto put_pages;
+
+ /*
+ * Fake up the SG table so that dma_map_sg() can be used to flush the
+ * pages associated with it. Note that this relies on the fact that
+ * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+ * only cache maintenance.
+ *
+ * TODO: Replace this with drm_clflush_sg() once it can be implemented
+ * without relying on symbols that are not exported.
+ */
+ for_each_sg(sgt->sgl, s, sgt->nents, i)
+ sg_dma_address(s) = sg_phys(s);
+
+ if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+ sgt = ERR_PTR(-ENOMEM);
+ goto release_sgt;
}
+ bo->sgt = sgt;
+
return 0;
+
+release_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+put_pages:
+ drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+ return PTR_ERR(sgt);
}
-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
- err = tegra_bo_get_pages(drm, bo, size);
+ err = tegra_bo_get_pages(drm, bo);
if (err < 0)
return err;
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
return err;
}
} else {
+ size_t size = bo->gem.size;
+
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
if (IS_ERR(bo))
return bo;
- err = tegra_bo_alloc(drm, bo, size);
+ err = tegra_bo_alloc(drm, bo);
if (err < 0)
goto release;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
}
mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
- else if (hide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
+ else if (hide_svga)
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
- }
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
- mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->hw_lock);
+ spin_lock_init(&dev_priv->waiter_lock);
+ spin_lock_init(&dev_priv->cap_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->enable_fb = enable_fbdev;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size;
ret = vmw_dma_masks(dev_priv);
- if (unlikely(ret != 0)) {
- mutex_unlock(&dev_priv->hw_mutex);
+ if (unlikely(ret != 0))
goto out_err0;
- }
/*
* Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size;
- mutex_unlock(&dev_priv->hw_mutex);
-
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- mutex_unlock(&dev_priv->hw_mutex);
}
if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- mutex_unlock(&dev_priv->hw_mutex);
/**
* Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
uint32_t memory_size;
bool has_gmr;
bool has_mob;
- struct mutex hw_mutex;
+ spinlock_t hw_lock;
+ spinlock_t cap_lock;
/*
* VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
- int goal_queue_waiters; /* Protected by hw_mutex */
+ spinlock_t waiter_lock;
+ int fence_queue_waiters; /* Protected by waiter_lock */
+ int goal_queue_waiters; /* Protected by waiter_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
return (struct vmw_master *) master->driver_priv;
}
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read and write operation. This is of course costly, but we
+ * don't perform much register access in the timing-critical paths anyway.
+ * In return we get the benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- uint32_t val;
+ unsigned long irq_flags;
+ u32 val;
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
return val;
}
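
The reason hw_lock exists at all: an SVGA register access is a two-step indexed-port sequence (select the register, then read or write the value port), so two unserialized accessors could interleave and hit the wrong register. A user-space analogue of the pattern, purely illustrative; the register array, port selection and mutex are stand-ins for the real outl()/inl() ports and the irqsave spinlock. Build with -lpthread.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* stand-ins for the SVGA index/value port pair */
static uint32_t regs[16];
static unsigned selected;
static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

static void svga_write(unsigned offset, uint32_t value)
{
	pthread_mutex_lock(&hw_lock);	/* keep the index/value accesses paired */
	selected = offset;
	regs[selected] = value;
	pthread_mutex_unlock(&hw_lock);
}

static uint32_t svga_read(unsigned offset)
{
	uint32_t val;

	pthread_mutex_lock(&hw_lock);
	selected = offset;
	val = regs[selected];
	pthread_mutex_unlock(&hw_lock);
	return val;
}

int main(void)
{
	svga_write(3, 0xdeadbeef);
	printf("reg 3 = 0x%x\n", svga_read(3));
	return 0;
}
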
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work, ping_work;
+ struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
return "svga";
}
-static void vmw_fence_ping_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, ping_work);
-
- vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
static bool vmw_fence_enable_signaling(struct fence *f)
{
struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
- if (mutex_trylock(&dev_priv->hw_mutex)) {
- vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
- } else
- schedule_work(&fman->ping_work);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
return true;
}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
- INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- (void) cancel_work_sync(&fman->ping_work);
spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!dev_priv->has_mob)
return false;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return (result != 0);
}
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
- mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- mutex_unlock(&dev_priv->hw_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy);
}
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ static DEFINE_SPINLOCK(ping_lock);
+ unsigned long irq_flags;
+ /*
+ * The ping_lock is needed because we don't have an atomic
+ * test-and-set of the SVGA_FIFO_BUSY register.
+ */
+ spin_lock_irqsave(&ping_lock, irq_flags);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_fifo_ping_host_locked(dev_priv, reason);
-
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock_irqrestore(&ping_lock, irq_flags);
}
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
if (interruptible)
ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
else if (likely(ret > 0))
ret = 0;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return 0;
}
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (num > SVGA3D_DEVCAP_MAX)
num = SVGA3D_DEVCAP_MAX;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
} else if (gb_objects) {
ret = vmw_fill_compat_cap(dev_priv, bounce, size);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
- uint32_t busy;
- mutex_lock(&dev_priv->hw_mutex);
- busy = vmw_read(dev_priv, SVGA_REG_BUSY);
- mutex_unlock(&dev_priv->hw_mutex);
-
- return (busy == 0);
+ return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags;
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
- mutex_unlock(&dev_priv->hw_mutex);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
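
The vmwgfx hunks above drop the coarse hw_mutex around the seqno/goal/fifo waiter bookkeeping and use a dedicated waiter_lock spinlock instead: only the first-waiter and last-waiter transitions touch the IRQ mask. A minimal userspace sketch of that pattern follows; all names are illustrative stand-ins, not vmwgfx symbols, and the register write is simulated with a shadow variable.

/*
 * Standalone userspace sketch (not the driver code) of the pattern the
 * vmwgfx hunks above switch to: a small spinlock guards a waiter count,
 * and only the 0->1 and 1->0 transitions touch the (simulated) IRQ mask.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t waiter_lock;
static int fence_queue_waiters;
static unsigned int irq_mask;		/* stands in for SVGA_REG_IRQMASK */
#define FAKE_IRQFLAG_ANY_FENCE 0x1

static void seqno_waiter_add(void)
{
	pthread_spin_lock(&waiter_lock);
	if (fence_queue_waiters++ == 0) {
		irq_mask |= FAKE_IRQFLAG_ANY_FENCE;	/* enable on first waiter */
		printf("irq_mask -> 0x%x\n", irq_mask);
	}
	pthread_spin_unlock(&waiter_lock);
}

static void seqno_waiter_remove(void)
{
	pthread_spin_lock(&waiter_lock);
	if (--fence_queue_waiters == 0) {
		irq_mask &= ~FAKE_IRQFLAG_ANY_FENCE;	/* disable on last waiter */
		printf("irq_mask -> 0x%x\n", irq_mask);
	}
	pthread_spin_unlock(&waiter_lock);
}

int main(void)
{
	pthread_spin_init(&waiter_lock, PTHREAD_PROCESS_PRIVATE);
	seqno_waiter_add();
	seqno_waiter_add();	/* no mask change: not the first waiter */
	seqno_waiter_remove();
	seqno_waiter_remove();	/* mask cleared: last waiter gone */
	pthread_spin_destroy(&waiter_lock);
	return 0;
}

A spinlock is sufficient here because the protected section only updates a counter and a register shadow and never sleeps.
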
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
- mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
- mutex_unlock(&dev_priv->hw_mutex);
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 230b6f887cd8..dfdc26970022 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -27,7 +27,8 @@ if HID
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
- depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+ depends on HID
+ select POWER_SUPPLY
default n
---help---
This option adds support of reporting battery strength (for HID devices
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c3d0ac1a0988..8b638792cb43 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7460f3402298..9243359c1821 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -526,6 +526,7 @@
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
#define USB_VENDOR_ID_LABTEC 0x1020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e0a0f06ac5ef..9505605b6e22 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{}
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b92bf01a1ae8..158fcf577fae 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
switch (id->product) {
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
ret = kye_tablet_enable(hdev);
if (ret) {
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index c917ab61aafa..5bc6d80d5be7 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
switch (data[0]) {
case REPORT_ID_DJ_SHORT:
+ if (size != DJREPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_dj_event(hdev, report, data, size);
case REPORT_ID_HIDPP_SHORT:
- /* intentional fallthrough */
+ if (size != HIDPP_REPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev,
+ "Short HID++ report of bad size (%d)", size);
+ return false;
+ }
+ return logi_dj_hidpp_event(hdev, report, data, size);
case REPORT_ID_HIDPP_LONG:
+ if (size != HIDPP_REPORT_LONG_LENGTH) {
+ dev_err(&hdev->dev,
+ "Long HID++ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_hidpp_event(hdev, report, data, size);
}
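
The logi_dj_raw_event() change above validates the raw report length against the fixed size its report ID implies before dispatching it. A standalone sketch of that guard; the ID and length values are assumptions copied in for illustration, not re-checked against the driver headers.

/*
 * Userspace sketch (illustrative only, not the driver code) of the
 * check-length-before-dispatch pattern the hunks above add.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define REPORT_ID_DJ_SHORT		0x20	/* assumed values for the sketch */
#define REPORT_ID_HIDPP_SHORT		0x10
#define REPORT_ID_HIDPP_LONG		0x11

#define DJREPORT_SHORT_LENGTH		15
#define HIDPP_REPORT_SHORT_LENGTH	7
#define HIDPP_REPORT_LONG_LENGTH	20

static bool report_size_ok(const unsigned char *data, size_t size)
{
	switch (data[0]) {
	case REPORT_ID_DJ_SHORT:
		return size == DJREPORT_SHORT_LENGTH;
	case REPORT_ID_HIDPP_SHORT:
		return size == HIDPP_REPORT_SHORT_LENGTH;
	case REPORT_ID_HIDPP_LONG:
		return size == HIDPP_REPORT_LONG_LENGTH;
	default:
		return false;	/* unknown report: reject */
	}
}

int main(void)
{
	unsigned char good[HIDPP_REPORT_SHORT_LENGTH] = { REPORT_ID_HIDPP_SHORT };
	unsigned char bad[3] = { REPORT_ID_HIDPP_LONG };

	printf("good: %d, bad: %d\n",
	       report_size_ok(good, sizeof(good)),
	       report_size_ok(bad, sizeof(bad)));
	return 0;
}
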
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 2f420c0b6609..a93cefe0e522 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
(report->rap.sub_id == 0x41);
}
+/**
+ * hidpp_prefix_name() prefixes the current given name with "Logitech ".
+ */
+static void hidpp_prefix_name(char **name, int name_length)
+{
+#define PREFIX_LENGTH 9 /* "Logitech " */
+
+ int new_length;
+ char *new_name;
+
+ if (name_length > PREFIX_LENGTH &&
+ strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
+ /* The prefix is already in the name */
+ return;
+
+ new_length = PREFIX_LENGTH + name_length;
+ new_name = kzalloc(new_length, GFP_KERNEL);
+ if (!new_name)
+ return;
+
+ snprintf(new_name, new_length, "Logitech %s", *name);
+
+ kfree(*name);
+
+ *name = new_name;
+}
+
/* -------------------------------------------------------------------------- */
/* HIDP++ 1.0 commands */
/* -------------------------------------------------------------------------- */
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
return NULL;
memcpy(name, &response.rap.params[2], len);
+
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, len + 1);
+
return name;
}
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
index += ret;
}
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, __name_length + 1);
+
return name;
}
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
switch (data[0]) {
case 0x02:
+ if (size < 2) {
+ hid_err(hdev, "Received HID report of bad size (%d)",
+ size);
+ return 1;
+ }
if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
input_event(wd->input, EV_KEY, BTN_LEFT,
!!(data[1] & 0x01));
input_event(wd->input, EV_KEY, BTN_RIGHT,
!!(data[1] & 0x02));
input_sync(wd->input);
+ return 0;
} else {
if (size < 21)
return 1;
return wtp_mouse_raw_xy_event(hidpp, &data[7]);
}
case REPORT_ID_HIDPP_LONG:
+ /* size is already checked in hidpp_raw_event. */
if ((report->fap.feature_index != wd->mt_feature_index) ||
(report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
return 1;
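
The hidpp_prefix_name() helper added above only prepends "Logitech " when the name does not already start with it, and keeps the old string if the allocation fails. The same logic in a standalone userspace form, using malloc()/free() in place of kzalloc()/kfree():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PREFIX "Logitech "

static void prefix_name(char **name, int name_length)
{
	const int prefix_len = strlen(PREFIX);
	char *new_name;
	int new_length;

	if (name_length > prefix_len &&
	    strncmp(*name, PREFIX, prefix_len) == 0)
		return;			/* already prefixed */

	new_length = prefix_len + name_length;
	new_name = calloc(1, new_length);
	if (!new_name)
		return;			/* keep the old name on allocation failure */

	snprintf(new_name, new_length, PREFIX "%s", *name);
	free(*name);
	*name = new_name;
}

int main(void)
{
	char *name = strdup("K400");

	prefix_name(&name, strlen(name) + 1);	/* include the terminating '\0' */
	printf("%s\n", name);			/* "Logitech K400" */
	free(name);
	return 0;
}
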
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1a07e07d99a0..47d7e74231e5 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -35,6 +35,8 @@ static struct class *pyra_class;
static void profile_activated(struct pyra_device *pyra,
unsigned int new_profile)
{
+ if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return;
pyra->actual_profile = new_profile;
pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
}
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
if (off != 0 || count != PYRA_SIZE_SETTINGS)
return -EINVAL;
- mutex_lock(&pyra->pyra_lock);
-
settings = (struct pyra_settings const *)buf;
+ if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return -EINVAL;
+
+ mutex_lock(&pyra->pyra_lock);
retval = pyra_set_settings(usb_dev, settings);
if (retval) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d32037cbf9db..d43e967e7533 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
static void i2c_hid_stop(struct hid_device *hid)
{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
hid->claimed = 0;
-
- i2c_hid_free_buffers(ihid);
}
static int i2c_hid_open(struct hid_device *hid)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dc89be90b35e..b27b3d33ebab 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6529c09c46f0..a7de26d1ac80 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON
for those channels specified in the map. This map can be provided
either via platform data or the device tree bindings.
+config SENSORS_I5500
+ tristate "Intel 5500/5520/X58 temperature sensor"
+ depends on X86 && PCI
+ help
+ If you say yes here you get support for the temperature
+ sensor inside the Intel 5500, 5520 and X58 chipsets.
+
+ This driver can also be built as a module. If so, the module
+ will be called i5500_temp.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 67280643bcf0..6c941472e707 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_HTU21) += htu21.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
+obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
new file mode 100644
index 000000000000..3e3ccbf18b4e
--- /dev/null
+++ b/drivers/hwmon/i5500_temp.c
@@ -0,0 +1,149 @@
+/*
+ * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor
+ *
+ * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Register definitions from datasheet */
+#define REG_TSTHRCATA 0xE2
+#define REG_TSCTRL 0xE8
+#define REG_TSTHRRPEX 0xEB
+#define REG_TSTHRLO 0xEC
+#define REG_TSTHRHI 0xEE
+#define REG_CTHINT 0xF0
+#define REG_TSFSC 0xF3
+#define REG_CTSTS 0xF4
+#define REG_TSTHRRQPI 0xF5
+#define REG_CTCTRL 0xF7
+#define REG_TSTIMER 0xF8
+
+/*
+ * Sysfs stuff
+ */
+
+/* Sensor resolution : 0.5 degree C */
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ long temp;
+ u16 tsthrhi;
+ s8 tsfsc;
+
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ temp = ((long)tsthrhi - tsfsc) * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_thresh(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int reg = to_sensor_dev_attr(devattr)->index;
+ long temp;
+ u16 tsthr;
+
+ pci_read_config_word(pdev, reg, &tsthr);
+ temp = tsthr * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ u8 ctsts;
+
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+
+static struct attribute *i5500_temp_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(i5500_temp);
+
+static const struct pci_device_id i5500_temp_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, i5500_temp_ids);
+
+static int i5500_temp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct device *hwmon_dev;
+ u32 tstimer;
+ s8 tsfsc;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable device\n");
+ return err;
+ }
+
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ pci_read_config_dword(pdev, REG_TSTIMER, &tstimer);
+ if (tsfsc == 0x7F && tstimer == 0x07D30D40) {
+ dev_notice(&pdev->dev, "Sensor seems to be disabled\n");
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "intel5500", NULL,
+ i5500_temp_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct pci_driver i5500_temp_driver = {
+ .name = "i5500_temp",
+ .id_table = i5500_temp_ids,
+ .probe = i5500_temp_probe,
+};
+
+module_pci_driver(i5500_temp_driver);
+
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
+MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver");
+MODULE_LICENSE("GPL");
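
For reference, the conversion used by show_temp() in the new driver above: the sensor resolution is 0.5 degrees C, so the driver reports ((TSTHRHI - TSFSC) * 500) millidegrees, which is the unit hwmon sysfs expects. A quick standalone check with made-up register readouts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tsthrhi = 0x69;	/* hypothetical REG_TSTHRHI readout */
	int8_t tsfsc = 0x0a;		/* hypothetical REG_TSFSC readout */
	long temp = ((long)tsthrhi - tsfsc) * 500;

	printf("temp1_input = %ld millidegrees C (%.1f C)\n",
	       temp, temp / 1000.0);	/* 47500 -> 47.5 C */
	return 0;
}
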
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 31e8308ba899..ab838d9e28b6 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ select I2C_SLAVE
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bff20a589621..958c8db4ec30 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
int ret;
pm_runtime_get_sync(&adap->dev);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (retry = 0; retry < adap->retries; retry++) {
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN) {
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return ret;
}
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
udelay(100);
}
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return -EREMOTEIO;
}
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
if (ret != 0) {
dev_err(&pdev->dev, "I2C controller init failed\n");
return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+ clk_unprepare(i2c->clk);
return ret;
}
}
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
s3c24xx_i2c_deregister_cpufreq(i2c);
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ clk_unprepare(i2c->clk);
+
pm_runtime_disable(&i2c->adap.dev);
pm_runtime_disable(&pdev->dev);
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
if (!IS_ERR(i2c->sysreg))
regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
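
The i2c-s3c2410 hunks above keep the clock prepared for the whole lifetime of the adapter and only toggle clk_enable()/clk_disable() around each transfer, with clk_unprepare() added to every probe error path and to remove(). A rough userspace model of why the split matters: "prepare" is the slow, sleepable half done once, while "enable"/"disable" is the cheap half that may run in atomic context. The fake_clk type and helpers below are invented for the sketch, not the kernel clk API.

#include <stdbool.h>
#include <stdio.h>

struct fake_clk {
	bool prepared;
	int enable_count;
};

static int fake_clk_prepare(struct fake_clk *clk)
{
	clk->prepared = true;		/* e.g. PLL lock: slow, may sleep */
	return 0;
}

static int fake_clk_enable(struct fake_clk *clk)
{
	if (!clk->prepared)
		return -1;		/* must be prepared first */
	clk->enable_count++;		/* gate on: fast, atomic-safe */
	return 0;
}

static void fake_clk_disable(struct fake_clk *clk)
{
	clk->enable_count--;
}

static void fake_clk_unprepare(struct fake_clk *clk)
{
	clk->prepared = false;
}

int main(void)
{
	struct fake_clk clk = { 0 };

	fake_clk_prepare(&clk);			/* probe */
	for (int xfer = 0; xfer < 3; xfer++) {	/* per-transfer fast path */
		fake_clk_enable(&clk);
		printf("transfer %d, enable_count=%d\n", xfer, clk.enable_count);
		fake_clk_disable(&clk);
	}
	fake_clk_unprepare(&clk);		/* remove */
	return 0;
}
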
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 440d5dbc8b5f..007818b3e174 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
int pos;
int sr;
bool send_stop;
+ bool stop_after_dma;
struct resource *res;
struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
if (pd->pos == pd->msg->len) {
/* Send stop if we haven't yet (DMA case) */
- if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+ if (pd->send_stop && pd->stop_after_dma)
i2c_op(pd, OP_TX_STOP, 0);
return 1;
}
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
real_pos = pd->pos - 2;
if (pd->pos == pd->msg->len) {
+ if (pd->stop_after_dma) {
+ /* Simulate PIO end condition after DMA transfer */
+ i2c_op(pd, OP_RX_STOP, 0);
+ pd->pos++;
+ break;
+ }
+
if (real_pos < 0) {
i2c_op(pd, OP_RX_STOP, 0);
break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
sh_mobile_i2c_dma_unmap(pd);
pd->pos = pd->msg->len;
+ pd->stop_after_dma = true;
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
bool do_start = pd->send_stop || !i;
msg = &msgs[i];
pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+ pd->stop_after_dma = false;
err = start_ch(pd, msg, do_start);
if (err)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 39d25a8cb1ad..e9eae57a2b50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2972,6 +2972,7 @@ trace:
}
EXPORT_SYMBOL(i2c_smbus_xfer);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
return ret;
}
EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus main module");
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 6631400b5f02..cf9b09db092f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
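
The i2c-slave-eeprom change above fixes an off-by-one: an access that ends exactly at attr->size is valid, so the bound check must use '>' rather than '>='. A tiny demonstration of the boundary case, with an arbitrary size:

#include <stdbool.h>
#include <stdio.h>

static bool in_range_old(long off, long count, long size)
{
	return !(off + count >= size);	/* old check: rejects a full-size access */
}

static bool in_range_new(long off, long count, long size)
{
	return !(off + count > size);	/* fixed check: allows it */
}

int main(void)
{
	long size = 256;		/* e.g. a 256-byte EEPROM image */

	printf("access of 256 at 0: old=%d new=%d\n",
	       in_range_old(0, 256, size), in_range_new(0, 256, size));
	printf("access of 257 at 0: old=%d new=%d\n",
	       in_range_old(0, 257, size), in_range_new(0, 257, size));
	return 0;
}
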
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index e37412da15f5..b99de00e57b8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
case ad7998:
return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
val);
- default:
+ case ad7992:
+ case ad7993:
+ case ad7994:
return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
val);
+ default:
+ /* Will be written when doing a conversion */
+ st->config = val;
+ return 0;
}
}
@@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
case ad7997:
case ad7998:
return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
- default:
+ case ad7992:
+ case ad7993:
+ case ad7994:
return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
+ default:
+ /* No readback support */
+ return st->config;
}
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 866fe904cba2..90c8cb727cc7 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -449,6 +449,9 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
if (val2 == NULL)
val2 = &unused;
+ if (!iio_channel_has_info(chan->channel, info))
+ return -EINVAL;
+
if (chan->indio_dev->info->read_raw_multi) {
ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
chan->channel, INDIO_MAX_RAW_ELEMENTS,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 57ecc5b204f3..9117b7a2d5f8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1114,7 +1114,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
struct mlx4_dev *dev = to_mdev(qp->device)->dev;
int err = 0;
- if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
ib_flow = flow_attr + 1;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 8afa28e4570e..18d4b2c8fe55 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,6 +28,13 @@
#include <linux/cdev.h>
#include "input-compat.h"
+enum evdev_clock_type {
+ EV_CLK_REAL = 0,
+ EV_CLK_MONO,
+ EV_CLK_BOOT,
+ EV_CLK_MAX
+};
+
struct evdev {
int open;
struct input_handle handle;
@@ -49,12 +56,32 @@ struct evdev_client {
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
- int clkid;
+ int clk_type;
bool revoked;
unsigned int bufsize;
struct input_event buffer[];
};
+static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
+{
+ switch (clkid) {
+
+ case CLOCK_REALTIME:
+ client->clk_type = EV_CLK_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ client->clk_type = EV_CLK_MONO;
+ break;
+ case CLOCK_BOOTTIME:
+ client->clk_type = EV_CLK_BOOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
@@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
struct input_event ev;
ktime_t time;
- time = (client->clkid == CLOCK_MONOTONIC) ?
- ktime_get() : ktime_get_real();
+ time = client->clk_type == EV_CLK_REAL ?
+ ktime_get_real() :
+ client->clk_type == EV_CLK_MONO ?
+ ktime_get() :
+ ktime_get_boottime();
ev.time = ktime_to_timeval(time);
ev.type = EV_SYN;
@@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
static void evdev_pass_values(struct evdev_client *client,
const struct input_value *vals, unsigned int count,
- ktime_t mono, ktime_t real)
+ ktime_t *ev_time)
{
struct evdev *evdev = client->evdev;
const struct input_value *v;
@@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
if (client->revoked)
return;
- event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
- mono : real);
+ event.time = ktime_to_timeval(ev_time[client->clk_type]);
/* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
@@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
{
struct evdev *evdev = handle->private;
struct evdev_client *client;
- ktime_t time_mono, time_real;
+ ktime_t ev_time[EV_CLK_MAX];
- time_mono = ktime_get();
- time_real = ktime_mono_to_real(time_mono);
+ ev_time[EV_CLK_MONO] = ktime_get();
+ ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+ ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+ TK_OFFS_BOOT);
rcu_read_lock();
client = rcu_dereference(evdev->grab);
if (client)
- evdev_pass_values(client, vals, count, time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
else
list_for_each_entry_rcu(client, &evdev->client_list, node)
- evdev_pass_values(client, vals, count,
- time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
rcu_read_unlock();
}
@@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
case EVIOCSCLOCKID:
if (copy_from_user(&i, p, sizeof(unsigned int)))
return -EFAULT;
- if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
- return -EINVAL;
- client->clkid = i;
- return 0;
+
+ return evdev_set_clk_type(client, i);
case EVIOCGKEYCODE:
return evdev_handle_get_keycode(dev, p);
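
With the evdev changes above, EVIOCSCLOCKID accepts CLOCK_BOOTTIME in addition to CLOCK_REALTIME and CLOCK_MONOTONIC, so event timestamps keep advancing across suspend. A minimal userspace caller; the device node path is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif

int main(void)
{
	int clkid = CLOCK_BOOTTIME;
	int fd = open("/dev/input/event0", O_RDONLY);	/* example node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, EVIOCSCLOCKID, &clkid) < 0)
		perror("EVIOCSCLOCKID");	/* -EINVAL on kernels without this change */
	close(fd);
	return 0;
}
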
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 04217c2e345c..213e3a1903ee 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
- for (i = 0; i < ABS_CNT; i++) {
- if (test_bit(i, dev->absbit)) {
- if (input_is_mt_axis(i))
- events += mt_slots;
- else
- events++;
+ if (test_bit(EV_ABS, dev->evbit)) {
+ for (i = 0; i < ABS_CNT; i++) {
+ if (test_bit(i, dev->absbit)) {
+ if (input_is_mt_axis(i))
+ events += mt_slots;
+ else
+ events++;
+ }
}
}
- for (i = 0; i < REL_CNT; i++)
- if (test_bit(i, dev->relbit))
- events++;
+ if (test_bit(EV_REL, dev->evbit)) {
+ for (i = 0; i < REL_CNT; i++)
+ if (test_bit(i, dev->relbit))
+ events++;
+ }
/* Make room for KEY and MSC events */
events += 7;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 96ee26c555e0..a5d9b3f3c871 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
config KEYBOARD_STMPE
tristate "STMPE keypad support"
depends on MFD_STMPE
+ depends on OF
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on STMPE I/O
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index d4dd78a7d56b..883d6aed5b9a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -35,9 +35,13 @@
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
- struct timer_list timer;
- struct work_struct work;
- unsigned int timer_debounce; /* in msecs */
+
+ struct timer_list release_timer;
+ unsigned int release_delay; /* in msecs, for IRQ-only buttons */
+
+ struct delayed_work work;
+ unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
+
unsigned int irq;
spinlock_t lock;
bool disabled;
@@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
{
if (!bdata->disabled) {
/*
- * Disable IRQ and possible debouncing timer.
+ * Disable IRQ and associated timer/work structure.
*/
disable_irq(bdata->irq);
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
+
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
bdata->disabled = true;
}
@@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
static void gpio_keys_gpio_work_func(struct work_struct *work)
{
struct gpio_button_data *bdata =
- container_of(work, struct gpio_button_data, work);
+ container_of(work, struct gpio_button_data, work.work);
gpio_keys_gpio_report_event(bdata);
@@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
pm_relax(bdata->input->dev.parent);
}
-static void gpio_keys_gpio_timer(unsigned long _data)
-{
- struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
-
- schedule_work(&bdata->work);
-}
-
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
@@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
if (bdata->button->wakeup)
pm_stay_awake(bdata->input->dev.parent);
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
- else
- schedule_work(&bdata->work);
+
+ mod_delayed_work(system_wq,
+ &bdata->work,
+ msecs_to_jiffies(bdata->software_debounce));
return IRQ_HANDLED;
}
@@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
input_event(input, EV_KEY, button->code, 1);
input_sync(input);
- if (!bdata->timer_debounce) {
+ if (!bdata->release_delay) {
input_event(input, EV_KEY, button->code, 0);
input_sync(input);
goto out;
@@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
bdata->key_pressed = true;
}
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
+ if (bdata->release_delay)
+ mod_timer(&bdata->release_timer,
+ jiffies + msecs_to_jiffies(bdata->release_delay));
out:
spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
@@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
{
struct gpio_button_data *bdata = data;
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
-
- cancel_work_sync(&bdata->work);
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
}
static int gpio_keys_setup_key(struct platform_device *pdev,
@@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
- bdata->timer_debounce =
+ bdata->software_debounce =
button->debounce_interval;
}
- irq = gpio_to_irq(button->gpio);
- if (irq < 0) {
- error = irq;
- dev_err(dev,
- "Unable to get irq number for GPIO %d, error %d\n",
- button->gpio, error);
- return error;
+ if (button->irq) {
+ bdata->irq = button->irq;
+ } else {
+ irq = gpio_to_irq(button->gpio);
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev,
+ "Unable to get irq number for GPIO %d, error %d\n",
+ button->gpio, error);
+ return error;
+ }
+ bdata->irq = irq;
}
- bdata->irq = irq;
- INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
- setup_timer(&bdata->timer,
- gpio_keys_gpio_timer, (unsigned long)bdata);
+ INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
isr = gpio_keys_gpio_isr;
irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
return -EINVAL;
}
- bdata->timer_debounce = button->debounce_interval;
- setup_timer(&bdata->timer,
+ bdata->release_delay = button->debounce_interval;
+ setup_timer(&bdata->release_timer,
gpio_keys_irq_timer, (unsigned long)bdata);
isr = gpio_keys_irq_isr;
@@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
input_set_capability(input, button->type ?: EV_KEY, button->code);
/*
- * Install custom action to cancel debounce timer and
+ * Install custom action to cancel release timer and
* workqueue item.
*/
error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
- int gpio = -1;
enum of_gpio_flags flags;
button = &pdata->buttons[i++];
- if (!of_find_property(pp, "gpios", NULL)) {
- button->irq = irq_of_parse_and_map(pp, 0);
- if (button->irq == 0) {
- i--;
- pdata->nbuttons--;
- dev_warn(dev, "Found button without gpios or irqs\n");
- continue;
- }
- } else {
- gpio = of_get_gpio_flags(pp, 0, &flags);
- if (gpio < 0) {
- error = gpio;
+ button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (button->gpio < 0) {
+ error = button->gpio;
+ if (error != -ENOENT) {
if (error != -EPROBE_DEFER)
dev_err(dev,
"Failed to get gpio flags, error: %d\n",
error);
return ERR_PTR(error);
}
+ } else {
+ button->active_low = flags & OF_GPIO_ACTIVE_LOW;
}
- button->gpio = gpio;
- button->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ button->irq = irq_of_parse_and_map(pp, 0);
+
+ if (!gpio_is_valid(button->gpio) && !button->irq) {
+ dev_err(dev, "Found button without gpios or irqs\n");
+ return ERR_PTR(-EINVAL);
+ }
if (of_property_read_u32(pp, "linux,code", &button->code)) {
dev_err(dev, "Button without keycode: 0x%x\n",
@@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+ button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+
if (of_property_read_u32(pp, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
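
The gpio_keys rework above replaces the timer-plus-work pair for GPIO-driven buttons with a single delayed work item: each interrupt re-arms the same job via mod_delayed_work(), so the line state is only reported once it has been quiet for the software debounce interval. A simplified model of that coalescing, with synthetic millisecond timestamps:

#include <stdio.h>

int main(void)
{
	const long debounce_ms = 5;
	const long edges[] = { 100, 101, 103, 104, 200 };	/* raw IRQ times */
	long deadline = -1;

	for (unsigned i = 0; i < sizeof(edges) / sizeof(edges[0]); i++) {
		/* a report scheduled before this edge would have fired first */
		if (deadline >= 0 && edges[i] >= deadline)
			printf("report state at t=%ld\n", deadline);
		/* mod_delayed_work() analogue: push the deadline out again */
		deadline = edges[i] + debounce_ms;
	}
	if (deadline >= 0)
		printf("report state at t=%ld\n", deadline);
	return 0;	/* the burst at 100-104 collapses into one report at 109 */
}
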
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 610a8af795a1..5b152f25a8e1 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ef5e67fb567e..fe6e3f22eed7 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -45,13 +45,14 @@
#define STMPE_KEYPAD_MAX_ROWS 8
#define STMPE_KEYPAD_MAX_COLS 8
#define STMPE_KEYPAD_ROW_SHIFT 3
-#define STMPE_KEYPAD_KEYMAP_SIZE \
+#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
(STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
/**
* struct stmpe_keypad_variant - model-specific attributes
* @auto_increment: whether the KPC_DATA_BYTE register address
* auto-increments on multiple read
+ * @set_pullup: whether the pins need to have their pull-ups set
* @num_data: number of data bytes
* @num_normal_data: number of normal keys' data bytes
* @max_cols: maximum number of columns supported
@@ -61,6 +62,7 @@
*/
struct stmpe_keypad_variant {
bool auto_increment;
+ bool set_pullup;
int num_data;
int num_normal_data;
int max_cols;
@@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2401] = {
.auto_increment = false,
+ .set_pullup = true,
.num_data = 3,
.num_normal_data = 2,
.max_cols = 8,
@@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2403] = {
.auto_increment = true,
+ .set_pullup = true,
.num_data = 5,
.num_normal_data = 3,
.max_cols = 8,
@@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
};
+/**
+ * struct stmpe_keypad - STMPE keypad state container
+ * @stmpe: pointer to parent STMPE device
+ * @input: spawned input device
+ * @variant: STMPE variant
+ * @debounce_ms: debounce interval, in ms. Maximum is
+ * %STMPE_KEYPAD_MAX_DEBOUNCE.
+ * @scan_count: number of key scanning cycles to confirm key data.
+ * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
+ * @no_autorepeat: disable key autorepeat
+ * @rows: bitmask for the rows
+ * @cols: bitmask for the columns
+ * @keymap: the keymap
+ */
struct stmpe_keypad {
struct stmpe *stmpe;
struct input_dev *input;
const struct stmpe_keypad_variant *variant;
- const struct stmpe_keypad_platform_data *plat;
-
+ unsigned int debounce_ms;
+ unsigned int scan_count;
+ bool no_autorepeat;
unsigned int rows;
unsigned int cols;
-
- unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
+ unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
};
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
unsigned int col_gpios = variant->col_gpios;
unsigned int row_gpios = variant->row_gpios;
struct stmpe *stmpe = keypad->stmpe;
+ u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
unsigned int pins = 0;
+ unsigned int pu_pins = 0;
+ int ret;
int i;
/*
@@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
for (i = 0; i < variant->max_cols; i++) {
int num = __ffs(col_gpios);
- if (keypad->cols & (1 << i))
+ if (keypad->cols & (1 << i)) {
pins |= 1 << num;
+ pu_pins |= 1 << num;
+ }
col_gpios &= ~(1 << num);
}
@@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
row_gpios &= ~(1 << num);
}
- return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ if (ret)
+ return ret;
+
+ /*
+ * On STMPE24xx, set pin bias to pull-up on all keypad input
+ * pins (columns); these incidentally happen to be at most 8 pins,
+ * placed at GPIO0-7, so only the LSB of the pull-up register
+ * ever needs to be written.
+ */
+ if (variant->set_pullup) {
+ u8 val;
+
+ ret = stmpe_reg_read(stmpe, pureg);
+ if (ret)
+ return ret;
+
+ /* Do not touch unused pins, may be used for GPIO */
+ val = ret & ~pu_pins;
+ val |= pu_pins;
+
+ ret = stmpe_reg_write(stmpe, pureg, val);
+ }
+
+ return 0;
}
static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
- const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
struct stmpe *stmpe = keypad->stmpe;
int ret;
- if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
+ if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
return -EINVAL;
- if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
+ if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
return -EINVAL;
ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
STMPE_KPC_CTRL_MSB_SCAN_COUNT,
- plat->scan_count << 4);
+ keypad->scan_count << 4);
if (ret < 0)
return ret;
@@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
STMPE_KPC_CTRL_LSB_SCAN |
STMPE_KPC_CTRL_LSB_DEBOUNCE,
STMPE_KPC_CTRL_LSB_SCAN |
- (plat->debounce_ms << 1));
+ (keypad->debounce_ms << 1));
}
-static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
+ u32 used_rows, u32 used_cols)
{
int row, col;
- for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
- for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+ for (row = 0; row < used_rows; row++) {
+ for (col = 0; col < used_cols; col++) {
int code = MATRIX_SCAN_CODE(row, col,
- STMPE_KEYPAD_ROW_SHIFT);
+ STMPE_KEYPAD_ROW_SHIFT);
if (keypad->keymap[code] != KEY_RESERVED) {
keypad->rows |= 1 << row;
keypad->cols |= 1 << col;
@@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
}
}
-#ifdef CONFIG_OF
-static const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct stmpe_keypad_platform_data *plat;
-
- if (!np)
- return ERR_PTR(-ENODEV);
-
- plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
- if (!plat)
- return ERR_PTR(-ENOMEM);
-
- of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
- of_property_read_u32(np, "st,scan-count", &plat->scan_count);
-
- plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
-
- return plat;
-}
-#else
-static inline const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
-
static int stmpe_keypad_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- const struct stmpe_keypad_platform_data *plat;
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_keypad *keypad;
struct input_dev *input;
+ u32 rows;
+ u32 cols;
int error;
int irq;
- plat = stmpe->pdata->keypad;
- if (!plat) {
- plat = stmpe_keypad_of_probe(&pdev->dev);
- if (IS_ERR(plat))
- return PTR_ERR(plat);
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
+ keypad->stmpe = stmpe;
+ keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
+
+ of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
+ of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
+ keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
input = devm_input_allocate_device(&pdev->dev);
if (!input)
return -ENOMEM;
@@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- STMPE_KEYPAD_MAX_ROWS,
- STMPE_KEYPAD_MAX_COLS,
+ error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
+ if (error)
+ return error;
+
+ error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
keypad->keymap, input);
if (error)
return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
- if (!plat->no_autorepeat)
+ if (!keypad->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- stmpe_keypad_fill_used_pins(keypad);
+ stmpe_keypad_fill_used_pins(keypad, rows, cols);
- keypad->stmpe = stmpe;
- keypad->plat = plat;
keypad->input = input;
- keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
error = stmpe_keypad_chip_init(keypad);
if (error < 0)
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d125a019383f..d88d73d83552 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
unsigned char *pkt,
unsigned char pkt_id)
{
+ /*
+ * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
+ * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
+ * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
+ * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
+ * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
+ * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
+ * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
+ * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
+ * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
+ * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
+ * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
+ * L: Left button
+ * R / M: Non-clickpads: Right / Middle button
+ * Clickpads: When > 2 fingers are down, and some fingers
+ * are in the button area, then the 2 coordinates reported
+ * are for fingers outside the button area and these report
+ * extra fingers being present in the right / left button
+ * area. Note these fingers are not added to the F field!
+ * so if a TWO packet is received and R = 1 then there are
+ * 3 fingers down, etc.
+ * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
+ * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+ * otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+ * in NEW fmt
+ * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+ */
+
mt[0].x = ((pkt[2] & 0x80) << 4);
mt[0].x |= ((pkt[2] & 0x3F) << 5);
mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
static int alps_get_mt_count(struct input_mt_pos *mt)
{
- int i;
+ int i, fingers = 0;
- for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
- /* empty */;
+ for (i = 0; i < MAX_TOUCHES; i++) {
+ if (mt[i].x != 0 || mt[i].y != 0)
+ fingers++;
+ }
- return i;
+ return fingers;
}
static int alps_decode_packet_v7(struct alps_fields *f,
unsigned char *p,
struct psmouse *psmouse)
{
+ struct alps_data *priv = psmouse->private;
unsigned char pkt_id;
pkt_id = alps_get_packet_id_v7(p);
@@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
return 0;
if (pkt_id == V7_PACKET_ID_UNKNOWN)
return -1;
+ /*
+ * NEW packets are sent to indicate a discontinuity in the finger
+ * coordinate reporting. Specifically a finger may have moved from
+ * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
+ * us.
+ *
+ * NEW packets have 3 problems:
+ * 1) They do not contain middle / right button info (on non-clickpads);
+ * this can be worked around by preserving the old button state
+ * 2) They do not contain an accurate finger count, and they are
+ * typically sent when the number of fingers changes. We cannot use
+ * the old finger count, as that may not match the number of
+ * touch coordinates available in the NEW packet
+ * 3) Their x data for the second touch is inaccurate, leading to
+ * a possible jump of the x coordinate by 16 units when the first
+ * non-NEW packet comes in
+ * Since problems 2 & 3 cannot be worked around, just ignore them.
+ */
+ if (pkt_id == V7_PACKET_ID_NEW)
+ return 1;
alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
- if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
- f->left = (p[0] & 0x80) >> 7;
+ if (pkt_id == V7_PACKET_ID_TWO)
+ f->fingers = alps_get_mt_count(f->mt);
+ else /* pkt_id == V7_PACKET_ID_MULTI */
+ f->fingers = 3 + (p[5] & 0x03);
+
+ f->left = (p[0] & 0x80) >> 7;
+ if (priv->flags & ALPS_BUTTONPAD) {
+ if (p[0] & 0x20)
+ f->fingers++;
+ if (p[0] & 0x10)
+ f->fingers++;
+ } else {
f->right = (p[0] & 0x20) >> 5;
f->middle = (p[0] & 0x10) >> 4;
}
- if (pkt_id == V7_PACKET_ID_TWO)
- f->fingers = alps_get_mt_count(f->mt);
- else if (pkt_id == V7_PACKET_ID_MULTI)
- f->fingers = 3 + (p[5] & 0x03);
+ /* Sometimes a single touch is reported in mt[1] rather than mt[0] */
+ if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
+ f->mt[0].x = f->mt[1].x;
+ f->mt[0].y = f->mt[1].y;
+ f->mt[1].x = 0;
+ f->mt[1].y = 0;
+ }
return 0;
}
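
The decode changes above derive the finger count differently per packet type: TWO packets count the non-zero coordinates, MULTI packets encode "fingers - 3" in byte 5, and on clickpads any fingers resting in the button areas are added on top, since they are not part of the F field. A worked example for a MULTI packet on a clickpad; the sample bytes are invented.

#include <stdio.h>

int main(void)
{
	unsigned char p0 = 0x80 | 0x20;	/* L pressed, one finger in right button area */
	unsigned char p5 = 0x01;	/* MULTI packet: F field = 1 -> 3 + 1 fingers */
	int buttonpad = 1;		/* ALPS_BUTTONPAD-style clickpad */
	int fingers, left;

	fingers = 3 + (p5 & 0x03);	/* MULTI packets encode "fingers - 3" */
	left = (p0 & 0x80) >> 7;

	if (buttonpad) {
		/* fingers resting in the button areas are not in the F field */
		if (p0 & 0x20)
			fingers++;
		if (p0 & 0x10)
			fingers++;
	}

	printf("left=%d fingers=%d\n", left, fingers);	/* left=1 fingers=5 */
	return 0;
}
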
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index f2b978026407..6e22682c8255 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+ },
+ },
#endif
{ }
};
@@ -1520,6 +1536,8 @@ static int elantech_set_properties(struct elantech_data *etd)
case 7:
case 8:
case 9:
+ case 10:
+ case 13:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f9472920d986..23e26e0768b5 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN0039",
- "LEN2002", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN0037",
+ "LEN0039", "LEN2002", "LEN2004",
+ NULL},
1024, 5112, 2024, 4832
},
{
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
- "LEN0037",
+ "LEN0037", /* X1 Carbon 2nd */
"LEN0038",
"LEN0039", /* T440s */
"LEN0041",
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 30c8b6998808..354d47ecd66a 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
+TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
TP_DEF_PTSON);
@@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_upthresh.dattr.attr,
&psmouse_attr_ztime.dattr.attr,
&psmouse_attr_jenks.dattr.attr,
+ &psmouse_attr_drift_time.dattr.attr,
&psmouse_attr_press_to_select.dattr.attr,
&psmouse_attr_skipback.dattr.attr,
&psmouse_attr_ext_dev.dattr.attr,
@@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
+ TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
/* toggles */
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
+ TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
/* toggles */
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index ecd0547964a5..5617ed3a7d7a 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -70,6 +70,9 @@
#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */
#define TP_Z_TIME 0x5E /* How sharp of a press */
#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */
+#define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */
+ /* must last (x*107ms) for drift */
+ /* correction to occur */
/*
* Toggling Flag bits
@@ -120,6 +123,7 @@
#define TP_DEF_UP_THRESH 0xFF
#define TP_DEF_Z_TIME 0x26
#define TP_DEF_JENKS_CURV 0x87
+#define TP_DEF_DRIFT_TIME 0x05
/* Toggles */
#define TP_DEF_MB 0x00
@@ -137,6 +141,7 @@ struct trackpoint_data
unsigned char draghys, mindrag;
unsigned char thresh, upthresh;
unsigned char ztime, jenks;
+ unsigned char drift_time;
/* toggles */
unsigned char press_to_select;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c66d1b53843e..c11556563ef0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Medion Akoya E7225 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
/* Blue FB5601 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "blue"),
@@ -415,6 +423,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Acer Aspire 7738 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
@@ -745,6 +760,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{ }
};
+/*
+ * Some laptops need a keyboard reset before probing for the trackpad, so
+ * that it gets detected, initialised and finally works.
+ */
+static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ {
+ /* Gigabyte P35 v2 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ },
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ },
+ {
+ /* Gigabyte P34 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ },
+ },
+ { }
+};
+
#endif /* CONFIG_X86 */
#ifdef CONFIG_PNP
@@ -1040,6 +1084,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
+ if (dmi_check_system(i8042_dmi_kbdreset_table))
+ i8042_kbdreset = true;
+
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 924e4bf357fb..986a71c614b0 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
module_param_named(notimeout, i8042_notimeout, bool, 0);
MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+static bool i8042_kbdreset;
+module_param_named(kbdreset, i8042_kbdreset, bool, 0);
+MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
+
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
return -1;
/*
+ * Reset keyboard (needed on some laptops to successfully detect
+ * touchpad, e.g., some Gigabyte laptop models with Elantech
+ * touchpads).
+ */
+ if (i8042_kbdreset) {
+ pr_warn("Attempting to reset device connected to KBD port\n");
+ i8042_kbd_write(NULL, (unsigned char) 0xff);
+ }
+
+/*
* Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
* used it for a PCI card or something else.
*/
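
Since the new flag is hooked up through module_param_named(kbdreset, ...), the reset can also be forced by hand on machines that are not (yet) listed in the DMI table above: booting with i8042.kbdreset=1, or passing kbdreset=1 when i8042 is loaded as a module, should behave the same as a table match. The exact spelling follows the usual module-parameter convention rather than anything stated in this hunk, so treat it as an assumption.
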
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index bb070206223c..95ee92a91bd2 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -99,13 +99,9 @@
#define MXT_T6_STATUS_COMSERR (1 << 2)
/* MXT_GEN_POWER_T7 field */
-struct t7_config {
- u8 idle;
- u8 active;
-} __packed;
-
-#define MXT_POWER_CFG_RUN 0
-#define MXT_POWER_CFG_DEEPSLEEP 1
+#define MXT_POWER_IDLEACQINT 0
+#define MXT_POWER_ACTVACQINT 1
+#define MXT_POWER_ACTV2IDLETO 2
/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0
@@ -117,6 +113,7 @@ struct t7_config {
#define MXT_ACQUIRE_ATCHCALSTHR 7
/* MXT_TOUCH_MULTI_T9 field */
+#define MXT_TOUCH_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@@ -256,7 +253,6 @@ struct mxt_data {
bool update_input;
u8 last_message_count;
u8 num_touchids;
- struct t7_config t7_cfg;
/* Cached parameters from object table */
u16 T5_address;
@@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
data->t6_status = status;
}
+static int mxt_write_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object || offset >= mxt_obj_size(object))
+ return -EINVAL;
+
+ reg = object->start_address;
+ return mxt_write_reg(data->client, reg + offset, val);
+}
+
static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
@@ -1742,60 +1752,6 @@ err_free_object_table:
return error;
}
-static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
-{
- struct device *dev = &data->client->dev;
- int error;
- struct t7_config *new_config;
- struct t7_config deepsleep = { .active = 0, .idle = 0 };
-
- if (sleep == MXT_POWER_CFG_DEEPSLEEP)
- new_config = &deepsleep;
- else
- new_config = &data->t7_cfg;
-
- error = __mxt_write_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), new_config);
- if (error)
- return error;
-
- dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
- new_config->active, new_config->idle);
-
- return 0;
-}
-
-static int mxt_init_t7_power_cfg(struct mxt_data *data)
-{
- struct device *dev = &data->client->dev;
- int error;
- bool retry = false;
-
-recheck:
- error = __mxt_read_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), &data->t7_cfg);
- if (error)
- return error;
-
- if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
- if (!retry) {
- dev_dbg(dev, "T7 cfg zero, resetting\n");
- mxt_soft_reset(data);
- retry = true;
- goto recheck;
- } else {
- dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
- data->t7_cfg.active = 20;
- data->t7_cfg.idle = 100;
- return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
- }
- }
-
- dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
- data->t7_cfg.active, data->t7_cfg.idle);
- return 0;
-}
-
static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
@@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
dev_warn(dev, "Error %d updating config\n", error);
}
- error = mxt_init_t7_power_cfg(data);
- if (error) {
- dev_err(dev, "Failed to initialize power cfg\n");
- return error;
- }
-
error = mxt_initialize_t9_input_device(data);
if (error)
return error;
@@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-
- /* Recalibrate since chip has been in deep sleep */
- mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+ /* Touch enable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
}
static void mxt_stop(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+ /* Touch disable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
}
static int mxt_input_open(struct input_dev *dev)
@@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
+ mxt_soft_reset(data);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3793fcc7e5db..d4c24fb7704f 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
}
#define EDT_ATTR_CHECKSET(name, reg) \
+do { \
if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
- edt_ft5x06_register_write(tsdata, reg, pdata->name)
+ edt_ft5x06_register_write(tsdata, reg, pdata->name); \
+} while (0)
#define EDT_GET_PROP(name, reg) { \
u32 val; \
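
The do { ... } while (0) wrapper added to EDT_ATTR_CHECKSET matters because a macro that expands to a bare if-statement misparses as soon as the call site is itself the body of an if/else: the caller's trailing semicolon and dangling else end up binding to the macro's inner if. A minimal standalone illustration with a hypothetical CHECKSET macro (not the driver's real one):

#include <stdio.h>

/* Unsafe variant: expands to a bare if, so a caller's else mis-binds. */
#define CHECKSET_UNSAFE(val) \
	if ((val) >= 0 && (val) <= 255) \
		printf("write %d\n", (val))

/* Safe variant: do { } while (0) turns the body into one statement. */
#define CHECKSET_SAFE(val) \
do { \
	if ((val) >= 0 && (val) <= 255) \
		printf("write %d\n", (val)); \
} while (0)

int main(void)
{
	int v = 300;

	if (v != 300)
		CHECKSET_SAFE(v);	/* behaves as expected next to an else */
	else
		printf("skipped\n");	/* with CHECKSET_UNSAFE this else would
					 * silently attach to the macro's inner
					 * if and never run */

	return 0;
}
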
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1232336b960e..40dfbc0444c0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
if (action != BUS_NOTIFY_REMOVED_DEVICE)
return 0;
- /*
- * If the device is still attached to a device driver we can't
- * tear down the domain yet as DMA mappings may still be in use.
- * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
- */
- if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
- return 0;
-
domain = find_domain(dev);
if (!domain)
return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
domain_remove_one_dev_info(old_domain, dev);
else
domain_remove_dev_info(old_domain);
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
}
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 68dfb0fd5ee9..748693192c20 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
static u64 ipmmu_page_prot(unsigned int prot, u64 type)
{
- u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+ u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
| ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
| ARM_VMSA_PTE_NS | type;
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
if (prot & IOMMU_CACHE)
pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
- if (prot & IOMMU_EXEC)
- pgprot &= ~ARM_VMSA_PTE_XN;
+ if (prot & IOMMU_NOEXEC)
+ pgprot |= ARM_VMSA_PTE_XN;
else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
/* If no access create a faulting entry to avoid TLB fills. */
pgprot &= ~ARM_VMSA_PTE_PAGE;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b2023af384b9..6a8b1ec4a48a 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
.remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rk_iommu_dt_ids),
},
};
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
+ .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
return 0;
}
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index d111ac779c40..63cd031b2c28 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -28,7 +28,7 @@
#define AT91_AIC_IRQ_MIN_PRIORITY 0
#define AT91_AIC_IRQ_MAX_PRIORITY 7
-#define AT91_AIC_SRCTYPE GENMASK(7, 6)
+#define AT91_AIC_SRCTYPE GENMASK(6, 5)
#define AT91_AIC_SRCTYPE_LOW (0 << 5)
#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
return -EINVAL;
}
- *val &= AT91_AIC_SRCTYPE;
+ *val &= ~AT91_AIC_SRCTYPE;
*val |= aic_type;
return 0;
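
Both hunks above concern the same two-bit field: the SRCTYPE values are encoded as x << 5, so the field occupies bits 5-6 and the correct mask is GENMASK(6, 5) == 0x60, not GENMASK(7, 6) == 0xc0; and when programming a new type the field must be cleared with &= ~AT91_AIC_SRCTYPE rather than AND-ing *val with the mask, which would have wiped every other bit. A standalone sketch of the arithmetic, with GENMASK redefined locally for illustration:

#include <assert.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(h, l): bits l..h set, inclusive. */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

#define AT91_AIC_SRCTYPE		GENMASK(6, 5)	/* the two SRCTYPE bits: 0x60 */
#define AT91_AIC_SRCTYPE_FALLING	(1 << 5)

int main(void)
{
	unsigned long smr = 0xff;	/* pretend mode register, all bits set */

	assert(GENMASK(6, 5) == 0x60);	/* new mask matches the x << 5 field */
	assert(GENMASK(7, 6) == 0xc0);	/* old mask covered the wrong bits */

	smr &= ~AT91_AIC_SRCTYPE;	/* clear only the source-type field */
	smr |= AT91_AIC_SRCTYPE_FALLING;

	printf("smr = 0x%02lx\n", smr);	/* 0xff -> 0x9f -> 0xbf */
	return 0;
}
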
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 86e4684adeb1..d8996bdf0f61 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* of two entries. No, the architecture doesn't let you
* express an ITT with a single entry.
*/
- nr_ites = max(2, roundup_pow_of_two(nvecs));
+ nr_ites = max(2UL, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kmalloc(sz, GFP_KERNEL);
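
The 2UL change is purely about types: roundup_pow_of_two() evaluates to an unsigned long, and the kernel's type-checked max() complains ("comparison of distinct pointer types") when its two arguments differ, so the plain int literal 2 has to become 2UL. A rough userspace imitation of that type check, for illustration only:

#include <stdio.h>

/*
 * Rough imitation of the kernel's type-checked max(): comparing the
 * addresses of the two temporaries warns when their types differ.
 */
#define max_checked(x, y) ({				\
	__typeof__(x) _x = (x);				\
	__typeof__(y) _y = (y);				\
	(void) (&_x == &_y);				\
	_x > _y ? _x : _y; })

int main(void)
{
	unsigned long rounded = 8;	/* e.g. roundup_pow_of_two(5) */

	/* max_checked(2, rounded) would warn: int * vs unsigned long *. */
	unsigned long nr_ites = max_checked(2UL, rounded);

	printf("nr_ites = %lu\n", nr_ites);
	return 0;
}
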
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 29b8f21b74d0..6bc2deb73d53 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
* It will be refined as each CPU probes its ID.
*/
for (i = 0; i < NR_HIP04_CPU_IF; i++)
- hip04_cpu_map[i] = 0xff;
+ hip04_cpu_map[i] = 0xffff;
/*
* Find out how many interrupts are supported.
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 7e342df6a62f..0b0d2c00a2df 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
return -ENOMEM;
chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
- if (!chip_data->intpol_base) {
+ if (IS_ERR(chip_data->intpol_base)) {
pr_err("mtk_sysirq: unable to map sysirq register\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(chip_data->intpol_base);
goto out_free;
}
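
The mtk-sysirq fix follows from of_io_request_and_map() reporting failure through an ERR_PTR()-encoded pointer rather than NULL, so the old NULL test could never trigger and the real error code was replaced by -ENOMEM. A userspace-sized sketch of the error-pointer idiom, with local stand-ins for the kernel helpers and a made-up map_resource() in place of the real call:

#include <errno.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Made-up mapping helper that fails the way of_io_request_and_map() does. */
static void *map_resource(int fail)
{
	return fail ? ERR_PTR(-EBUSY) : (void *)0x1000;
}

int main(void)
{
	void *base = map_resource(1);

	if (IS_ERR(base)) {	/* a NULL check would sail right past this */
		printf("map failed: %ld\n", PTR_ERR(base));
		return 1;
	}

	printf("mapped at %p\n", base);
	return 0;
}
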
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 28718d3e8281..c03f140acbae 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
return ret;
}
-static int __init omap_init_irq_legacy(u32 base)
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
int j, irq_base;
@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
irq_base = 0;
}
- domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
+ domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
omap_irq_soft_reset();
@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
{
int ret;
- if (node)
+ /*
+ * FIXME: the legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
+ * is still not ready for linear IRQ domains; because of that we need to
+ * temporarily "blacklist" OMAP2 and OMAP3 devices from using the linear
+ * IRQ domain until that driver is finally fixed.
+ */
+ if (of_device_is_compatible(node, "ti,omap2-intc") ||
+ of_device_is_compatible(node, "ti,omap3-intc")) {
+ struct resource res;
+
+ if (of_address_to_resource(node, 0, &res))
+ return -ENOMEM;
+
+ base = res.start;
+ ret = omap_init_irq_legacy(base, node);
+ } else if (node) {
ret = omap_init_irq_of(node);
- else
- ret = omap_init_irq_legacy(base);
+ } else {
+ ret = omap_init_irq_legacy(base, NULL);
+ }
if (ret == 0)
omap_irq_enable_protection();
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index a82e542ffc21..0b380603a578 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+ byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
byte force_mt_info = false;
byte dir;
dword d;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 26515c27ea8c..25e419752a7b 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
led_dat->sata = 0;
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- /*
- * If available, expose the SATA activity blink capability through
- * a "sata" sysfs attribute.
- */
- if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
- led_dat->cdev.groups = netxbig_led_groups;
led_dat->mode_addr = template->mode_addr;
led_dat->mode_val = template->mode_val;
led_dat->bright_addr = template->bright_addr;
led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
led_dat->timer = pdata->timer;
led_dat->num_timer = pdata->num_timer;
+ /*
+ * If available, expose the SATA activity blink capability through
+ * a "sata" sysfs attribute.
+ */
+ if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+ led_dat->cdev.groups = netxbig_led_groups;
return led_classdev_register(&pdev->dev, &led_dat->cdev);
}
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
index f956ef26c0ce..fb7493dcfb79 100644
--- a/drivers/mcb/mcb-internal.h
+++ b/drivers/mcb/mcb-internal.h
@@ -7,6 +7,7 @@
#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
#define CHAMELEON_FILENAME_LEN 12
#define CHAMELEONV2_MAGIC 0xabce
+#define CHAM_HEADER_SIZE 0x200
enum chameleon_descriptor_type {
CHAMELEON_DTYPE_GENERAL = 0x0,
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index b59181965643..5e1bd5db02c8 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -17,6 +17,7 @@
struct priv {
struct mcb_bus *bus;
+ phys_addr_t mapbase;
void __iomem *base;
};
@@ -31,8 +32,8 @@ static int mcb_pci_get_irq(struct mcb_device *mdev)
static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct resource *res;
struct priv *priv;
- phys_addr_t mapbase;
int ret;
int num_cells;
unsigned long flags;
@@ -47,19 +48,21 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- mapbase = pci_resource_start(pdev, 0);
- if (!mapbase) {
+ priv->mapbase = pci_resource_start(pdev, 0);
+ if (!priv->mapbase) {
dev_err(&pdev->dev, "No PCI resource\n");
goto err_start;
}
- ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request PCI BARs\n");
+ res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
+ KBUILD_MODNAME);
+ if (IS_ERR(res)) {
+ dev_err(&pdev->dev, "Failed to request PCI memory\n");
+ ret = PTR_ERR(res);
goto err_start;
}
- priv->base = pci_iomap(pdev, 0, 0);
+ priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
@@ -84,7 +87,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
priv->bus->get_irq = mcb_pci_get_irq;
- ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
+ ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
if (ret < 0)
goto err_drvdata;
num_cells = ret;
@@ -93,8 +96,10 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mcb_bus_add_devices(priv->bus);
+ return 0;
+
err_drvdata:
- pci_iounmap(pdev, priv->base);
+ iounmap(priv->base);
err_ioremap:
pci_release_region(pdev, 0);
err_start:
@@ -107,6 +112,10 @@ static void mcb_pci_remove(struct pci_dev *pdev)
struct priv *priv = pci_get_drvdata(pdev);
mcb_release_bus(priv->bus);
+
+ iounmap(priv->base);
+ release_region(priv->mapbase, CHAM_HEADER_SIZE);
+ pci_disable_device(pdev);
}
static const struct pci_device_id mcb_pci_tbl[] = {
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 9fc616c2755e..c1c010498a21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
} __packed;
struct dm_cache_metadata {
+ atomic_t ref_count;
+ struct list_head list;
+
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
/*----------------------------------------------------------------*/
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
- sector_t data_block_size,
- bool may_format_device,
- size_t policy_hint_size)
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
{
int r;
struct dm_cache_metadata *cmd;
@@ -680,9 +683,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
DMERR("could not allocate metadata struct");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ atomic_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -705,10 +709,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
return cmd;
}
+/*
+ * We keep a little list of ref counted metadata objects to prevent two
+ * different target instances creating separate bufio instances. This is
+ * an issue if a table is reloaded before the suspend.
+ */
+static DEFINE_MUTEX(table_lock);
+static LIST_HEAD(table);
+
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
+{
+ struct dm_cache_metadata *cmd;
+
+ list_for_each_entry(cmd, &table, list)
+ if (cmd->bdev == bdev) {
+ atomic_inc(&cmd->ref_count);
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd, *cmd2;
+
+ mutex_lock(&table_lock);
+ cmd = lookup(bdev);
+ mutex_unlock(&table_lock);
+
+ if (cmd)
+ return cmd;
+
+ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+ if (!IS_ERR(cmd)) {
+ mutex_lock(&table_lock);
+ cmd2 = lookup(bdev);
+ if (cmd2) {
+ mutex_unlock(&table_lock);
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ return cmd2;
+ }
+ list_add(&cmd->list, &table);
+ mutex_unlock(&table_lock);
+ }
+
+ return cmd;
+}
+
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
+{
+ if (cmd->data_block_size != data_block_size) {
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ (unsigned long long) data_block_size,
+ (unsigned long long) cmd->data_block_size);
+ return false;
+ }
+
+ return true;
+}
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+ may_format_device, policy_hint_size);
+
+ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+ dm_cache_metadata_close(cmd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cmd;
+}
+
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
- __destroy_persistent_data_objects(cmd);
- kfree(cmd);
+ if (atomic_dec_and_test(&cmd->ref_count)) {
+ mutex_lock(&table_lock);
+ list_del(&cmd->list);
+ mutex_unlock(&table_lock);
+
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ }
}
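
The new open path is a double-checked create: lookup() under table_lock takes a reference if the bdev is already known, otherwise the metadata object is built outside the lock and a second lookup decides whether to publish it or discard it in favour of a concurrently created one, with dm_cache_metadata_close() dropping the last reference and unlinking. A generic userspace sketch of the same discipline, with a pthread mutex standing in for table_lock:

#include <pthread.h>
#include <stdlib.h>

struct obj {
	int key;
	int refcount;
	struct obj *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table;

static struct obj *lookup(int key)	/* call with table_lock held */
{
	struct obj *o;

	for (o = table; o; o = o->next)
		if (o->key == key) {
			o->refcount++;
			return o;
		}
	return NULL;
}

static struct obj *lookup_or_create(int key)
{
	struct obj *o, *o2;

	pthread_mutex_lock(&table_lock);
	o = lookup(key);
	pthread_mutex_unlock(&table_lock);
	if (o)
		return o;

	/* Slow construction happens outside the lock. */
	o = calloc(1, sizeof(*o));
	if (!o)
		return NULL;
	o->key = key;
	o->refcount = 1;

	pthread_mutex_lock(&table_lock);
	o2 = lookup(key);		/* did someone beat us to it? */
	if (o2) {
		pthread_mutex_unlock(&table_lock);
		free(o);		/* discard the duplicate */
		return o2;
	}
	o->next = table;
	table = o;
	pthread_mutex_unlock(&table_lock);
	return o;
}

int main(void)
{
	struct obj *a = lookup_or_create(42);
	struct obj *b = lookup_or_create(42);	/* same object, refcount == 2 */

	return !(a == b && a->refcount == 2);
}
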
/*
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1e96d7889f51..e1650539cc2f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -221,7 +221,13 @@ struct cache {
struct list_head need_commit_migrations;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
- atomic_t nr_migrations;
+ atomic_t nr_allocated_migrations;
+
+ /*
+ * The number of in flight migrations that are performing
+ * background io. eg, promotion, writeback.
+ */
+ atomic_t nr_io_migrations;
wait_queue_head_t quiescing_wait;
atomic_t quiescing;
@@ -258,7 +264,6 @@ struct cache {
struct dm_deferred_set *all_io_ds;
mempool_t *migration_pool;
- struct dm_cache_migration *next_migration;
struct dm_cache_policy *policy;
unsigned policy_nr_args;
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
dm_bio_prison_free_cell(cache->prison, cell);
}
+static struct dm_cache_migration *alloc_migration(struct cache *cache)
+{
+ struct dm_cache_migration *mg;
+
+ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ if (mg) {
+ mg->cache = cache;
+ atomic_inc(&mg->cache->nr_allocated_migrations);
+ }
+
+ return mg;
+}
+
+static void free_migration(struct dm_cache_migration *mg)
+{
+ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
+ wake_up(&mg->cache->migration_wait);
+
+ mempool_free(mg, mg->cache->migration_pool);
+}
+
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
if (!p->mg) {
- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ p->mg = alloc_migration(cache);
if (!p->mg)
return -ENOMEM;
}
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
free_prison_cell(cache, p->cell1);
if (p->mg)
- mempool_free(p->mg, cache->migration_pool);
+ free_migration(p->mg);
}
static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
* Migration covers moving data from the origin device to the cache, or
* vice versa.
*--------------------------------------------------------------*/
-static void free_migration(struct dm_cache_migration *mg)
-{
- mempool_free(mg, mg->cache->migration_pool);
-}
-
-static void inc_nr_migrations(struct cache *cache)
+static void inc_io_migrations(struct cache *cache)
{
- atomic_inc(&cache->nr_migrations);
+ atomic_inc(&cache->nr_io_migrations);
}
-static void dec_nr_migrations(struct cache *cache)
+static void dec_io_migrations(struct cache *cache)
{
- atomic_dec(&cache->nr_migrations);
-
- /*
- * Wake the worker in case we're suspending the target.
- */
- wake_up(&cache->migration_wait);
+ atomic_dec(&cache->nr_io_migrations);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
wake_worker(cache);
}
-static void cleanup_migration(struct dm_cache_migration *mg)
+static void free_io_migration(struct dm_cache_migration *mg)
{
- struct cache *cache = mg->cache;
+ dec_io_migrations(mg->cache);
free_migration(mg);
- dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg)
cell_defer(cache, mg->new_ocell, true);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
if (mg->writeback) {
clear_dirty(cache, mg->old_oblock, mg->cblock);
cell_defer(cache, mg->old_ocell, false);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
} else if (mg->demote) {
@@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
mg->old_oblock);
if (mg->promote)
cell_defer(cache, mg->new_ocell, true);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
} else {
if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
policy_remove_mapping(cache->policy, mg->new_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
}
@@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
} else {
if (mg->invalidate)
policy_remove_mapping(cache->policy, mg->old_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
}
} else {
@@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
bio_endio(mg->new_ocell->holder, 0);
cell_defer(cache, mg->new_ocell, false);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
}
@@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = cell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = new_ocell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
static bool spare_migration_bandwidth(struct cache *cache)
{
- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
cache->sectors_per_block;
return current_volume < cache->migration_threshold;
}
@@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache)
static void wait_for_migrations(struct cache *cache)
{
- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}
static void stop_worker(struct cache *cache)
@@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache)
{
unsigned i;
- if (cache->next_migration)
- mempool_free(cache->next_migration, cache->migration_pool);
-
if (cache->migration_pool)
mempool_destroy(cache->migration_pool);
@@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
- atomic_set(&cache->nr_migrations, 0);
+ atomic_set(&cache->nr_allocated_migrations, 0);
+ atomic_set(&cache->nr_io_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- cache->next_migration = NULL;
-
cache->need_tick_bio = true;
cache->sized = false;
cache->invalidate = false;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 8735543eacdb..07705ee181e3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+static void check_for_space(struct pool *pool)
+{
+ int r;
+ dm_block_t nr_free;
+
+ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
+ return;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
+ if (r)
+ return;
+
+ if (nr_free)
+ set_pool_mode(pool, PM_WRITE);
+}
+
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
@@ -1141,6 +1159,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+ else
+ check_for_space(pool);
return r;
}
@@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
}
}
-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
@@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_cell = process_cell_read_only;
pool->process_discard_cell = process_discard_cell;
pool->process_prepared_mapping = process_prepared_mapping;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard;
if (!pool->pf.error_if_no_space && no_space_timeout)
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3367,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+ dm_device_name(pool->pool_md));
+ return -EINVAL;
+ }
+
if (!strcasecmp(argv[0], "create_thin"))
r = process_create_thin_mesg(argc, argv, pool);
@@ -3814,6 +3838,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto bad;
}
+ atomic_set(&tc->refcount, 1);
+ init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
@@ -3826,9 +3852,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_put(pool_md);
- atomic_set(&tc->refcount, 1);
- init_completion(&tc->can_destroy);
-
return 0;
bad:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4c06585bf165..2caf5b374649 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -206,6 +206,9 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
+
struct dm_stats stats;
};
@@ -899,7 +902,7 @@ static void disable_write_same(struct mapped_device *md)
static void clone_endio(struct bio *bio, int error)
{
- int r = 0;
+ int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
@@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
{
struct dm_table *map = NULL;
- if (dm_suspended_internally_md(md))
+ if (md->internal_suspend_count++)
return; /* nested internal suspend */
if (dm_suspended_md(md)) {
@@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
static void __dm_internal_resume(struct mapped_device *md)
{
- if (!dm_suspended_internally_md(md))
+ BUG_ON(!md->internal_suspend_count);
+
+ if (--md->internal_suspend_count)
return; /* resume from nested internal suspend */
if (dm_suspended_md(md))
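
Switching from the dm_suspended_internally_md() test to internal_suspend_count makes internal suspend/resume properly nestable: only the 0 -> 1 transition performs the real suspend, only the final 1 -> 0 resume undoes it, and the BUG_ON catches unbalanced resumes. A generic counter sketch of that discipline (not the dm code itself):

#include <assert.h>
#include <stdio.h>

static unsigned suspend_count;

static void do_suspend(void) { printf("really suspending\n"); }
static void do_resume(void)  { printf("really resuming\n"); }

static void internal_suspend(void)
{
	if (suspend_count++)
		return;		/* nested: already suspended */
	do_suspend();
}

static void internal_resume(void)
{
	assert(suspend_count);	/* unbalanced resume is a bug */
	if (--suspend_count)
		return;		/* still nested: stay suspended */
	do_resume();
}

int main(void)
{
	internal_suspend();	/* really suspends */
	internal_suspend();	/* nested, no-op */
	internal_resume();	/* still suspended */
	internal_resume();	/* really resumes */
	return 0;
}
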
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index db99ca2613ba..06931f6fa26c 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR4400] = {
- .name = "Hauppauge WinTV-HVR4400",
+ .name = "Hauppauge WinTV-HVR4400/HVR5500",
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
.tuner_addr = 0x60, /* 0xc0 >> 1 */
.tuner_bus = 1,
},
+ [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
+ .name = "Hauppauge WinTV Starburst",
+ .portb = CX23885_MPEG_DVB,
+ },
[CX23885_BOARD_AVERMEDIA_HC81R] = {
.name = "AVerTV Hybrid Express Slim HC81R",
.tuner_type = TUNER_XC2028,
@@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = {
}, {
.subvendor = 0x0070,
.subdevice = 0xc108,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc138,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc12a,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc1f8,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x1461,
.subdevice = 0xd939,
@@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
/* GPIO-8 tda10071 demod reset */
- /* GPIO-9 si2165 demod reset */
+ /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/
/* Put the parts into reset and back */
cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
@@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_T982:
ts1->gen_ctrl_val = 0x5; /* Parallel */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 1d9d0f86ca8c..1ad49946d7fa 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
cx23885_shutdown(dev);
- pci_disable_device(pci_dev);
-
/* unregister stuff */
free_irq(pci_dev->irq, dev);
+ pci_disable_device(pci_dev);
+
cx23885_dev_unregister(dev);
vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index c47d18270cfc..a9c450d4b54e 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(tda10071_attach,
+ &hauppauge_tda10071_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_a8293_config);
+ }
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_S950:
i2c_bus = &dev->i2c_bus[0];
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index f55cd12da0fd..36f2f96c40e4 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -99,6 +99,7 @@
#define CX23885_BOARD_DVBSKY_S950 49
#define CX23885_BOARD_DVBSKY_S952 50
#define CX23885_BOARD_DVBSKY_T982 51
+#define CX23885_BOARD_HAUPPAUGE_STARBURST 52
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index b463fe172d16..3fe9047ef466 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->card, video->video.name, sizeof(cap->card));
strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 8efe40337608..6d885239b16a 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
{
strcpy(cap->driver, "atmel-isi");
strcpy(cap->card, "Atmel Image Sensor Interface");
- cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
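
This hunk (and the matching ones in the soc_camera hosts below) moves the per-node flags into cap->device_caps and advertises V4L2_CAP_DEVICE_CAPS in cap->capabilities, which the V4L2 spec requires once device_caps is filled in. A minimal sketch of the resulting querycap shape for a capture-only node; the driver, card and function names here are made up for illustration:

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

/* Illustrative querycap for a capture-only, streaming-only video node. */
static int demo_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "demo-cam", sizeof(cap->driver));
	strlcpy(cap->card, "Demo Capture Device", sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:demo-cam", sizeof(cap->bus_info));

	/* What this particular /dev/videoN node can do ... */
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	/* ... and what the whole device offers, plus the DEVICE_CAPS flag. */
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}
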
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index ce72bd26a6ac..192377f55840 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index a60c3bb0e4cc..0b3299dee05d 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index e6b93281f246..16f65ecb70a3 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 951226af0eba..8d6e343fec0f 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 0c1f55648106..9f1473c0a0cf 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8b27b3eb2b25..71787702d4a2 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 0f345b1f9014..f327c49d7e09 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
{
"Mygica T230 DVB-T/T2/C",
{ NULL },
- { &cxusb_table[22], NULL },
+ { &cxusb_table[20], NULL },
},
}
};
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 1b158f1167ed..536210b39428 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
module_param_array(vbi_nr, int, NULL, 0444);
MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor");
-static struct v4l2_capability pvr_capability ={
- .driver = "pvrusb2",
- .card = "Hauppauge WinTV pvr-usb2",
- .bus_info = "usb",
- .version = LINUX_VERSION_CODE,
- .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
- V4L2_CAP_READWRITE),
-};
-
static struct v4l2_fmtdesc pvr_fmtdesc [] = {
{
.index = 0,
@@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver));
strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
sizeof(cap->bus_info));
strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+ V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
+ switch (fh->pdi->devbase.vfl_type) {
+ case VFL_TYPE_GRABBER:
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+ break;
+ case VFL_TYPE_RADIO:
+ cap->device_caps = V4L2_CAP_RADIO;
+ break;
+ }
+ cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
return 0;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index d09a8916e940..bc08a829bc13 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -3146,27 +3146,26 @@ static int vb2_thread(void *data)
prequeue--;
} else {
call_void_qop(q, wait_finish, q);
- ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+ if (!threadio->stop)
+ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
call_void_qop(q, wait_prepare, q);
dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
}
- if (threadio->stop)
- break;
- if (ret)
+ if (ret || threadio->stop)
break;
try_to_freeze();
vb = q->bufs[fileio->b.index];
if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
- ret = threadio->fnc(vb, threadio->priv);
- if (ret)
- break;
+ if (threadio->fnc(vb, threadio->priv))
+ break;
call_void_qop(q, wait_finish, q);
if (set_timestamp)
v4l2_get_timestamp(&fileio->b.timestamp);
- ret = vb2_internal_qbuf(q, &fileio->b);
+ if (!threadio->stop)
+ ret = vb2_internal_qbuf(q, &fileio->b);
call_void_qop(q, wait_prepare, q);
- if (ret)
+ if (ret || threadio->stop)
break;
}
@@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q)
threadio->stop = true;
vb2_internal_streamoff(q, q->type);
call_void_qop(q, wait_prepare, q);
+ err = kthread_stop(threadio->thread);
q->fileio = NULL;
fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
kfree(fileio);
- err = kthread_stop(threadio->thread);
threadio->thread = NULL;
kfree(threadio);
q->fileio = NULL;
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 52a0c2f6264f..ae498b53ee40 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
return ret;
}
- ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+ ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO,
+ da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
if (ret) {
dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index dbdd0faeb6ce..210d1f85679e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
#ifdef CONFIG_PM
static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct rtsx_ucr *ucr =
- (struct rtsx_ucr *)usb_get_intfdata(intf);
-
dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
__func__, message.event);
- /*
- * Call to make sure LED is off during suspend to save more power.
- * It is NOT a permanent state and could be turned on anytime later.
- * Thus no need to call turn_on when resunming.
- */
- mutex_lock(&ucr->dev_mutex);
- rtsx_usb_turn_off_led(ucr);
- mutex_unlock(&ucr->dev_mutex);
-
return 0;
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index e2f9df1c0c36..2d7fae94c861 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
[STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
[STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
[STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
};
@@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
+ [STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index bee0abf82040..84adb46b3e2f 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -188,6 +188,7 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE1601_REG_GPIO_ED_MSB 0x8A
#define STMPE1601_REG_GPIO_RE_LSB 0x8D
#define STMPE1601_REG_GPIO_FE_LSB 0x8F
+#define STMPE1601_REG_GPIO_PU_LSB 0x91
#define STMPE1601_REG_GPIO_AF_U_MSB 0x92
#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3)
@@ -276,6 +277,8 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE24XX_REG_GPEDR_MSB 0x8C
#define STMPE24XX_REG_GPRER_LSB 0x91
#define STMPE24XX_REG_GPFER_LSB 0x94
+#define STMPE24XX_REG_GPPUR_LSB 0x97
+#define STMPE24XX_REG_GPPDR_LSB 0x9a
#define STMPE24XX_REG_GPAFR_U_MSB 0x9B
#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 0d256cb002eb..d6b764349f9d 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
}
EXPORT_SYMBOL_GPL(tps65218_clear_bits);
+static const struct regmap_range tps65218_yes_ranges[] = {
+ regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2),
+ regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS),
+};
+
+static const struct regmap_access_table tps65218_volatile_table = {
+ .yes_ranges = tps65218_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
+};
+
static struct regmap_config tps65218_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65218_volatile_table,
};
static const struct regmap_irq tps65218_irqs[] = {
@@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = {
.num_regs = 2,
.mask_base = TPS65218_REG_INT_MASK1,
+ .status_base = TPS65218_REG_INT1,
};
static const struct of_device_id of_tps65218_match_table[] = {
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b524371..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct cxl_context *ctx = vma->vm_file->private_data;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ u64 area, offset;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+
+ pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+ __func__, ctx->pe, address, offset);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ area = ctx->afu->psn_phys;
+ if (offset > ctx->afu->adapter->ps_size)
+ return VM_FAULT_SIGBUS;
+ } else {
+ area = ctx->psn_phys;
+ if (offset > ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+ }
+
+ mutex_lock(&ctx->status_mutex);
+
+ if (ctx->status != STARTED) {
+ mutex_unlock(&ctx->status_mutex);
+ pr_devel("%s: Context not started, failing problem state access\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+ mutex_unlock(&ctx->status_mutex);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+ .fault = cxl_mmap_fault,
+};
+
/*
* Map a per-context mmio space into the given vma.
*/
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
u64 len = vma->vm_end - vma->vm_start;
len = min(len, ctx->psn_size);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
- }
+ if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+ /* make sure there is a valid per process space for this AFU */
+ if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
- /* make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
}
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
-
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->psn_phys, len);
+ vma->vm_ops = &cxl_mmap_vmops;
+ return 0;
}
/*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
afu_release_irqs(ctx);
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
wake_up_all(&ctx->wq);
-
- /* Release Problem State Area mapping */
- mutex_lock(&ctx->mapping_lock);
- if (ctx->mapping)
- unmap_mapping_range(ctx->mapping, 0, 0, 1);
- mutex_unlock(&ctx->mapping_lock);
}
/*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
* created and torn down after the IDR removed
*/
__detach_context(ctx);
+
+ /*
+ * We are force detaching - remove any active PSA mappings so
+ * userspace cannot interfere with the card if it comes back.
+ * Easiest way to exercise this is to unbind and rebind the
+ * driver via sysfs while it is in use.
+ */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->mapping)
+ unmap_mapping_range(ctx->mapping, 0, 0, 1);
+ mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10dbb37..b15d8113877c 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
+ /* Do this outside the status_mutex to avoid a circular dependency with
+ * the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork,
sizeof(struct cxl_ioctl_start_work))) {
rc = -EFAULT;
goto out;
}
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+ rc = -EIO;
+ goto out;
+ }
+
/*
* if any of the reserved fields are set or any of the unused
* flags are set it's invalid
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index ff2755062b44..06ff0a2ec960 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
+ /* H_RST may be found lit before reset is started,
+ * for example if preceding reset flow hasn't completed.
+ * In that case asserting H_RST will be ignored, therefore
+ * we need to clear the H_RST bit to start a successful reset sequence.
+ */
+ if ((hcsr & H_RST) == H_RST) {
+ dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ hcsr &= ~H_RST;
+ mei_me_reg_write(hw, H_CSR, hcsr);
+ hcsr = mei_hcsr_read(hw);
+ }
+
hcsr |= H_RST | H_IG | H_IS;
if (intr_enable)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 02ad79229f65..7466ce098e60 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -886,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
unsigned idx, bus_width = 0;
int err = 0;
- if (!mmc_can_ext_csd(card) &&
+ if (!mmc_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index e3e56d35f0ee..970314e0aac8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0D40" },
{ },
};
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "INT33BB" },
{ "INT33C6" },
{ "INT3436" },
+ { "INT344D" },
{ "PNP0D40" },
{ },
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 03427755b902..4f38554ce679 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
},
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d57c3d169914..1ec684d06d54 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -21,6 +21,9 @@
#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
/*
* PCI registers
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 45238871192d..ca3424e7ef71 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (IS_ERR(host))
return PTR_ERR(host);
- if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
- ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
- if (ret < 0)
- goto err_mbus_win;
- }
-
-
pltfm_host = sdhci_priv(host);
pltfm_host->priv = pxa;
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
+ if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+ ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ if (ret < 0)
+ goto err_mbus_win;
+ }
+
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -396,11 +395,11 @@ err_add_host:
pm_runtime_disable(&pdev->dev);
err_of_parse:
err_cd_req:
+err_mbus_win:
clk_disable_unprepare(pxa->clk_io);
if (!IS_ERR(pxa->clk_core))
clk_disable_unprepare(pxa->clk_core);
err_clk_get:
-err_mbus_win:
sdhci_pltfm_free(pdev);
return ret;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cbb245b58538..f1a488ee432f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
- host->mmc->max_blk_count =
- (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
}
sdhci_enable_card_detection(host);
}
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
spin_unlock_irq(&host->lock);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
return;
}
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_runtime_pm_get(host);
+ present = mmc_gpio_get_cd(host->mmc);
+
spin_lock_irqsave(&host->lock, flags);
WARN_ON(host->mrq != NULL);
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
* zero: cd-gpio is used, and card is removed
* one: cd-gpio is used, and card is present
*/
- present = mmc_gpio_get_cd(host->mmc);
if (present < 0) {
/* If polling, assume that the card is always present. */
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
return !(present_state & SDHCI_DATA_LVL_MASK);
}
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags |= SDHCI_HS400_TUNING;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return 0;
+}
+
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
int tuning_loop_counter = MAX_TUNING_LOOP;
int err = 0;
unsigned long flags;
+ unsigned int tuning_count = 0;
+ bool hs400_tuning;
sdhci_runtime_pm_get(host);
spin_lock_irqsave(&host->lock, flags);
+ hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+ host->flags &= ~SDHCI_HS400_TUNING;
+
+ if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+ tuning_count = host->tuning_count;
+
/*
* The Host Controller needs tuning only in case of SDR104 mode
* and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
* tuning function has to be executed.
*/
switch (host->timing) {
+ /* HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS400:
+ err = -EINVAL;
+ goto out_unlock;
+
case MMC_TIMING_MMC_HS200:
+ /*
+ * Periodic re-tuning for HS400 is not expected to be needed, so
+ * disable it here.
+ */
+ if (hs400_tuning)
+ tuning_count = 0;
+ break;
+
case MMC_TIMING_UHS_SDR104:
break;
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* FALLTHROUGH */
default:
- spin_unlock_irqrestore(&host->lock, flags);
- sdhci_runtime_pm_put(host);
- return 0;
+ goto out_unlock;
}
if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
out:
- /*
- * If this is the very first time we are here, we start the retuning
- * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
- * flag won't be set, we check this condition before actually starting
- * the timer.
- */
- if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+ if (tuning_count) {
host->flags |= SDHCI_USING_RETUNING_TIMER;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
- /* Tuning mode 1 limits the maximum data length to 4MB */
- mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
- } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags &= ~SDHCI_NEEDS_RETUNING;
- /* Reload the new initial value for timer */
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
+ mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
}
/*
@@ -2070,6 +2092,7 @@ out:
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ int present;
/* First check if client has provided their own card event */
if (host->ops->card_event)
host->ops->card_event(host);
+ present = sdhci_do_get_cd(host);
+
spin_lock_irqsave(&host->lock, flags);
/* Check host->mrq first in case we are runtime suspended */
- if (host->mrq && !sdhci_do_get_cd(host)) {
+ if (host->mrq && !present) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = {
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_segs = SDHCI_MAX_SEGS;
/*
- * Maximum number of sectors in one transfer. Limited by DMA boundary
- * size (512KiB).
+ * Maximum number of sectors in one transfer. Limited by SDMA boundary
+ * size (512KiB). Note some tuning modes impose a 4MiB limit, but the
+ * 512KiB SDMA limit is the smaller one anyway.
*/
mmc->max_req_size = 524288;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 184c434ae305..0dceba1a2ba1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1648,7 +1648,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- netdev_err(bond_dev, "cannot release %s\n",
+ netdev_dbg(bond_dev, "cannot release %s\n",
slave_dev->name);
return -EINVAL;
}
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index a5fefb9059c5..b306210b02b7 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -257,7 +257,6 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
struct vringh_kiov *riov = &cfv->ctx.riov;
unsigned int skb_len;
-again:
do {
skb = NULL;
@@ -322,7 +321,6 @@ exit:
napi_schedule_prep(napi)) {
vringh_notify_disable_kern(cfv->vr_rx);
__napi_schedule(napi);
- goto again;
}
break;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f94a9fa60488..c672c4dcffac 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
c_can_irq_control(priv, false);
+ /* put ctrl to init on stop to end ongoing transmission */
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
/* deactivate pins */
pinctrl_pm_select_sleep_state(dev->dev.parent);
priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f363972cd77d..e36d10520e24 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
regmap_read(raminit->syscon, raminit->reg, &ctrl);
- /* We clear the done and start bit first. The start bit is
+ /* We clear the start bit first. The start bit is
* looking at the 0 -> 1 transition, but is not self clearing;
- * And we clear the init done bit as well.
* NOTE: DONE must be written with 1 to clear it.
+ * We can't clear the DONE bit here using regmap_update_bits()
+ * as it will bypass the write if the initial condition is START:0 DONE:1,
+ * e.g. on DRA7 which needs a START pulse.
*/
- ctrl &= ~(1 << raminit->bits.start);
- ctrl |= 1 << raminit->bits.done;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ ctrl &= ~mask; /* START = 0, DONE = 0 */
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
- ctrl &= ~(1 << raminit->bits.done);
- c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
+ /* check if START bit is 0. Ignore DONE bit for now
+ * as it can be either 0 or 1.
+ */
+ c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl);
if (enable) {
- /* Set start bit and wait for the done bit. */
+ /* Clear DONE bit & set START bit. */
ctrl |= 1 << raminit->bits.start;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
-
+ /* DONE must be written with 1 to clear it */
+ ctrl |= 1 << raminit->bits.done;
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
+ /* prevent further clearing of DONE bit */
+ ctrl &= ~(1 << raminit->bits.done);
/* clear START bit if start pulse is needed */
if (raminit->needs_pulse) {
ctrl &= ~(1 << raminit->bits.start);
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ regmap_update_bits(raminit->syscon, raminit->reg,
+ mask, ctrl);
}
ctrl |= 1 << raminit->bits.done;
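The comments above hinge on regmap_update_bits() being a read-modify-write helper that skips the bus write when the masked bits already hold the requested value; for a write-1-to-clear DONE bit that reads back as 1, that skip means the clear never reaches the hardware. A standalone model of the effect (the fake register and helper are purely illustrative):
#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_reg = 0x2;	/* START(bit0)=0, DONE(bit1)=1, DONE is W1C */

/* Simplified model of regmap_update_bits(): read, and only issue a write
 * when the masked bits would actually change. */
static void update_bits(unsigned int mask, unsigned int val, bool *wrote)
{
	unsigned int new = (fake_reg & ~mask) | (val & mask);

	*wrote = (new != fake_reg);
	if (*wrote)
		fake_reg = new;	/* real hardware would see a bus write here */
}

int main(void)
{
	bool wrote;

	/* Trying to "write 1 to clear" DONE through update_bits(): the read
	 * already shows DONE=1, so no bus write is issued and the W1C bit is
	 * never actually cleared by the hardware. */
	update_bits(0x2, 0x2, &wrote);
	printf("DONE write issued: %s\n", wrote ? "yes" : "no");	/* prints "no" */
	return 0;
}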
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3ec8f6f25e5f..847c1f813261 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -807,10 +807,14 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- if (cm->flags & ~priv->ctrlmode_supported)
+
+ /* check whether changed bits are allowed to be modified */
+ if (cm->mask & ~priv->ctrlmode_supported)
return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= cm->flags;
+ priv->ctrlmode |= (cm->flags & cm->mask);
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
if (priv->ctrlmode & CAN_CTRLMODE_FD)
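The mask/flags handling above is the usual netlink read-modify-write convention: cm->mask says which control-mode bits the request may touch, and only those bits are taken from cm->flags, so stray flag bits cannot leak into priv->ctrlmode. A standalone illustration with made-up values:
#include <stdio.h>

/* Netlink-style ctrlmode update: only bits selected by mask may change,
 * and flag bits outside the mask must not leak into the result. */
static unsigned int apply_ctrlmode(unsigned int cur, unsigned int mask,
				   unsigned int flags)
{
	cur &= ~mask;		/* clear the bits being modified  */
	cur |= flags & mask;	/* copy only the requested values */
	return cur;
}

int main(void)
{
	unsigned int cur = 0x05;	/* e.g. two mode bits already set */

	/* Request touches bit 1 only; the stray bit 3 in flags is ignored. */
	printf("0x%02x\n", apply_ctrlmode(cur, 0x02, 0x0a));	/* -> 0x07 */
	return 0;
}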
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d7bc462aafdc..244529881be9 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void)
priv->can.data_bittiming_const = &m_can_data_bittiming_const;
priv->can.do_set_mode = m_can_set_mode;
priv->can.do_get_berr_counter = m_can_get_berr_counter;
+
+ /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+ priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+
+ /* CAN_CTRLMODE_FD_NON_ISO cannot be changed with M_CAN IP v3.0.1 */
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 541fb7a05625..7af379ca861b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -520,10 +520,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
buf, msg->len,
- kvaser_usb_simple_msg_callback, priv);
+ kvaser_usb_simple_msg_callback, netdev);
usb_anchor_urb(urb, &priv->tx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv = dev->nets[channel];
stats = &priv->netdev->stats;
- if (status & M16C_STATE_BUS_RESET) {
- kvaser_usb_unlink_tx_urbs(priv);
- return;
- }
-
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
- if (status & M16C_STATE_BUS_OFF) {
+ if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
}
new_state = CAN_STATE_ERROR_PASSIVE;
- }
-
- if (status == M16C_STATE_BUS_ERROR) {
+ } else if (status & M16C_STATE_BUS_ERROR) {
if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
((txerr >= 96) || (rxerr >= 96))) {
cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv->can.can_stats.error_warning++;
new_state = CAN_STATE_ERROR_WARNING;
- } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+ ((txerr < 96) && (rxerr < 96))) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
@@ -770,10 +764,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv->can.state = new_state;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
@@ -805,10 +798,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
stats->rx_over_errors++;
stats->rx_errors++;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
}
@@ -887,10 +879,9 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
cf->can_dlc);
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
@@ -1246,6 +1237,9 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+ /* reset tx contexts */
+ kvaser_usb_unlink_tx_urbs(priv);
+
priv->can.state = CAN_STATE_STOPPED;
close_candev(priv->netdev);
@@ -1294,12 +1288,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (!urb) {
netdev_err(netdev, "No memory left for URBs\n");
stats->tx_dropped++;
- goto nourbmem;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
stats->tx_dropped++;
+ dev_kfree_skb(skb);
goto nobufmem;
}
@@ -1334,6 +1330,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
}
}
+ /* This should never happen; it implies a flow control bug */
if (!context) {
netdev_warn(netdev, "cannot find free context\n");
ret = NETDEV_TX_BUSY;
@@ -1364,9 +1361,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (unlikely(err)) {
can_free_echo_skb(netdev, context->echo_index);
- skb = NULL; /* set to NULL to avoid double free in
- * dev_kfree_skb(skb) */
-
atomic_dec(&priv->active_tx_urbs);
usb_unanchor_urb(urb);
@@ -1388,8 +1382,6 @@ releasebuf:
kfree(buf);
nobufmem:
usb_free_urb(urb);
-nourbmem:
- dev_kfree_skb(skb);
return ret;
}
@@ -1502,6 +1494,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
struct kvaser_usb_net_priv *priv;
int i, err;
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
+ if (err)
+ return err;
+
netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Cannot alloc candev\n");
@@ -1588,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
{
struct kvaser_usb *dev;
int err = -ENOMEM;
- int i;
+ int i, retry = 3;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -1606,10 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
- for (i = 0; i < MAX_NET_DEVICES; i++)
- kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+ /* On some x86 laptops, plugging a Kvaser device again after
+ * an unplug makes the firmware always ignore the very first
+ * command. For such a case, provide some room for retries
+ * instead of completely exiting the driver.
+ */
+ do {
+ err = kvaser_usb_get_software_info(dev);
+ } while (--retry && err == -ETIMEDOUT);
- err = kvaser_usb_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software infos, error %d\n", err);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 89c8d9fc97de..57e97910c728 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -246,13 +246,13 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
- return -ENODEV;
+ goto err_out;
}
if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
- return -EBUSY;
+ goto err_out;
}
reg0 = inb(ioaddr);
@@ -392,6 +392,8 @@ err_out_free_netdev:
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
+err_out:
+ pci_disable_device(pdev);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index df76050d0a9d..eadcb053807e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -156,18 +156,6 @@ source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
-
-config S6GMAC
- tristate "S6105 GMAC ethernet support"
- depends on XTENSA_VARIANT_S6000
- select PHYLIB
- ---help---
- This driver supports the on chip ethernet device on the
- S6105 xtensa processor.
-
- To compile this driver as a module, choose M here. The module
- will be called s6gmac.
-
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index bf56f8b36e90..1367afcd0a8b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -66,7 +66,6 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
-obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 1fcd5568a352..f3470d96837a 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
}
db->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(db->clk))
+ if (IS_ERR(db->clk)) {
+ ret = PTR_ERR(db->clk);
goto out;
+ }
clk_prepare_enable(db->clk);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3498760dc22a..760c72c6e2ac 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
init_error:
free_skbufs(dev);
alloc_skbuf_error:
- if (priv->phydev) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
phy_error:
return ret;
}
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
int ret;
unsigned long int flags;
- /* Stop and disconnect the PHY */
- if (priv->phydev) {
+ /* Stop the PHY */
+ if (priv->phydev)
phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 75b08c63d39f..29a09271b64a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -767,16 +767,17 @@
#define MTL_Q_RQOMR 0x40
#define MTL_Q_RQMPOCR 0x44
#define MTL_Q_RQDR 0x4c
+#define MTL_Q_RQFCR 0x50
#define MTL_Q_IER 0x70
#define MTL_Q_ISR 0x74
/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
#define MTL_Q_RQOMR_EHFC_INDEX 7
#define MTL_Q_RQOMR_EHFC_WIDTH 1
-#define MTL_Q_RQOMR_RFA_INDEX 8
-#define MTL_Q_RQOMR_RFA_WIDTH 3
-#define MTL_Q_RQOMR_RFD_INDEX 13
-#define MTL_Q_RQOMR_RFD_WIDTH 3
#define MTL_Q_RQOMR_RQS_INDEX 16
#define MTL_Q_RQOMR_RQS_WIDTH 9
#define MTL_Q_RQOMR_RSF_INDEX 5
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66ec2ee..4c66cd1d1e60 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++) {
/* Activate flow control when less than 4k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
/* De-activate flow control when more than 6k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
}
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e398eda07298..c8af3ce3ea38 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
schedule_work(&alx->reset_wk);
}
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
struct alx_rx_queue *rxq = &alx->rxq;
struct alx_rrd *rrd;
struct alx_buffer *rxb;
struct sk_buff *skb;
u16 length, rfd_cleaned = 0;
+ int work = 0;
- while (budget > 0) {
+ while (work < budget) {
rrd = &rxq->rrd[rxq->rrd_read_idx];
if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
ALX_GET_FIELD(le32_to_cpu(rrd->word0),
RRD_NOR) != 1) {
alx_schedule_reset(alx);
- return 0;
+ return work;
}
rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
}
napi_gro_receive(&alx->napi, skb);
- budget--;
+ work++;
next_pkt:
if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
if (rfd_cleaned)
alx_refill_rx_ring(alx, GFP_ATOMIC);
- return budget > 0;
+ return work;
}
static int alx_poll(struct napi_struct *napi, int budget)
{
struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
struct alx_hw *hw = &alx->hw;
- bool complete = true;
unsigned long flags;
+ bool tx_complete;
+ int work;
- complete = alx_clean_tx_irq(alx) &&
- alx_clean_rx_irq(alx, budget);
+ tx_complete = alx_clean_tx_irq(alx);
+ work = alx_clean_rx_irq(alx, budget);
- if (!complete)
- return 1;
+ if (!tx_complete || work == budget)
+ return budget;
napi_complete(&alx->napi);
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
alx_post_write(hw);
- return 0;
+ return work;
}
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
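The alx rework above (and several NAPI fixes further down, e.g. bgmac, bnx2x, enic and dnet) enforces the same poll contract: return the number of packets actually processed, return the full budget when work remains or the ring lock could not be taken, and call napi_complete() only after finishing below budget. A condensed kernel-style skeleton; the foo_* names are placeholders:
/* Skeleton of the NAPI poll contract; foo_* names are placeholders. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	bool tx_done;
	int work;

	tx_done = foo_clean_tx(priv);		/* true if TX ring fully cleaned */
	work = foo_clean_rx(priv, budget);	/* packets handled, <= budget    */

	/* More RX left or TX unfinished: keep polling, report full budget. */
	if (!tx_done || work == budget)
		return budget;

	/* All done below budget: stop polling and re-enable interrupts. */
	napi_complete(napi);
	foo_enable_irqs(priv);
	return work;
}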
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 05c6af6c418f..3007d95fbb9f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
bgmac->int_status = 0;
}
- if (handled < weight)
+ if (handled < weight) {
napi_complete(napi);
-
- bgmac_chip_intrs_on(bgmac);
+ bgmac_chip_intrs_on(bgmac);
+ }
return handled;
}
@@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
err = bgmac_mii_register(bgmac);
if (err) {
bgmac_err(bgmac, "Cannot register MDIO\n");
@@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core)
netif_carrier_off(net_dev);
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
-
return 0;
err_mii_unregister:
@@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core)
{
struct bgmac *bgmac = bcma_get_drvdata(core);
- netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
bgmac_mii_unregister(bgmac);
+ netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..e468ed3f210f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
}
#endif
if (!bnx2x_fp_lock_napi(fp))
- return work_done;
+ return budget;
for_each_cos_in_tx_queue(fp, cos)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9f5e38769a29..72eef9fc883e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12553,9 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev,
return 0;
}
-static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
static const struct net_device_ops bnx2x_netdev_ops = {
@@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
- .ndo_gso_check = bnx2x_gso_check,
+ .ndo_features_check = bnx2x_features_check,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bb48a610b72a..96bf01ba32dd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
}
static void tg3_irq_quiesce(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
int i;
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
tp->irq_sync = 1;
smp_mb();
+ spin_unlock_bh(&tp->lock);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+
+ spin_lock_bh(&tp->lock);
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
u32 val;
void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
}
smp_mb();
+ tg3_full_unlock(tp);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+ tg3_full_lock(tp, 0);
+
if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
{
struct tg3 *tp = (struct tg3 *) __opaque;
- if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
- goto restart_timer;
-
spin_lock(&tp->lock);
+ if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+ spin_unlock(&tp->lock);
+ goto restart_timer;
+ }
+
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
struct tg3 *tp = container_of(work, struct tg3, reset_task);
int err;
+ rtnl_lock();
tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
+ rtnl_unlock();
return;
}
@@ -11138,6 +11154,7 @@ out:
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ rtnl_unlock();
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
@@ -17800,23 +17817,6 @@ static int tg3_init_one(struct pci_dev *pdev,
goto err_out_apeunmap;
}
- /*
- * Reset chip in case UNDI or EFI driver did not shutdown
- * DMA self test will enable WDMAC and we'll see (spurious)
- * pending DMA on the PCI bus at that point.
- */
- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- }
-
- err = tg3_test_dma(tp);
- if (err) {
- dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
- goto err_out_apeunmap;
- }
-
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -17861,6 +17861,23 @@ static int tg3_init_one(struct pci_dev *pdev,
sndmbx += 0xc;
}
+ /*
+ * Reset chip in case UNDI or EFI driver did not shutdown
+ * DMA self test will enable WDMAC and we'll see (spurious)
+ * pending DMA on the PCI bus at that point.
+ */
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ }
+
+ err = tg3_test_dma(tp);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+ goto err_out_apeunmap;
+ }
+
tg3_init_coal(tp);
pci_set_drvdata(pdev, dev);
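The unlock/relock pairs around synchronize_irq() above exist because synchronize_irq() waits for in-flight handlers to finish (and may sleep); if a handler needs tp->lock, waiting for it while holding that lock would deadlock. The __releases/__acquires annotations tell sparse the lock is intentionally dropped. A reduced sketch of the pattern, with a hypothetical function name:
static void quiesce_irqs_example(struct tg3 *tp)	/* hypothetical name */
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);	/* don't wait for handlers under the lock */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
	spin_lock_bh(&tp->lock);	/* caller still expects the lock on return */
}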
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7d6aa8c87df8..619083a860a4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -172,7 +172,7 @@ bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
/* Retrieve flash partition info */
fcomp.comp_status = 0;
- init_completion(&fcomp.comp);
+ reinit_completion(&fcomp.comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
bnad_cb_completion, &fcomp);
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 55eb7f2af2b4..7ef55f5fa664 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
res = PTR_ERR(lp->pclk);
goto err_free_dev;
}
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
lp->hclk = ERR_PTR(-ENOENT);
lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
err_disable_clock:
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
err_free_dev:
free_netdev(dev);
return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
free_netdev(dev);
return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
}
return 0;
}
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
netif_device_attach(net_dev);
netif_start_queue(net_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index d00a751f0588..6049f70e110c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -96,6 +96,9 @@ struct port_info {
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
+ s8 mdio_addr;
+ u8 port_type; /* firmware port type */
+ u8 mod_type; /* firmware module type */
u8 port_id; /* physical port ID */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
@@ -522,6 +525,7 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
* is "contracted" to provide for the common code.
*/
void t4vf_os_link_changed(struct adapter *, int, int);
+void t4vf_os_portmod_changed(struct adapter *, int);
/*
* SGE function prototype declarations.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index aa74ec34a467..a936ee8958c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -44,6 +44,7 @@
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include "t4vf_common.h"
#include "t4vf_defs.h"
@@ -210,6 +211,38 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
}
/*
+ * THe port module type has changed on the indicated "port" (Virtual
+ * Interface).
+ */
+void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
+{
+ static const char * const mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+ const struct net_device *dev = adapter->port[pidx];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
+ dev->name);
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
+ dev->name, mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ dev_info(adapter->pdev_dev, "%s: unsupported optical port "
+ "module inserted\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
+ "forcing TWINAX\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
+ dev->name);
+ else
+ dev_info(adapter->pdev_dev, "%s: unknown module type %d "
+ "inserted\n", dev->name, pi->mod_type);
+}
+
+/*
* Net device operations.
* ======================
*/
@@ -1193,24 +1226,103 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
* state of the port to which we're linked.
*/
-/*
- * Return current port link settings.
- */
-static int cxgb4vf_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- const struct port_info *pi = netdev_priv(dev);
+static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
+ unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI ||
+ type == FW_PORT_TYPE_SFP ||
+ type == FW_PORT_TYPE_QSFP_10G ||
+ type == FW_PORT_TYPE_QSA) {
+ v |= SUPPORTED_FIBRE;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_BP40_BA ||
+ type == FW_PORT_TYPE_QSFP) {
+ v |= SUPPORTED_40000baseSR4_Full;
+ v |= SUPPORTED_FIBRE;
+ }
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI)
+ cmd->port = PORT_TP;
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+ cmd->port = PORT_FIBRE;
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSA ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_OTHER;
+ } else
+ cmd->port = PORT_OTHER;
- cmd->supported = pi->link_cfg.supported;
- cmd->advertising = pi->link_cfg.advertising;
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.supported);
+ cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
cmd->duplex = DUPLEX_FULL;
-
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = pi->port_id;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = pi->link_cfg.autoneg;
+ cmd->autoneg = p->link_cfg.autoneg;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
@@ -2318,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
*/
n10g = 0;
for_each_port(adapter, pidx)
- n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8d3237f5e364..b9debb4f29a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -230,7 +230,7 @@ struct adapter_params {
static inline bool is_10g_port(const struct link_config *lc)
{
- return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 02e8833b7797..60426cf890a7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -245,6 +245,10 @@ static int hash_mac_addr(const u8 *addr)
return a & 0x3f;
}
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
@@ -259,8 +263,8 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
- if (lc->supported & SUPPORTED_Autoneg) {
- lc->advertising = lc->supported;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
lc->requested_fc |= PAUSE_AUTONEG;
} else {
@@ -280,7 +284,6 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
struct fw_vi_cmd vi_cmd, vi_rpl;
struct fw_port_cmd port_cmd, port_rpl;
int v;
- u32 word;
/*
* Execute a VI Read command to get our Virtual Interface information
@@ -319,19 +322,13 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
if (v)
return v;
- v = 0;
- word = be16_to_cpu(port_rpl.u.info.pcap);
- if (word & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (word & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_40G)
- v |= SUPPORTED_40000baseSR4_Full;
- if (word & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- init_link_config(&pi->link_cfg, v);
+ v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(v) : -1;
+ pi->port_type = FW_PORT_CMD_PTYPE_G(v);
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
return 0;
}
@@ -1491,7 +1488,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
*/
const struct fw_port_cmd *port_cmd =
(const struct fw_port_cmd *)rpl;
- u32 word;
+ u32 stat, mod;
int action, port_id, link_ok, speed, fc, pidx;
/*
@@ -1509,21 +1506,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
port_id = FW_PORT_CMD_PORTID_G(
be32_to_cpu(port_cmd->op_to_portid));
- word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
- link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
+ stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+ link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
speed = 0;
fc = 0;
- if (word & FW_PORT_CMD_RXPAUSE_F)
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (word & FW_PORT_CMD_TXPAUSE_F)
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
/*
@@ -1540,12 +1537,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
continue;
lc = &pi->link_cfg;
+
+ mod = FW_PORT_CMD_MODTYPE_G(stat);
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4vf_os_portmod_changed(adapter, pidx);
+ }
+
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) {
/* something changed */
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
+ lc->supported =
+ be16_to_cpu(port_cmd->u.info.pcap);
t4vf_os_link_changed(adapter, pidx, link_ok);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 868d0f605d60..e356afa44e7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1060,10 +1060,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
- skb->csum = htons(checksum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ /* Hardware does not provide the whole packet checksum. It only
+ * provides a pseudo checksum. Since hw validates the packet
+ * checksum but does not provide us the checksum value, use
+ * CHECKSUM_UNNECESSARY.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
+ ipv4_csum_ok)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
@@ -1331,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
int err;
if (!enic_poll_lock_napi(&enic->rq[rq]))
- return work_done;
+ return budget;
/* Service RQ
*/
@@ -1612,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
- goto err_out_notify_unset;
+ goto err_out_free_rq;
}
}
@@ -1645,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
return 0;
-err_out_notify_unset:
+err_out_free_rq:
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
enic_dev_notify_unset(enic);
err_out_free_intr:
enic_free_intr(enic);
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index a379c3e4b57f..13d00a38a5bd 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
* break out of while loop if there are no more
* packets waiting
*/
- if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
- napi_complete(napi);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
- }
+ if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+ break;
cmd_word = dnet_readl(bp, RX_LEN_FIFO);
pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
"size %u.\n", dev->name, pkt_len);
}
- budget -= npackets;
-
if (npackets < budget) {
/* We processed all packets available. Tell NAPI it can
- * stop polling then re-enable rx interrupts */
+ * stop polling then re-enable rx interrupts.
+ */
napi_complete(napi);
int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
}
- /* There are still packets waiting */
- return 1;
+ return npackets;
}
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 196073110e32..d48806b5cd88 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4383,8 +4383,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
* is expected to work across all types of IP tunnels once exported. Skyhawk
* supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. Note this only
- * ensures that other tunnels work fine while VxLAN offloads are not enabled.
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
*
* Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
* adds more than one port, disable offloads and don't re-enable them again
@@ -4459,9 +4460,45 @@ done:
adapter->vxlan_port_count--;
}
-static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t be_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 l4_hdr = 0;
+
+ /* The code below restricts offload features for some tunneled packets.
+ * Offload features for normal (non tunnel) packets are unchanged.
+ */
+ if (!skb->encapsulation ||
+ !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
+ return features;
+
+ /* It's an encapsulated packet and VxLAN offloads are enabled. We
+ * should disable tunnel offload features if it's not a VxLAN packet,
+ * as tunnel offloads have been enabled only for VxLAN. This is done to
+ * allow other tunneled traffic like GRE to work fine while VxLAN
+ * offloads are configured in Skyhawk-R.
+ */
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if (l4_hdr != IPPROTO_UDP ||
+ skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
}
#endif
@@ -4492,7 +4529,7 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_BE2NET_VXLAN
.ndo_add_vxlan_port = be_add_vxlan_port,
.ndo_del_vxlan_port = be_del_vxlan_port,
- .ndo_gso_check = be_gso_check,
+ .ndo_features_check = be_features_check,
#endif
};
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 469691ad4a1e..40132929daf7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -424,6 +424,8 @@ struct bufdesc_ex {
* (40ns * 6).
*/
#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
struct fec_enet_priv_tx_q {
int index;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5ebdf8dc8a31..bba87775419d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
int err = -ENXIO, i;
/*
- * The dual fec interfaces are not equivalent with enet-mac.
+ * The i.MX28 dual fec interfaces are not equivalent.
* Here are the differences:
*
* - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+ if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
mii_cnt++;
/* save fec0 mii_bus */
- if (fep->quirks & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
fec0_mii_bus = fep->mii_bus;
return 0;
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
pdev->id_entry = of_id->data;
fep->quirks = pdev->id_entry->driver_data;
+ fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5b8300a32bf5..4d61ef50b465 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -281,6 +281,17 @@ config I40E_DCB
If unsure, say N.
+config I40E_FCOE
+ bool "Fibre Channel over Ethernet (FCoE)"
+ default n
+ depends on I40E && DCB && FCOE
+ ---help---
+ Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+ in the driver. This will create a new netdev for exclusive FCoE
+ use with XL710 FCoE offloads enabled.
+
+ If unsure, say N.
+
config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 781065eb5431..e9c3a87e5b11 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 4b94ddb29c24..c40581999121 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 433a55886ad2..cb0de455683e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
if (desc_n >= ring->count || desc_n < 0) {
dev_info(&pf->pdev->dev,
"descriptor %d not found\n", desc_n);
- return;
+ goto out;
}
if (!is_rx_ring) {
txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
+
+out:
kfree(ring);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 045b5c4b98b3..ad802dd0f67a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,7 +78,7 @@ do { \
} while (0)
typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
#define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 04b441460bbd..cecb340898fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
+#define WB_STRIDE 0x3
+
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ /* check to see if there are any non-cache aligned descriptors
+ * waiting to be written back, and kick the hardware to force
+ * them to be written back in case of napi polling
+ */
+ if (budget &&
+ !((i & WB_STRIDE) == WB_STRIDE) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ else
+ tx_ring->arm_wb = false;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
dev_info(tx_ring->dev,
- "tx hang detected on queue %d, resetting adapter\n",
+ "tx hang detected on queue %d, reset requested\n",
tx_ring->queue_index);
- tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+ /* do not fire the reset immediately, wait for the stack to
+ * decide we are truly stuck, also prevents every queue from
+ * simultaneously requesting a reset
+ */
- /* the adapter is about to reset, no point in enabling stuff */
- return true;
+ /* the adapter is about to reset, no point in enabling polling */
+ budget = 1;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
}
- return budget > 0;
+ return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+ /* allow 00 to be written to the index */;
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+ val);
}
/**
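
The Tx-clean hunks above add a WB_STRIDE mask and an arm_wb flag: if a poll leaves the last cleaned descriptor short of a full four-descriptor stride while work is still pending, the driver later fires a software interrupt via i40e_force_wb() so the hardware writes those descriptors back. A sketch of just the arming decision, with hypothetical parameter names:

#include <stdbool.h>
#include <stdint.h>

#define WB_STRIDE 0x3   /* same value as in the hunk above */

/* Only a clean-up that stops exactly on a stride boundary is guaranteed to
 * have been written back; anything else may sit in the device until forced.
 */
static bool needs_forced_writeback(uint32_t last_idx, bool ring_down,
				   bool ring_has_pending, int budget)
{
	if (!budget || ring_down || !ring_has_pending)
		return false;
	return (last_idx & WB_STRIDE) != WB_STRIDE;
}
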
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* be set if the *inner* header is UDP
*/
- if (ipv4_tunnel &&
- (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
- !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ if (ipv4_tunnel) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->protocol == htons(ETH_P_8021AD))
? VLAN_HLEN : 0;
- rx_udp_csum = udp_csum(skb);
- iph = ip_hdr(skb);
- csum = csum_tcpudp_magic(
- iph->saddr, iph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, rx_udp_csum);
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ (udp_hdr(skb)->check != 0)) {
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum)
- goto checksum_fail;
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
+
+ } /* else it's GRE and so no outer UDP header */
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool clean_complete = true;
+ bool arm_wb = false;
int budget_per_ring;
if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- i40e_for_each_ring(ring, q_vector->tx)
+ i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ arm_wb |= ring->arm_wb;
+ }
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ if (arm_wb)
+ i40e_force_wb(vsi, q_vector);
return budget;
+ }
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Place RS bit on last descriptor of any packet that spans across the
* 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
*/
-#define WB_STRIDE 0x3
if (((i & WB_STRIDE) != WB_STRIDE) &&
(first <= &tx_ring->tx_bi[i]) &&
(first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
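
The Rx checksum hunk above narrows software re-verification of the outer UDP checksum to IPv4 tunnels whose outer header really is UDP with a non-zero checksum; a zero UDP checksum over IPv4 means "not computed", and GRE encapsulation has no outer UDP header at all. A sketch of that rule with an illustrative header struct:

#include <stdbool.h>
#include <stdint.h>

struct outer_hdr {
	uint8_t  ip_proto;    /* 17 = UDP, 47 = GRE */
	uint16_t udp_check;   /* outer UDP checksum field, 0 if not computed */
};

/* Re-verify only when an outer UDP checksum is actually present. */
static bool must_verify_outer_udp_csum(const struct outer_hdr *h)
{
	return h->ip_proto == 17 && h->udp_check != 0;
}
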
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index e60d3accb2e2..18b00231d2f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -241,6 +241,7 @@ struct i40e_ring {
unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 051ea94bdcd3..0f69ef81751a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = 0;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ s32 i = 0, timeout = 200;
while (i < timeout) {
if (igb_get_hw_semaphore(hw)) {
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a62fc38f045e..1c75829eb166 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_dma) && \
(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
/*
* RX/TX descriptors.
*/
@@ -362,6 +366,7 @@ struct tx_queue {
dma_addr_t tso_hdrs_dma;
struct tx_desc *tx_desc_area;
+ char *tx_desc_mapping; /* array to track the type of the dma mapping */
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
desc->l4i_chk = 0;
desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
- void *addr;
this_frag = &skb_shinfo(skb)->frags[frag];
- addr = page_address(this_frag->page.p) + this_frag->page_offset;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
/*
* The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
- desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+ this_frag, 0, desc->byte_cnt,
+ DMA_TO_DEVICE);
}
}
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
if (nr_frags) {
txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
+ char desc_dma_map;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
+ desc_dma_map = txq->tx_desc_mapping[tx_index];
+
cmd_sts = desc->cmd_sts;
if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++;
txq->tx_desc_count--;
- if (!IS_TSO_HEADER(txq, desc->buf_ptr))
- dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+ if (desc_dma_map == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ }
if (cmd_sts & TX_ENABLE_INTERRUPT) {
struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
struct tx_queue *txq = mp->txq + index;
struct tx_desc *tx_desc;
int size;
+ int ret;
int i;
txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc);
}
+ txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+ GFP_KERNEL);
+ if (!txq->tx_desc_mapping) {
+ ret = -ENOMEM;
+ goto err_free_desc_area;
+ }
+
/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma, GFP_KERNEL);
if (txq->tso_hdrs == NULL) {
- dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
- txq->tx_desc_area, txq->tx_desc_dma);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_desc_mapping;
}
skb_queue_head_init(&txq->tx_skb);
return 0;
+
+err_free_desc_mapping:
+ kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+ if (index == 0 && size <= mp->tx_desc_sram_size)
+ iounmap(txq->tx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return ret;
}
static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
+ kfree(txq->tx_desc_mapping);
+
if (txq->tso_hdrs)
dma_free_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
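
The mv643xx_eth hunks above record, per Tx descriptor, whether the buffer was mapped with dma_map_single() or with skb_frag_dma_map() (a page mapping), and txq_reclaim() then calls the matching unmap helper; txq_init() also gains a proper error-unwind path for the new array. A hedged kernel-style sketch of the bookkeeping, using an illustrative slot structure rather than the driver's real layout:

#include <linux/dma-mapping.h>
#include <linux/device.h>

enum desc_map_type { DESC_MAP_SINGLE, DESC_MAP_PAGE };

struct tx_slot {
	dma_addr_t addr;
	size_t len;
	enum desc_map_type type;   /* recorded when the buffer is mapped */
};

/* Undo the mapping with the API that created it: dma_map_single() pairs with
 * dma_unmap_single(), skb_frag_dma_map() (a page mapping) with
 * dma_unmap_page().
 */
static void tx_slot_unmap(struct device *dev, const struct tx_slot *slot)
{
	if (slot->type == DESC_MAP_PAGE)
		dma_unmap_page(dev, slot->addr, slot->len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, slot->addr, slot->len, DMA_TO_DEVICE);
}
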
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 190cbd931f6b..ac6a8f1eea6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
{
int err;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
@@ -2365,9 +2366,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
-static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -2400,7 +2403,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
@@ -2434,7 +2437,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
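
The en_netdev.c hunks above (and the matching qlcnic change further down) convert the boolean .ndo_gso_check callback into .ndo_features_check, which receives the candidate feature set and returns a possibly reduced one. A hedged sketch of such a callback, with a hypothetical driver prefix:

#include <linux/netdevice.h>
#include <net/vxlan.h>

/* Strip offload bits the hardware cannot honour for this particular skb,
 * here by deferring to the VXLAN helper, instead of merely vetoing GSO.
 */
static netdev_features_t foo_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	return vxlan_features_check(skb, features);
}
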
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a308d41e4de0..e3357bf523df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -962,7 +962,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_desc->ctrl.owner_opcode = op_own;
if (send_doorbell) {
wmb();
- iowrite32(ring->doorbell_qpn,
+ /* Since there is no iowrite*_native() that writes the
+ * value as is, without byteswapping - using the one
+ * that doesn't do byteswapping in the relevant arch
+ * endianness.
+ */
+#if defined(__LITTLE_ENDIAN)
+ iowrite32(
+#else
+ iowrite32be(
+#endif
+ ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
} else {
ring->xmit_more++;
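
In the en_tx.c hunk above, ring->doorbell_qpn is already stored in the byte order the device expects, so it must reach the BAR without another swap: iowrite32() byte-swaps on big-endian hosts, while iowrite32be() does not, hence the per-endianness selection. A hedged sketch of that selection as a helper (the helper itself is hypothetical):

#include <linux/io.h>
#include <linux/types.h>

static inline void write_doorbell_raw(u32 raw_val, void __iomem *db)
{
#if defined(__LITTLE_ENDIAN)
	iowrite32(raw_val, db);    /* iowrite32() performs no swap on LE */
#else
	iowrite32be(raw_val, db);  /* iowrite32be() performs no swap on BE */
#endif
}
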
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 943cbd47d832..6e08352ec994 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1744,8 +1744,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
- dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
- dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
else
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
@@ -1829,7 +1828,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
- goto err_stop_fw;
+ return err;
}
choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +1859,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
&init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
- goto err_stop_fw;
+ return err;
}
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +1873,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
if (err)
- goto err_stop_fw;
+ return err;
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
@@ -1886,7 +1885,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_query_func(dev, &dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
- goto err_stop_fw;
+ goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
dev->caps.num_eqs = dev_cap.max_eqs;
dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2005,6 @@ err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
-err_stop_fw:
- if (!mlx4_is_slave(dev)) {
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
- }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d6f549685c0f..7094a9c70fd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
mlx4_mtt_cleanup(dev, &mr->mtt);
+ mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
{
int err;
- mpt_entry->start = cpu_to_be64(iova);
- mpt_entry->length = cpu_to_be64(size);
- mpt_entry->entity_size = cpu_to_be32(page_shift);
-
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
return err;
+ mpt_entry->start = cpu_to_be64(mr->iova);
+ mpt_entry->length = cpu_to_be64(mr->size);
+ mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index f1ebed6c63b1..2fa6ae026e4f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -2303,12 +2303,6 @@ static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
/* Spanning Tree */
-static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
-}
-
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index af099057f0e9..71af98bb72cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
- if (mgp->cmd == NULL)
+ if (!mgp->cmd) {
+ status = -ENOMEM;
goto abort_with_enabled;
+ }
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f5e4b820128b..db0c7a9aee60 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
if (sp->s2io_entries[i].in_use == MSIX_FLG) {
if (sp->s2io_entries[i].type ==
MSIX_RING_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-RX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_ring_handle,
@@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
sp->s2io_entries[i].arg);
} else if (sp->s2io_entries[i].type ==
MSIX_ALARM_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-TX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_fifo_handle,
@@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s: UDP Fragmentation Offload(UFO) enabled\n",
dev->name);
/* Initialize device name */
- sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
+ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
+ sp->product_name);
if (vlan_tag_strip)
sp->vlan_strip_flag = 1;
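
The s2io hunks above replace sprintf() with snprintf() bounded by the size of the destination arrays, so IRQ and device names built from dev->name are truncated rather than overflowing. A small runnable illustration (buffer size and names are made up):

#include <stdio.h>

int main(void)
{
	char desc[25];
	const char *ifname = "a-rather-long-interface-name";

	/* snprintf() never writes past sizeof(desc); the result is truncated
	 * and still NUL-terminated.
	 */
	snprintf(desc, sizeof(desc), "%s:MSI-X-%d-RX", ifname, 3);
	puts(desc);
	return 0;
}
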
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..c531c8ae1be4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
work_done = netxen_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
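
The netxen poll hunk above reports the whole budget as consumed whenever Tx cleanup did not finish, so the NAPI core keeps polling instead of completing and re-enabling interrupts with Tx work outstanding. The accounting rule in isolation, with hypothetical names:

#include <stdbool.h>

/* Returning a value equal to budget signals "not done" to the NAPI core;
 * anything smaller lets the caller call napi_complete() and re-enable IRQs.
 */
static int poll_result(int rx_done, int budget, bool tx_complete)
{
	if (!tx_complete)
		return budget;   /* force another poll round */
	return rx_done;
}
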
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c2f09af5c25b..4847713211ca 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
int i = 0;
- while (i < 10) {
- if (i)
- ssleep(1);
-
+ do {
if (ql_sem_lock(qdev,
QL_DRVR_SEM_MASK,
(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
"driver lock acquired\n");
return 1;
}
- }
+ ssleep(1);
+ } while (++i < 10);
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
return 0;
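
The qla3xxx hunk above reshapes the lock wait into a do/while loop that tries immediately, sleeps only between attempts, and gives up after ten tries. A runnable sketch of that shape, with a dummy try_lock() standing in for the hardware semaphore:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

static bool try_lock(void)
{
	return ++attempts >= 3;   /* dummy: succeeds on the third try */
}

static bool wait_for_lock(void)
{
	int i = 0;

	do {
		if (try_lock())
			return true;   /* acquired, no trailing sleep */
		sleep(1);              /* back off before retrying */
	} while (++i < 10);

	return false;                  /* timed out after ~10 seconds */
}

int main(void)
{
	printf("lock %s\n", wait_for_lock() ? "acquired" : "timed out");
	return 0;
}
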
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1aa25b13ace1..2528c3fb6b90 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
-static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#ifdef CONFIG_QLCNIC_VXLAN
.ndo_add_vxlan_port = qlcnic_add_vxlan_port,
.ndo_del_vxlan_port = qlcnic_del_vxlan_port,
- .ndo_gso_check = qlcnic_gso_check,
+ .ndo_features_check = qlcnic_features_check,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
@@ -2603,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
dev_err(&pdev->dev,
"%s: failed. Please Reboot\n", __func__);
+ err = -ENODEV;
goto err_out_free_hw;
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 6d0b9dfac313..78bb4ceb1cdd 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -787,10 +787,10 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
if (rc)
goto err_out;
+ disable_dev_on_err = 1;
rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
goto err_out;
- disable_dev_on_err = 1;
pci_set_master (pdev);
@@ -1110,6 +1110,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
return 0;
err_out:
+ netif_napi_del(&tp->napi);
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
return i;
@@ -1124,6 +1125,7 @@ static void rtl8139_remove_one(struct pci_dev *pdev)
assert (dev != NULL);
cancel_delayed_work_sync(&tp->thread);
+ netif_napi_del(&tp->napi);
unregister_netdev (dev);
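
The 8139too hunks above pair the earlier netif_napi_add() with netif_napi_del() on both the probe error path and device removal, so the NAPI instance does not linger on the device's napi list. A hedged sketch of that pairing on an error path (function name hypothetical):

#include <linux/netdevice.h>

static int example_register_tail(struct net_device *dev,
				 struct napi_struct *napi)
{
	int err = register_netdev(dev);

	if (err)
		netif_napi_del(napi);   /* undo the earlier netif_napi_add() */
	return err;
}
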
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c29ba80ae02b..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRL31] = 0x01fc,
};
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -473,6 +476,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
.apr = 1,
.mpr = 1,
@@ -495,6 +499,9 @@ static struct sh_eth_cpu_data r8a779x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
+
+ .trscer_err_mask = DESC_I_RINT8,
.apr = 1,
.mpr = 1,
@@ -856,6 +863,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->eesr_err_check)
cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+ if (!cd->trscer_err_mask)
+ cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
@@ -1113,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1126,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
sh_eth_set_receive_align(skb);
@@ -1135,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 16 bytes. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
- dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
- DMA_FROM_DEVICE);
- rxdesc->addr = virt_to_phys(skb->data);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[i] = skb;
+ rxdesc->addr = dma_addr;
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1294,7 +1310,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
/* Frame recv control (enable multiple-packets per rx irq) */
sh_eth_write(ndev, RMCR_RNC, RMCR);
- sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+ sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
@@ -1309,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- if (start)
+ if (start) {
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ }
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1349,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
return ret;
}
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+
+ /* Deactivate all TX descriptors, so DMA should stop at next
+ * packet boundary if it's currently running
+ */
+ for (i = 0; i < mdp->num_tx_ring; i++)
+ mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+ /* Disable TX FIFO egress to MAC */
+ sh_eth_rcv_snd_disable(ndev);
+
+ /* Stop RX DMA at next packet boundary */
+ sh_eth_write(ndev, 0, EDRRR);
+
+ /* Aside from TX DMA, we can't tell when the hardware is
+ * really stopped, so we need to reset to make sure.
+ * Before doing that, wait for long enough to *probably*
+ * finish transmitting the last packet and poll stats.
+ */
+ msleep(2); /* max frame time at 10 Mbps < 1250 us */
+ sh_eth_get_stats(ndev);
+ sh_eth_reset(ndev);
+}
+
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
@@ -1393,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
u16 pkt_len = 0;
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
@@ -1440,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
- ALIGN(mdp->rx_buf_sz, 16),
- DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev, rxdesc->addr,
+ ALIGN(mdp->rx_buf_sz, 16),
+ DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
@@ -1462,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
- dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
- rxdesc->addr = virt_to_phys(skb->data);
+ rxdesc->addr = dma_addr;
}
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
@@ -1566,7 +1617,6 @@ ignore_link:
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
ndev->stats.rx_frame_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Abort\n");
}
}
@@ -1585,13 +1635,11 @@ ignore_link:
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
ndev->stats.rx_fifo_errors++;
- netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1646,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
ret = IRQ_HANDLED;
else
- goto other_irq;
+ goto out;
+
+ if (!likely(mdp->irq_enabled)) {
+ sh_eth_write(ndev, 0, EESIPR);
+ goto out;
+ }
if (intr_status & EESR_RX_CHECK) {
if (napi_schedule_prep(&mdp->napi)) {
@@ -1677,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
sh_eth_error(ndev, intr_status);
}
-other_irq:
+out:
spin_unlock(&mdp->lock);
return ret;
@@ -1705,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* Reenable Rx interrupts */
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (mdp->irq_enabled)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
return budget - quota;
}
@@ -1820,6 +1874,9 @@ static int sh_eth_get_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_ethtool_gset(mdp->phydev, ecmd);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1834,6 +1891,9 @@ static int sh_eth_set_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
/* disable tx and rx */
@@ -1868,6 +1928,9 @@ static int sh_eth_nway_reset(struct net_device *ndev)
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_start_aneg(mdp->phydev);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1952,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return -EINVAL;
if (netif_running(ndev)) {
+ netif_device_detach(ndev);
netif_tx_disable(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
- sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+
+ /* Serialise with the interrupt handler and NAPI, then
+ * disable interrupts. We have to clear the
+ * irq_enabled flag first to ensure that interrupts
+ * won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
synchronize_irq(ndev->irq);
- }
+ napi_synchronize(&mdp->napi);
+ sh_eth_write(ndev, 0x0000, EESIPR);
- /* Free all the skbuffs in the Rx queue. */
- sh_eth_ring_free(ndev);
- /* Free DMA buffer */
- sh_eth_free_dma_buffer(mdp);
+ sh_eth_dev_exit(ndev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+ }
/* Set new parameters */
mdp->num_rx_ring = ring->rx_pending;
mdp->num_tx_ring = ring->tx_pending;
- ret = sh_eth_ring_init(ndev);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
- return ret;
- }
- ret = sh_eth_dev_init(ndev, false);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
- return ret;
- }
-
if (netif_running(ndev)) {
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+ __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+ __func__);
+ return ret;
+ }
+
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* Setting the Rx mode will start the Rx process. */
sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_wake_queue(ndev);
+ netif_device_attach(ndev);
}
return 0;
@@ -2101,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
@@ -2110,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETH_ZLEN)
- txdesc->buffer_length = ETH_ZLEN;
- else
- txdesc->buffer_length = skb->len;
+ if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txdesc->buffer_length = skb->len;
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2165,24 +2242,26 @@ static int sh_eth_close(struct net_device *ndev)
netif_stop_queue(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
+ /* Serialise with the interrupt handler and NAPI, then disable
+ * interrupts. We have to clear the irq_enabled flag first to
+ * ensure that interrupts won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
+ synchronize_irq(ndev->irq);
+ napi_disable(&mdp->napi);
sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+ sh_eth_dev_exit(ndev);
- sh_eth_get_stats(ndev);
/* PHY Disconnect */
if (mdp->phydev) {
phy_stop(mdp->phydev);
phy_disconnect(mdp->phydev);
+ mdp->phydev = NULL;
}
free_irq(ndev->irq, ndev);
- napi_disable(&mdp->napi);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
@@ -2410,7 +2489,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int i, ret;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return 0;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
@@ -2433,7 +2512,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
@@ -2443,8 +2522,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
}
}
-/* Multicast reception directions set */
-static void sh_eth_set_multicast_list(struct net_device *ndev)
+/* Update promiscuous flag and multicast filter */
+static void sh_eth_set_rx_mode(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 ecmr_bits;
@@ -2455,7 +2534,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
/* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
- ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
+ ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
+ if (mdp->cd->tsu)
+ ecmr_bits |= ECMR_MCT;
if (!(ndev->flags & IFF_MULTICAST)) {
sh_eth_tsu_purge_mcast(ndev);
@@ -2484,9 +2565,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
}
}
}
- } else {
- /* Normal, unicast/broadcast-only mode. */
- ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
}
/* update the ethernet mode */
@@ -2694,6 +2772,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -2706,7 +2785,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
- .ndo_set_rx_mode = sh_eth_set_multicast_list,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
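
Throughout the sh_eth hunks above, Rx buffers are published to the ring only after dma_map_single() has been checked with dma_mapping_error(); on failure the skb is freed and the (re)fill loop simply stops for that round, and the skb pointer is stored only once the mapping is known to be good. A hedged kernel-style sketch of one refill step, with illustrative parameters:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int refill_one(struct net_device *ndev, struct sk_buff **slot,
		      dma_addr_t *desc_addr, unsigned int buf_len)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, buf_len);
	dma_addr_t dma;

	if (!skb)
		return -ENOMEM;

	dma = dma_map_single(&ndev->dev, skb->data, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma)) {
		kfree_skb(skb);          /* nothing published, nothing leaked */
		return -EIO;
	}

	*slot = skb;                     /* the ring owns the skb only now */
	*desc_addr = dma;
	return 0;
}
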
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 22301bf9c21d..332d3c16d483 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
DESC_I_RINT1 = 0x0001,
};
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
/* RPADIR */
enum RPADIR_BIT {
RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
unsigned long tx_check;
unsigned long eesr_err_check;
+ /* Error mask */
+ unsigned long trscer_err_mask;
+
/* hardware features */
unsigned long irq_flags; /* IRQ configuration flags */
unsigned no_psr:1; /* EtherC DO NOT have PSR */
@@ -508,6 +513,7 @@ struct sh_eth_private {
u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
+ bool irq_enabled;
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
deleted file mode 100644
index f537cbea20e5..000000000000
--- a/drivers/net/ethernet/s6gmac.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * Ethernet driver for S6105 on chip network device
- * (c)2008 emlix GmbH http://www.emlix.com
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if.h>
-#include <linux/stddef.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <variant/hardware.h>
-#include <variant/dmac.h>
-
-#define DRV_NAME "s6gmac"
-#define DRV_PRMT DRV_NAME ": "
-
-
-/* register declarations */
-
-#define S6_GMAC_MACCONF1 0x000
-#define S6_GMAC_MACCONF1_TXENA 0
-#define S6_GMAC_MACCONF1_SYNCTX 1
-#define S6_GMAC_MACCONF1_RXENA 2
-#define S6_GMAC_MACCONF1_SYNCRX 3
-#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
-#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
-#define S6_GMAC_MACCONF1_LOOPBACK 8
-#define S6_GMAC_MACCONF1_RESTXFUNC 16
-#define S6_GMAC_MACCONF1_RESRXFUNC 17
-#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
-#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
-#define S6_GMAC_MACCONF1_SIMULRES 30
-#define S6_GMAC_MACCONF1_SOFTRES 31
-#define S6_GMAC_MACCONF2 0x004
-#define S6_GMAC_MACCONF2_FULL 0
-#define S6_GMAC_MACCONF2_CRCENA 1
-#define S6_GMAC_MACCONF2_PADCRCENA 2
-#define S6_GMAC_MACCONF2_LENGTHFCHK 4
-#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
-#define S6_GMAC_MACCONF2_IFMODE 8
-#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
-#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
-#define S6_GMAC_MACCONF2_IFMODE_MASK 3
-#define S6_GMAC_MACCONF2_PREAMBLELEN 12
-#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
-#define S6_GMAC_MACIPGIFG 0x008
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
-#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
-#define S6_GMAC_MACHALFDUPLEX 0x00C
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
-#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
-#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
-#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
-#define S6_GMAC_MACMAXFRAMELEN 0x010
-#define S6_GMAC_MACMIICONF 0x020
-#define S6_GMAC_MACMIICONF_CSEL 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
-#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
-#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
-#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
-#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
-#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
-#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
-#define S6_GMAC_MACMIICONF_CSEL_MASK 7
-#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
-#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
-#define S6_GMAC_MACMIICMD 0x024
-#define S6_GMAC_MACMIICMD_READ 0
-#define S6_GMAC_MACMIICMD_SCAN 1
-#define S6_GMAC_MACMIIADDR 0x028
-#define S6_GMAC_MACMIIADDR_REG 0
-#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
-#define S6_GMAC_MACMIIADDR_PHY 8
-#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
-#define S6_GMAC_MACMIICTRL 0x02C
-#define S6_GMAC_MACMIISTAT 0x030
-#define S6_GMAC_MACMIIINDI 0x034
-#define S6_GMAC_MACMIIINDI_BUSY 0
-#define S6_GMAC_MACMIIINDI_SCAN 1
-#define S6_GMAC_MACMIIINDI_INVAL 2
-#define S6_GMAC_MACINTERFSTAT 0x03C
-#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
-#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
-#define S6_GMAC_MACSTATADDR1 0x040
-#define S6_GMAC_MACSTATADDR2 0x044
-
-#define S6_GMAC_FIFOCONF0 0x048
-#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
-#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
-#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
-#define S6_GMAC_FIFOCONF0_HSTRSTST 3
-#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
-#define S6_GMAC_FIFOCONF0_WTMENREQ 8
-#define S6_GMAC_FIFOCONF0_SRFENREQ 9
-#define S6_GMAC_FIFOCONF0_FRFENREQ 10
-#define S6_GMAC_FIFOCONF0_STFENREQ 11
-#define S6_GMAC_FIFOCONF0_FTFENREQ 12
-#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
-#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
-#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
-#define S6_GMAC_FIFOCONF0_STFENRPLY 19
-#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
-#define S6_GMAC_FIFOCONF1 0x04C
-#define S6_GMAC_FIFOCONF2 0x050
-#define S6_GMAC_FIFOCONF2_CFGLWM 0
-#define S6_GMAC_FIFOCONF2_CFGHWM 16
-#define S6_GMAC_FIFOCONF3 0x054
-#define S6_GMAC_FIFOCONF3_CFGFTTH 0
-#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
-#define S6_GMAC_FIFOCONF4 0x058
-#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
-#define S6_GMAC_FIFOCONF_RSV_RUNT 1
-#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
-#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
-#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
-#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
-#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
-#define S6_GMAC_FIFOCONF_RSV_OK 7
-#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
-#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
-#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
-#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
-#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
-#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
-#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
-#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
-#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
-#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
-#define S6_GMAC_FIFOCONF5 0x05C
-#define S6_GMAC_FIFOCONF5_DROPLT64 18
-#define S6_GMAC_FIFOCONF5_CFGBYTM 19
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
-
-#define S6_GMAC_STAT_REGS 0x080
-#define S6_GMAC_STAT_SIZE_MIN 12
-#define S6_GMAC_STATTR64 0x080
-#define S6_GMAC_STATTR64_SIZE 18
-#define S6_GMAC_STATTR127 0x084
-#define S6_GMAC_STATTR127_SIZE 18
-#define S6_GMAC_STATTR255 0x088
-#define S6_GMAC_STATTR255_SIZE 18
-#define S6_GMAC_STATTR511 0x08C
-#define S6_GMAC_STATTR511_SIZE 18
-#define S6_GMAC_STATTR1K 0x090
-#define S6_GMAC_STATTR1K_SIZE 18
-#define S6_GMAC_STATTRMAX 0x094
-#define S6_GMAC_STATTRMAX_SIZE 18
-#define S6_GMAC_STATTRMGV 0x098
-#define S6_GMAC_STATTRMGV_SIZE 18
-#define S6_GMAC_STATRBYT 0x09C
-#define S6_GMAC_STATRBYT_SIZE 24
-#define S6_GMAC_STATRPKT 0x0A0
-#define S6_GMAC_STATRPKT_SIZE 18
-#define S6_GMAC_STATRFCS 0x0A4
-#define S6_GMAC_STATRFCS_SIZE 12
-#define S6_GMAC_STATRMCA 0x0A8
-#define S6_GMAC_STATRMCA_SIZE 18
-#define S6_GMAC_STATRBCA 0x0AC
-#define S6_GMAC_STATRBCA_SIZE 22
-#define S6_GMAC_STATRXCF 0x0B0
-#define S6_GMAC_STATRXCF_SIZE 18
-#define S6_GMAC_STATRXPF 0x0B4
-#define S6_GMAC_STATRXPF_SIZE 12
-#define S6_GMAC_STATRXUO 0x0B8
-#define S6_GMAC_STATRXUO_SIZE 12
-#define S6_GMAC_STATRALN 0x0BC
-#define S6_GMAC_STATRALN_SIZE 12
-#define S6_GMAC_STATRFLR 0x0C0
-#define S6_GMAC_STATRFLR_SIZE 16
-#define S6_GMAC_STATRCDE 0x0C4
-#define S6_GMAC_STATRCDE_SIZE 12
-#define S6_GMAC_STATRCSE 0x0C8
-#define S6_GMAC_STATRCSE_SIZE 12
-#define S6_GMAC_STATRUND 0x0CC
-#define S6_GMAC_STATRUND_SIZE 12
-#define S6_GMAC_STATROVR 0x0D0
-#define S6_GMAC_STATROVR_SIZE 12
-#define S6_GMAC_STATRFRG 0x0D4
-#define S6_GMAC_STATRFRG_SIZE 12
-#define S6_GMAC_STATRJBR 0x0D8
-#define S6_GMAC_STATRJBR_SIZE 12
-#define S6_GMAC_STATRDRP 0x0DC
-#define S6_GMAC_STATRDRP_SIZE 12
-#define S6_GMAC_STATTBYT 0x0E0
-#define S6_GMAC_STATTBYT_SIZE 24
-#define S6_GMAC_STATTPKT 0x0E4
-#define S6_GMAC_STATTPKT_SIZE 18
-#define S6_GMAC_STATTMCA 0x0E8
-#define S6_GMAC_STATTMCA_SIZE 18
-#define S6_GMAC_STATTBCA 0x0EC
-#define S6_GMAC_STATTBCA_SIZE 18
-#define S6_GMAC_STATTXPF 0x0F0
-#define S6_GMAC_STATTXPF_SIZE 12
-#define S6_GMAC_STATTDFR 0x0F4
-#define S6_GMAC_STATTDFR_SIZE 12
-#define S6_GMAC_STATTEDF 0x0F8
-#define S6_GMAC_STATTEDF_SIZE 12
-#define S6_GMAC_STATTSCL 0x0FC
-#define S6_GMAC_STATTSCL_SIZE 12
-#define S6_GMAC_STATTMCL 0x100
-#define S6_GMAC_STATTMCL_SIZE 12
-#define S6_GMAC_STATTLCL 0x104
-#define S6_GMAC_STATTLCL_SIZE 12
-#define S6_GMAC_STATTXCL 0x108
-#define S6_GMAC_STATTXCL_SIZE 12
-#define S6_GMAC_STATTNCL 0x10C
-#define S6_GMAC_STATTNCL_SIZE 13
-#define S6_GMAC_STATTPFH 0x110
-#define S6_GMAC_STATTPFH_SIZE 12
-#define S6_GMAC_STATTDRP 0x114
-#define S6_GMAC_STATTDRP_SIZE 12
-#define S6_GMAC_STATTJBR 0x118
-#define S6_GMAC_STATTJBR_SIZE 12
-#define S6_GMAC_STATTFCS 0x11C
-#define S6_GMAC_STATTFCS_SIZE 12
-#define S6_GMAC_STATTXCF 0x120
-#define S6_GMAC_STATTXCF_SIZE 12
-#define S6_GMAC_STATTOVR 0x124
-#define S6_GMAC_STATTOVR_SIZE 12
-#define S6_GMAC_STATTUND 0x128
-#define S6_GMAC_STATTUND_SIZE 12
-#define S6_GMAC_STATTFRG 0x12C
-#define S6_GMAC_STATTFRG_SIZE 12
-#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
-#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
-#define S6_GMAC_STATCARRY1_RDRP 0
-#define S6_GMAC_STATCARRY1_RJBR 1
-#define S6_GMAC_STATCARRY1_RFRG 2
-#define S6_GMAC_STATCARRY1_ROVR 3
-#define S6_GMAC_STATCARRY1_RUND 4
-#define S6_GMAC_STATCARRY1_RCSE 5
-#define S6_GMAC_STATCARRY1_RCDE 6
-#define S6_GMAC_STATCARRY1_RFLR 7
-#define S6_GMAC_STATCARRY1_RALN 8
-#define S6_GMAC_STATCARRY1_RXUO 9
-#define S6_GMAC_STATCARRY1_RXPF 10
-#define S6_GMAC_STATCARRY1_RXCF 11
-#define S6_GMAC_STATCARRY1_RBCA 12
-#define S6_GMAC_STATCARRY1_RMCA 13
-#define S6_GMAC_STATCARRY1_RFCS 14
-#define S6_GMAC_STATCARRY1_RPKT 15
-#define S6_GMAC_STATCARRY1_RBYT 16
-#define S6_GMAC_STATCARRY1_TRMGV 25
-#define S6_GMAC_STATCARRY1_TRMAX 26
-#define S6_GMAC_STATCARRY1_TR1K 27
-#define S6_GMAC_STATCARRY1_TR511 28
-#define S6_GMAC_STATCARRY1_TR255 29
-#define S6_GMAC_STATCARRY1_TR127 30
-#define S6_GMAC_STATCARRY1_TR64 31
-#define S6_GMAC_STATCARRY2_TDRP 0
-#define S6_GMAC_STATCARRY2_TPFH 1
-#define S6_GMAC_STATCARRY2_TNCL 2
-#define S6_GMAC_STATCARRY2_TXCL 3
-#define S6_GMAC_STATCARRY2_TLCL 4
-#define S6_GMAC_STATCARRY2_TMCL 5
-#define S6_GMAC_STATCARRY2_TSCL 6
-#define S6_GMAC_STATCARRY2_TEDF 7
-#define S6_GMAC_STATCARRY2_TDFR 8
-#define S6_GMAC_STATCARRY2_TXPF 9
-#define S6_GMAC_STATCARRY2_TBCA 10
-#define S6_GMAC_STATCARRY2_TMCA 11
-#define S6_GMAC_STATCARRY2_TPKT 12
-#define S6_GMAC_STATCARRY2_TBYT 13
-#define S6_GMAC_STATCARRY2_TFRG 14
-#define S6_GMAC_STATCARRY2_TUND 15
-#define S6_GMAC_STATCARRY2_TOVR 16
-#define S6_GMAC_STATCARRY2_TXCF 17
-#define S6_GMAC_STATCARRY2_TFCS 18
-#define S6_GMAC_STATCARRY2_TJBR 19
-
-#define S6_GMAC_HOST_PBLKCTRL 0x140
-#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
-#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
-#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
-#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
-#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
-#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
-#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
-#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
-#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
-#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
-#define S6_GMAC_HOST_INTMASK 0x144
-#define S6_GMAC_HOST_INTSTAT 0x148
-#define S6_GMAC_HOST_INT_TXBURSTOVER 3
-#define S6_GMAC_HOST_INT_TXPREWOVER 4
-#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
-#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
-#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
-#define S6_GMAC_HOST_RXFIFOHWM 0x14C
-#define S6_GMAC_HOST_CTRLFRAMXP 0x150
-#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
-#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
-
-#define S6_GMAC_BURST_PREWR 0x1B0
-#define S6_GMAC_BURST_PREWR_LEN 0
-#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_PREWR_CFE 20
-#define S6_GMAC_BURST_PREWR_PPE 21
-#define S6_GMAC_BURST_PREWR_FCS 22
-#define S6_GMAC_BURST_PREWR_PAD 23
-#define S6_GMAC_BURST_POSTRD 0x1D0
-#define S6_GMAC_BURST_POSTRD_LEN 0
-#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_POSTRD_DROP 20
-
-
-/* data handling */
-
-#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
-#define S6_NUM_RX_SKB 16
-#define S6_MAX_FRLEN 1536
-
-struct s6gmac {
- u32 reg;
- u32 tx_dma;
- u32 rx_dma;
- u32 io;
- u8 tx_chan;
- u8 rx_chan;
- spinlock_t lock;
- u8 tx_skb_i, tx_skb_o;
- u8 rx_skb_i, rx_skb_o;
- struct sk_buff *tx_skb[S6_NUM_TX_SKB];
- struct sk_buff *rx_skb[S6_NUM_RX_SKB];
- unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
- unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
- struct phy_device *phydev;
- struct {
- struct mii_bus *bus;
- int irq[PHY_MAX_ADDR];
- } mii;
- struct {
- unsigned int mbit;
- u8 giga;
- u8 isup;
- u8 full;
- } link;
-};
-
-static void s6gmac_rx_fillfifo(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct sk_buff *skb;
- while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
- (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
- (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
- pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
- s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
- pd->io, (u32)skb->data, S6_MAX_FRLEN);
- }
-}
-
-static void s6gmac_rx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 pfx;
- struct sk_buff *skb;
- while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
- s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
- skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
- pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
- if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
- dev_kfree_skb_irq(skb);
- } else {
- skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
- & S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx(skb);
- }
- }
-}
-
-static void s6gmac_tx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
- s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
- dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- }
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
-}
-
-struct s6gmac_statinf {
- unsigned reg_size : 4; /* 0: unused */
- unsigned reg_off : 6;
- unsigned net_index : 6;
-};
-
-#define S6_STATS_B (8 * sizeof(u32))
-#define S6_STATS_C(b, r, f) [b] = { \
- BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
- BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
- >= (1<<4)) + \
- r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
- BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
- >= ((1<<6)-1)) + \
- (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
- BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
- % sizeof(unsigned long)) + \
- BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
- / sizeof(unsigned long)) >= (1<<6))) + \
- BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
- != sizeof(unsigned long))) + \
- (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
-
-static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
- S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
- S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
-}, {
- S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
- S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
- S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
-} };
-
-static void s6gmac_stats_collect(struct s6gmac *pd,
- const struct s6gmac_statinf *inf)
-{
- int b;
- for (b = 0; b < S6_STATS_B; b++) {
- if (inf[b].reg_size) {
- pd->stats[inf[b].net_index] +=
- readl(pd->reg + S6_GMAC_STAT_REGS
- + sizeof(u32) * inf[b].reg_off);
- }
- }
-}
-
-static void s6gmac_stats_carry(struct s6gmac *pd,
- const struct s6gmac_statinf *inf, u32 mask)
-{
- int b;
- while (mask) {
- b = fls(mask) - 1;
- mask &= ~(1 << b);
- pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
- }
-}
-
-static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
-{
- int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
- ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
- return r;
-}
-
-static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
-{
- u32 mask;
- mask = s6gmac_stats_pending(pd, carry);
- if (mask) {
- writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
- s6gmac_stats_carry(pd, &statinf[carry][0], mask);
- }
-}
-
-static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s6gmac *pd = netdev_priv(dev);
- if (!dev)
- return IRQ_NONE;
- spin_lock(&pd->lock);
- if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
- s6gmac_rx_interrupt(dev);
- s6gmac_rx_fillfifo(dev);
- if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
- s6gmac_tx_interrupt(dev);
- s6gmac_stats_interrupt(pd, 0);
- s6gmac_stats_interrupt(pd, 1);
- spin_unlock(&pd->lock);
- return IRQ_HANDLED;
-}
-
-static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
- u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
-{
- writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
- writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
- writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
- writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
-}
-
-static inline void s6gmac_stop_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- writel(0, pd->reg + S6_GMAC_MACCONF1);
-}
-
-static inline void s6gmac_init_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int is_rgmii = !!(pd->phydev->supported
- & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
-#if 0
- writel(1 << S6_GMAC_MACCONF1_SYNCTX |
- 1 << S6_GMAC_MACCONF1_SYNCRX |
- 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RESTXFUNC |
- 1 << S6_GMAC_MACCONF1_RESRXFUNC |
- 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
- 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
- pd->reg + S6_GMAC_MACCONF1);
-#endif
- writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
- udelay(1000);
- writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
- pd->reg + S6_GMAC_MACCONF1);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(1 << S6_GMAC_MACCONF1_TXENA |
- 1 << S6_GMAC_MACCONF1_RXENA |
- (dev->flags & IFF_LOOPBACK ? 1 : 0)
- << S6_GMAC_MACCONF1_LOOPBACK,
- pd->reg + S6_GMAC_MACCONF1);
- writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
- dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
- pd->reg + S6_GMAC_MACMAXFRAMELEN);
- writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
- 1 << S6_GMAC_MACCONF2_PADCRCENA |
- 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
- (pd->link.giga ?
- S6_GMAC_MACCONF2_IFMODE_BYTE :
- S6_GMAC_MACCONF2_IFMODE_NIBBLE)
- << S6_GMAC_MACCONF2_IFMODE |
- 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
- pd->reg + S6_GMAC_MACCONF2);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
- writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
- 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_STFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
- pd->reg + S6_GMAC_FIFOCONF0);
- writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
- 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
- pd->reg + S6_GMAC_FIFOCONF3);
- writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
- 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_OK |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
- 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
- pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
- 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
- pd->reg + S6_GMAC_FIFOCONF5);
- writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
- pd->reg + S6_GMAC_FIFOCONF4);
- s6gmac_set_dstaddr(pd, 0,
- 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 1,
- dev->dev_addr[5] |
- dev->dev_addr[4] << 8 |
- dev->dev_addr[3] << 16 |
- dev->dev_addr[2] << 24,
- dev->dev_addr[1] |
- dev->dev_addr[0] << 8,
- 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 2,
- 0x00000000, 0x00000100, 0x00000000, 0x00000100);
- s6gmac_set_dstaddr(pd, 3,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
-}
-
-static void s6mii_enable(struct s6gmac *pd)
-{
- writel(readl(pd->reg + S6_GMAC_MACCONF1) &
- ~(1 << S6_GMAC_MACCONF1_SOFTRES),
- pd->reg + S6_GMAC_MACCONF1);
- writel((readl(pd->reg + S6_GMAC_MACMIICONF)
- & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
- | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
- pd->reg + S6_GMAC_MACMIICONF);
-}
-
-static int s6mii_busy(struct s6gmac *pd, int tmo)
-{
- while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
- if (--tmo == 0)
- return -ETIME;
- udelay(64);
- }
- return 0;
-}
-
-static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
- writel(0, pd->reg + S6_GMAC_MACMIICMD);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
-}
-
-static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(value, pd->reg + S6_GMAC_MACMIICTRL);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return 0;
-}
-
-static int s6mii_reset(struct mii_bus *bus)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
- return -ETIME;
- return 0;
-}
-
-static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
-{
- u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
- pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
- switch (pd->link.mbit) {
- case 10:
- pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 100:
- pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 1000:
- pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- default:
- return;
- }
- writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
-}
-
-static inline void s6gmac_linkisup(struct net_device *dev, int isup)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
-
- pd->link.full = phydev->duplex;
- pd->link.giga = (phydev->speed == 1000);
- if (pd->link.mbit != phydev->speed) {
- pd->link.mbit = phydev->speed;
- s6gmac_set_rgmii_txclock(pd);
- }
- pd->link.isup = isup;
- if (isup)
- netif_carrier_on(dev);
- phy_print_status(phydev);
-}
-
-static void s6gmac_adjust_link(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
- if (pd->link.isup &&
- (!phydev->link ||
- (pd->link.mbit != phydev->speed) ||
- (pd->link.full != phydev->duplex))) {
- pd->link.isup = 0;
- netif_tx_disable(dev);
- if (!phydev->link) {
- netif_carrier_off(dev);
- phy_print_status(phydev);
- }
- }
- if (!pd->link.isup && phydev->link) {
- if (pd->link.full != phydev->duplex) {
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- if (phydev->duplex)
- maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
- else
- maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (pd->link.giga != (phydev->speed == 1000)) {
- u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
- << S6_GMAC_MACCONF2_IFMODE);
- if (phydev->speed == 1000) {
- fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
- maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
- << S6_GMAC_MACCONF2_IFMODE;
- } else {
- fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
- maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
- << S6_GMAC_MACCONF2_IFMODE;
- }
- writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
- s6gmac_linkisup(dev, 1);
- }
-}
-
-static inline int s6gmac_phy_start(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int i = 0;
- struct phy_device *p = NULL;
- while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
- i++;
- p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
- PHY_INTERFACE_MODE_RGMII);
- if (IS_ERR(p)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(p);
- }
- p->supported &= PHY_GBIT_FEATURES;
- p->advertising = p->supported;
- pd->phydev = p;
- return 0;
-}
-
-static inline void s6gmac_init_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 mask;
- mask = 1 << S6_GMAC_STATCARRY1_RDRP |
- 1 << S6_GMAC_STATCARRY1_RJBR |
- 1 << S6_GMAC_STATCARRY1_RFRG |
- 1 << S6_GMAC_STATCARRY1_ROVR |
- 1 << S6_GMAC_STATCARRY1_RUND |
- 1 << S6_GMAC_STATCARRY1_RCDE |
- 1 << S6_GMAC_STATCARRY1_RFLR |
- 1 << S6_GMAC_STATCARRY1_RALN |
- 1 << S6_GMAC_STATCARRY1_RMCA |
- 1 << S6_GMAC_STATCARRY1_RFCS |
- 1 << S6_GMAC_STATCARRY1_RPKT |
- 1 << S6_GMAC_STATCARRY1_RBYT;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
- mask = 1 << S6_GMAC_STATCARRY2_TDRP |
- 1 << S6_GMAC_STATCARRY2_TNCL |
- 1 << S6_GMAC_STATCARRY2_TXCL |
- 1 << S6_GMAC_STATCARRY2_TEDF |
- 1 << S6_GMAC_STATCARRY2_TPKT |
- 1 << S6_GMAC_STATCARRY2_TBYT |
- 1 << S6_GMAC_STATCARRY2_TFRG |
- 1 << S6_GMAC_STATCARRY2_TUND |
- 1 << S6_GMAC_STATCARRY2_TOVR |
- 1 << S6_GMAC_STATCARRY2_TFCS |
- 1 << S6_GMAC_STATCARRY2_TJBR;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
-}
-
-static inline void s6gmac_init_dmac(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
- s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
- s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
- s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
-}
-
-static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&pd->lock, flags);
- writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
- 0 << S6_GMAC_BURST_PREWR_CFE |
- 1 << S6_GMAC_BURST_PREWR_PPE |
- 1 << S6_GMAC_BURST_PREWR_FCS |
- ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
- pd->reg + S6_GMAC_BURST_PREWR);
- s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
- (u32)skb->data, pd->io, skb->len);
- if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_stop_queue(dev);
- if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
- printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
- pd->tx_skb_o, pd->tx_skb_i);
- BUG();
- }
- pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static void s6gmac_tx_timeout(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_tx_interrupt(dev);
- spin_unlock_irqrestore(&pd->lock, flags);
-}
-
-static int s6gmac_open(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- phy_read_status(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- pd->link.mbit = 0;
- s6gmac_linkisup(dev, pd->phydev->link);
- s6gmac_init_device(dev);
- s6gmac_init_stats(dev);
- s6gmac_init_dmac(dev);
- s6gmac_rx_fillfifo(dev);
- s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
- 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
- s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
- 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
- writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
- 0 << S6_GMAC_HOST_INT_TXPREWOVER |
- 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
- 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
- 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
- pd->reg + S6_GMAC_HOST_INTMASK);
- spin_unlock_irqrestore(&pd->lock, flags);
- phy_start(pd->phydev);
- netif_start_queue(dev);
- return 0;
-}
-
-static int s6gmac_stop(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- netif_stop_queue(dev);
- phy_stop(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_init_dmac(dev);
- s6gmac_stop_device(dev);
- while (pd->tx_skb_i != pd->tx_skb_o)
- dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- while (pd->rx_skb_i != pd->rx_skb_o)
- dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static struct net_device_stats *s6gmac_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
- int i;
- do {
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
- pd->stats[i] =
- pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
- s6gmac_stats_collect(pd, &statinf[0][0]);
- s6gmac_stats_collect(pd, &statinf[1][0]);
- i = s6gmac_stats_pending(pd, 0) |
- s6gmac_stats_pending(pd, 1);
- spin_unlock_irqrestore(&pd->lock, flags);
- } while (i);
- st->rx_errors = st->rx_crc_errors +
- st->rx_frame_errors +
- st->rx_length_errors +
- st->rx_missed_errors;
- st->tx_errors += st->tx_aborted_errors;
- return st;
-}
-
-static int s6gmac_probe(struct platform_device *pdev)
-{
- struct net_device *dev;
- struct s6gmac *pd;
- int res;
- unsigned long i;
- struct mii_bus *mb;
-
- dev = alloc_etherdev(sizeof(*pd));
- if (!dev)
- return -ENOMEM;
-
- dev->open = s6gmac_open;
- dev->stop = s6gmac_stop;
- dev->hard_start_xmit = s6gmac_tx;
- dev->tx_timeout = s6gmac_tx_timeout;
- dev->watchdog_timeo = HZ;
- dev->get_stats = s6gmac_stats;
- dev->irq = platform_get_irq(pdev, 0);
- pd = netdev_priv(dev);
- memset(pd, 0, sizeof(*pd));
- spin_lock_init(&pd->lock);
- pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
- i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
- pd->tx_dma = DMA_MASK_DMAC(i);
- pd->tx_chan = DMA_INDEX_CHNL(i);
- i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
- pd->rx_dma = DMA_MASK_DMAC(i);
- pd->rx_chan = DMA_INDEX_CHNL(i);
- pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
- res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
- goto errirq;
- }
- res = register_netdev(dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "error registering device %s\n",
- dev->name);
- goto errdev;
- }
- mb = mdiobus_alloc();
- if (!mb) {
- printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
- res = -ENOMEM;
- goto errmii;
- }
- mb->name = "s6gmac_mii";
- mb->read = s6mii_read;
- mb->write = s6mii_write;
- mb->reset = s6mii_reset;
- mb->priv = pd;
- snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
- mb->phy_mask = ~(1 << 0);
- mb->irq = &pd->mii.irq[0];
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- int n = platform_get_irq(pdev, i + 1);
- if (n < 0)
- n = PHY_POLL;
- pd->mii.irq[i] = n;
- }
- mdiobus_register(mb);
- pd->mii.bus = mb;
- res = s6gmac_phy_start(dev);
- if (res)
- return res;
- platform_set_drvdata(pdev, dev);
- return 0;
-errmii:
- unregister_netdev(dev);
-errdev:
- free_irq(dev->irq, dev);
-errirq:
- free_netdev(dev);
- return res;
-}
-
-static int s6gmac_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- if (dev) {
- struct s6gmac *pd = netdev_priv(dev);
- mdiobus_unregister(pd->mii.bus);
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- free_netdev(dev);
- }
- return 0;
-}
-
-static struct platform_driver s6gmac_driver = {
- .probe = s6gmac_probe,
- .remove = s6gmac_remove,
- .driver = {
- .name = "s6gmac",
- },
-};
-
-module_platform_driver(s6gmac_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
-MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 698494481d18..b1a271853d85 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -474,13 +474,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
/* allocate memory for RX skbuff array */
rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
sizeof(dma_addr_t), GFP_KERNEL);
- if (rx_ring->rx_skbuff_dma == NULL)
- goto dmamem_err;
+ if (!rx_ring->rx_skbuff_dma) {
+ dma_free_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ rx_ring->dma_rx, rx_ring->dma_rx_phy);
+ goto error;
+ }
rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
sizeof(struct sk_buff *), GFP_KERNEL);
- if (rx_ring->rx_skbuff == NULL)
- goto rxbuff_err;
+ if (!rx_ring->rx_skbuff) {
+ kfree(rx_ring->rx_skbuff_dma);
+ goto error;
+ }
/* initialise the buffers */
for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
@@ -502,13 +508,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
err_init_rx_buffers:
while (--desc_index >= 0)
free_rx_ring(priv->device, rx_ring, desc_index);
- kfree(rx_ring->rx_skbuff);
-rxbuff_err:
- kfree(rx_ring->rx_skbuff_dma);
-dmamem_err:
- dma_free_coherent(priv->device,
- rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
- rx_ring->dma_rx, rx_ring->dma_rx_phy);
error:
return -ENOMEM;
}
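
The init_rx_ring() change above moves cleanup to each failure site, which then jumps to a single error label, instead of funnelling every failure through one shared goto ladder. A minimal userspace sketch of that unwind-at-the-failure-site shape follows; the names, sizes and allocation calls are illustrative stand-ins, not the driver's actual resources, and it shows the general pattern rather than a line-for-line transcription of the hunk.

#include <stdlib.h>

/* Illustrative stand-ins for the ring's three allocations. */
int init_ring_sketch(size_t n)
{
	void *desc, *dma_map, *bufs;

	desc = calloc(n, 64);			/* descriptor ring */
	if (!desc)
		goto error;

	dma_map = calloc(n, sizeof(void *));	/* per-buffer DMA handles */
	if (!dma_map) {
		free(desc);			/* undo what is already held */
		goto error;
	}

	bufs = calloc(n, sizeof(void *));	/* buffer pointer array */
	if (!bufs) {
		free(dma_map);
		free(desc);
		goto error;
	}

	free(bufs);				/* demo only: release and succeed */
	free(dma_map);
	free(desc);
	return 0;

error:
	return -1;				/* the driver returns -ENOMEM here */
}
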
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 866560ea9e18..b02eed12bfc5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
}
- /* Get MAC address if available (DT) */
- if (mac)
- ether_addr_copy(priv->dev->dev_addr, mac);
-
priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed\n", __func__);
@@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
goto err_drv_remove;
}
+ /* Get MAC address if available (DT) */
+ if (mac)
+ ether_addr_copy(priv->dev->dev_addr, mac);
+
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 118a427d1942..cf62ff4c8c56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1671,7 +1671,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev)
+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
@@ -1708,9 +1708,11 @@ static int stmmac_hw_setup(struct net_device *dev)
stmmac_mmc_setup(priv);
- ret = stmmac_init_ptp(priv);
- if (ret && ret != -EOPNOTSUPP)
- pr_warn("%s: failed PTP initialisation\n", __func__);
+ if (init_ptp) {
+ ret = stmmac_init_ptp(priv);
+ if (ret && ret != -EOPNOTSUPP)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+ }
#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
@@ -1787,7 +1789,7 @@ static int stmmac_open(struct net_device *dev)
goto init_error;
}
- ret = stmmac_hw_setup(dev);
+ ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
@@ -2776,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* @addr: iobase memory address
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
 + * Return:
 + * the new private structure on success, otherwise an error pointer.
*/
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
@@ -2787,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
SET_NETDEV_DEV(ndev, device);
@@ -3036,7 +3041,7 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
init_dma_desc_rings(ndev, GFP_ATOMIC);
- stmmac_hw_setup(ndev);
+ stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);
napi_enable(&priv->napi);
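
With stmmac_dvr_probe() now returning ERR_PTR(-ENOMEM) instead of NULL, callers can use the usual IS_ERR()/PTR_ERR() idiom to learn why probing failed. The snippet below is a hedged userspace re-implementation of that encoding, only to show how error values ride inside a pointer; the kernel's own macros live in <linux/err.h> and the probe function here is a made-up stand-in.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)      { return (void *)error; }
static inline long ptr_err(const void *ptr)  { return (long)ptr; }
static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int dummy_priv;				/* stands in for the allocated priv */

void *probe_sketch(int out_of_memory)
{
	if (out_of_memory)
		return err_ptr(-ENOMEM);	/* was "return NULL" before the patch */
	return &dummy_priv;
}

int main(void)
{
	void *priv = probe_sketch(1);

	if (is_err(priv))
		printf("probe failed: %ld\n", ptr_err(priv));
	return 0;
}
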
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4032b170fe24..3039de2465ba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -430,7 +430,6 @@ static struct platform_driver stmmac_pltfr_driver = {
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
.of_match_table = of_match_ptr(stmmac_dt_ids),
},
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 45c408ef67d0..d2835bf7b4fb 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1201,6 +1201,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
if (IS_ERR(segs)) {
dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c560f9aeb55d..a39131f494ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
- priv->host_port);
+ priv->host_port, -1);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ int vid;
+
+ if (priv->data.dual_emac)
+ vid = priv->slaves[priv->emac_port].port_vlan;
+ else
+ vid = priv->data.default_vlan;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+ vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -757,6 +764,14 @@ requeue:
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ int value = irq - priv->irqs_table[0];
+
 +	/* NOTICE: the IRQ is acknowledged (EOI) here. The 'value' computed
 +	 * above ensures we always write the correct code to the EOI
 +	 * register: 0 for the RX_THRESH interrupt, 1 for RX, 2 for TX and
 +	 * 3 for MISC.
+ */
+ cpdma_ctlr_eoi(priv->dma, value);
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
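
The comment added in this hunk documents how the EOI code is derived from the serviced IRQ's position in the driver's IRQ table. A tiny hypothetical model of that mapping (the IRQ numbers and names are made up, not the driver's structures):

enum eoi_code { EOI_RX_THRESH = 0, EOI_RX = 1, EOI_TX = 2, EOI_MISC = 3 };

static const int irqs_table[4] = { 40, 41, 42, 43 };	/* made-up IRQ numbers */

int eoi_value_for(int irq)
{
	return irq - irqs_table[0];	/* 0..3, matching enum eoi_code */
}
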
@@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- if (num_tx)
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
@@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
prim_cpsw->irq_enabled = true;
@@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
#endif
@@ -1630,16 +1634,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unsigned short vid)
{
int ret;
- int unreg_mcast_mask;
+ int unreg_mcast_mask = 0;
+ u32 port_mask;
- if (priv->ndev->flags & IFF_ALLMULTI)
- unreg_mcast_mask = ALE_ALL_PORTS;
- else
- unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ if (priv->data.dual_emac) {
+ port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = ALE_ALL_PORTS;
- ret = cpsw_ale_add_vlan(priv->ale, vid,
- ALE_ALL_PORTS << priv->host_port,
- 0, ALE_ALL_PORTS << priv->host_port,
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ }
+
+ ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
unreg_mcast_mask << priv->host_port);
if (ret != 0)
return ret;
@@ -1650,8 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
goto clean_vid;
ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- ALE_ALL_PORTS << priv->host_port,
- ALE_VLAN, vid, 0);
+ port_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
@@ -1672,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ /* In dual EMAC, reserved VLAN id should not be used for
+ * creating VLAN interfaces as this can break the dual
+ * EMAC port separation
+ */
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
return cpsw_add_vlan_ale_entry(priv, vid);
}
@@ -1685,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
if (ret != 0)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 097ebe7077ac..5246b3a18ff8 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int ret, idx;
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
continue;
 +		/* If the vid passed is -1, remove all multicast entries from
 +		 * the table irrespective of the vlan id. If a valid vlan id
 +		 * is passed, remove only multicast entries added for that
 +		 * vlan id; if the vlan id doesn't match, move on to the next
 +		 * entry.
+ */
+ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
+
if (cpsw_ale_get_mcast(ale_entry)) {
u8 addr[6];
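
A hedged sketch of the per-VLAN filtering the new comment describes: a caller passing -1 flushes every multicast entry, while a valid VLAN id only touches entries tagged with that id. Plain arrays and fields stand in for the ALE table; none of these names exist in the driver.

struct ale_entry_sketch {
	int in_use;
	int is_mcast;
	int vid;
};

/* Flush multicast entries; vid == -1 means "any VLAN". */
void flush_mcast_sketch(struct ale_entry_sketch *table, int n, int vid)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!table[i].in_use || !table[i].is_mcast)
			continue;
		if (vid != -1 && table[i].vid != vid)
			continue;		/* keep entries of other VLANs */
		table[i].in_use = 0;		/* free the entry */
	}
}
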
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index c0d4127aa549..af1e7ecd87c6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ea712512c7d1..5fae4354722c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -62,6 +62,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
@@ -343,9 +344,7 @@ struct emac_priv {
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
const char *phy_id;
-#ifdef CONFIG_OF
struct device_node *phy_node;
-#endif
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -922,6 +921,16 @@ static void emac_int_disable(struct emac_priv *priv)
if (priv->int_disable)
priv->int_disable();
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+ /* ack rxen only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+ /* ack txen- only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
local_irq_restore(flags);
} else {
@@ -951,15 +960,6 @@ static void emac_int_enable(struct emac_priv *priv)
* register */
/* NOTE: Rx Threshold and Misc interrupts are not enabled */
-
- /* ack rxen only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_RXEN);
-
- /* ack txen- only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_TXEN);
-
} else {
/* Set DM644x control registers for interrupt control */
emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
@@ -1537,7 +1537,13 @@ static int emac_dev_open(struct net_device *ndev)
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
- pm_runtime_get(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, ret);
+ return ret;
+ }
netif_carrier_off(ndev);
for (cnt = 0; cnt < ETH_ALEN; cnt++)
@@ -1596,8 +1602,20 @@ static int emac_dev_open(struct net_device *ndev)
cpdma_ctlr_start(priv->dma);
priv->phydev = NULL;
+
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!priv->phydev) {
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_node->full_name);
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
/* use the first phy on the bus if pdata did not give us a phy id */
- if (!priv->phy_id) {
+ if (!priv->phydev && !priv->phy_id) {
struct device *phy;
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1606,7 +1624,7 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id = dev_name(phy);
}
- if (priv->phy_id && *priv->phy_id) {
+ if (!priv->phydev && priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
&emac_adjust_link,
PHY_INTERFACE_MODE_MII);
@@ -1627,7 +1645,9 @@ static int emac_dev_open(struct net_device *ndev)
"(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
- } else {
+ }
+
+ if (!priv->phydev) {
/* No PHY , fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
@@ -1724,6 +1744,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
struct emac_priv *priv = netdev_priv(ndev);
u32 mac_control;
u32 stats_clear_mask;
+ int err;
+
+ err = pm_runtime_get_sync(&priv->pdev->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return &ndev->stats;
+ }
/* update emac hardware stats and reset the registers*/
@@ -1766,6 +1795,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+ pm_runtime_put(&priv->pdev->dev);
+
return &ndev->stats;
}
@@ -1859,7 +1890,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
static int davinci_emac_probe(struct platform_device *pdev)
{
int rc = 0;
- struct resource *res;
+ struct resource *res, *res_ctrl;
struct net_device *ndev;
struct emac_priv *priv;
unsigned long hw_ram_addr;
@@ -1876,6 +1907,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
+ devm_clk_put(&pdev->dev, emac_clk);
/* TODO: Probe PHY here if possible */
@@ -1917,11 +1949,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
rc = PTR_ERR(priv->remap_addr);
goto no_pdata;
}
+
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_ctrl) {
+ priv->ctrl_base =
+ devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(priv->ctrl_base))
+ goto no_pdata;
+ } else {
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+ }
+
priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
ndev->base_addr = (unsigned long)priv->remap_addr;
- priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
-
hw_ram_addr = pdata->hw_ram_addr;
if (!hw_ram_addr)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
@@ -1980,12 +2021,22 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_get_sync(&pdev->dev);
+ if (rc < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, rc);
+ goto no_cpdma_chan;
+ }
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
+ pm_runtime_put(&pdev->dev);
goto no_cpdma_chan;
}
@@ -1995,9 +2046,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
return 0;
@@ -2071,9 +2120,14 @@ static const struct emac_platform_data am3517_emac_data = {
.hw_ram_addr = 0x01e20000,
};
+static const struct emac_platform_data dm816_emac_data = {
+ .version = EMAC_VERSION_2,
+};
+
static const struct of_device_id davinci_emac_of_match[] = {
{.compatible = "ti,davinci-dm6467-emac", },
{.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
+ {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9c2d91ea0af4..dbcbf0c5bcfa 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map temac regs.\n");
+ rc = -ENOMEM;
goto nodev;
}
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ rc = -ENODEV;
goto err_iounmap;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 44b8d2bad8c3..4c9b4fa1d3c1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -388,7 +388,6 @@ struct axidma_bd {
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
- * @temac_type: axienet type to identify between soft and hard temac
* @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier
@@ -431,7 +430,6 @@ struct axienet_local {
int tx_irq;
int rx_irq;
- u32 temac_type;
u32 phy_type;
u32 options; /* Current options word */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4ea2d4e6f1d1..a6d2860b712c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+ ret = -ENOMEM;
goto nodev;
}
/* Setup checksum offload, but default to off if not specified */
@@ -1555,10 +1556,6 @@ static int axienet_of_probe(struct platform_device *op)
if ((be32_to_cpup(p)) >= 0x4000)
lp->jumbo_support = 1;
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
- NULL);
- if (p)
- lp->temac_type = be32_to_cpup(p);
p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
if (p)
lp->phy_type = be32_to_cpup(p);
@@ -1567,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ ret = -ENODEV;
goto err_iounmap;
}
lp->dma_regs = of_iomap(np, 0);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 24858799c204..9d4ce388510a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "no IRQ found\n");
+ rc = -ENXIO;
goto error;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2f48f790c9b4..384ca4f4de4a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -590,6 +590,7 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
+#define NETVSC_SEND_BUFFER_ID 0
#define NETVSC_PACKET_SIZE 4096
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index dd867e6cabd6..9f49c0129a78 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -161,8 +161,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
- * SendsendBufferComplete msg (ie sent
- * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
 +	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent an
 +	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg), therefore we need
* to send a revoke msg here
*/
if (net_device->send_section_size) {
@@ -172,7 +172,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
revoke_packet->hdr.msg_type =
NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
- revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+ revoke_packet->msg.v1_msg.revoke_send_buf.id =
+ NETVSC_SEND_BUFFER_ID;
ret = vmbus_sendpacket(net_device->dev->channel,
revoke_packet,
@@ -204,7 +205,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
net_device->send_buf_gpadl_handle = 0;
}
if (net_device->send_buf) {
- /* Free up the receive buffer */
+ /* Free up the send buffer */
vfree(net_device->send_buf);
net_device->send_buf = NULL;
}
@@ -339,9 +340,9 @@ static int netvsc_init_buf(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
- init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+ init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
net_device->send_buf_gpadl_handle;
- init_packet->msg.v1_msg.send_recv_buf.id = 0;
+ init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
/* Send the gpadl notification request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -364,7 +365,7 @@ static int netvsc_init_buf(struct hv_device *device)
netdev_err(ndev, "Unable to complete send buffer "
"initialization with NetVsp - status %d\n",
init_packet->msg.v1_msg.
- send_recv_buf_complete.status);
+ send_send_buf_complete.status);
ret = -EINVAL;
goto cleanup;
}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index a14d87783245..2e195289ddf4 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
};
dst = ip6_route_output(dev_net(dev), NULL, &fl6);
- if (IS_ERR(dst))
+ if (dst->error) {
+ ret = dst->error;
+ dst_release(dst);
goto err;
-
+ }
skb_dst_drop(skb);
skb_dst_set(skb, dst);
err = ip6_local_out(skb);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c530de1e63f5..3ad8ca76196d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -88,6 +88,7 @@ struct kszphy_priv {
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_broadcast_disable = true,
.has_rmii_ref_clk_sel = true,
};
@@ -258,19 +259,6 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
-static int ksz8021_config_init(struct phy_device *phydev)
-{
- int rc;
-
- rc = kszphy_config_init(phydev);
- if (rc)
- return rc;
-
- rc = kszphy_broadcast_disable(phydev);
-
- return rc < 0 ? rc : 0;
-}
-
static int ksz9021_load_values_from_of(struct phy_device *phydev,
struct device_node *of_node, u16 reg,
char *field1, char *field2,
@@ -584,7 +572,7 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -601,7 +589,7 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 93e224217e24..f7ff493f1e73 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
static void team_notify_peers_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, notify_peers.dw.work);
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
schedule_delayed_work(&team->notify_peers.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ if (val)
schedule_delayed_work(&team->notify_peers.dw,
msecs_to_jiffies(team->notify_peers.interval));
}
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
static void team_mcast_rejoin_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, mcast_rejoin.dw.work);
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ if (val)
schedule_delayed_work(&team->mcast_rejoin.dw,
msecs_to_jiffies(team->mcast_rejoin.interval));
}
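
The team driver hunks above switch from atomic_dec_and_test() to atomic_dec_if_positive() so the pending counter can never be pushed below zero when the delayed work races with teardown. Below is a userspace model of the dec-if-positive semantics, using a mutex instead of the kernel's lockless implementation; names are illustrative only.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Decrement *v only if the result stays >= 0; return the would-be result. */
int dec_if_positive(int *v)
{
	int newval;

	pthread_mutex_lock(&lock);
	newval = *v - 1;
	if (newval >= 0)
		*v = newval;
	pthread_mutex_unlock(&lock);
	return newval;
}

/* Caller pattern mirrored from team_notify_peers_work():
 *	val = dec_if_positive(&count_pending);
 *	if (val < 0)	nothing was pending any more - just unlock and return
 *	if (val > 0)	reschedule the delayed work
 */
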
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index dcb6d33141e0..1e9cdca37014 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
awd.done = 0;
urb->context = &awd;
- status = usb_submit_urb(urb, GFP_NOIO);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
// something went wrong
usb_free_urb(urb);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b8a82b86f909..602dc6668c3a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
/* default ethernet address used by the modem */
static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
/* Make up an ethernet header if the packet doesn't have one.
*
* A firmware bug common among several devices cause them to send raw
@@ -332,10 +334,12 @@ next_desc:
usb_driver_release_interface(driver, info->data);
}
- /* Never use the same address on both ends of the link, even
- * if the buggy firmware told us to.
+ /* Never use the same address on both ends of the link, even if the
 + * buggy firmware told us to. Or, if the device is assigned the well-known
 + * buggy firmware MAC address, replace it with a random address.
*/
- if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+ if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+ ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
eth_hw_addr_random(dev->net);
/* make MAC addr easily distinguishable from an IP header */
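
For the qmi_wwan change above, here is a hedged userspace sketch of the address check plus the "random, locally administered, unicast MAC" replacement it triggers (mirroring what eth_hw_addr_random() does, with stand-in constants and a plain rand() source):

#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

static const unsigned char default_modem_addr[ETH_ALEN] = { 0x02, 0x50, 0xf3 };
static const unsigned char buggy_fw_addr[ETH_ALEN] = { 0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00 };

void fix_mac_sketch(unsigned char addr[ETH_ALEN])
{
	int i;

	if (memcmp(addr, default_modem_addr, ETH_ALEN) &&
	    memcmp(addr, buggy_fw_addr, ETH_ALEN))
		return;			/* address looks usable, keep it */

	/* Replace with a random, locally administered, unicast address. */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = rand() & 0xff;
	addr[0] &= 0xfe;		/* clear the multicast bit */
	addr[0] |= 0x02;		/* set the locally administered bit */
}
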
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2d1c77e81836..bf405f134d3a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data)
ocp_reg_write(tp, OCP_SRAM_DATA, data);
}
-static u16 sram_read(struct r8152 *tp, u16 addr)
-{
- ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
- return ocp_reg_read(tp, OCP_SRAM_DATA);
-}
-
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1897,6 +1885,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
netif_wake_queue(netdev);
}
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+ int offset = skb_transport_offset(skb);
+
+ if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+ features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+ else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+ features &= ~NETIF_F_GSO_MASK;
+
+ return features;
+}
+
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -2502,24 +2506,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = ocp_reg_read(tp, OCP_POWER_CFG);
data |= EN_10M_PLLOFF;
ocp_reg_write(tp, OCP_POWER_CFG, data);
- data = sram_read(tp, SRAM_IMPEDANCE);
- data &= ~RX_DRIVING_MASK;
- sram_write(tp, SRAM_IMPEDANCE, data);
+ sram_write(tp, SRAM_IMPEDANCE, 0x0b13);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= PFM_PWM_SWITCH;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
- data = sram_read(tp, SRAM_LPF_CFG);
- data |= LPF_AUTO_TUNE;
- sram_write(tp, SRAM_LPF_CFG, data);
+ /* Enable LPF corner auto tune */
+ sram_write(tp, SRAM_LPF_CFG, 0xf70f);
- data = sram_read(tp, SRAM_10M_AMP1);
- data |= GDAC_IB_UPALL;
- sram_write(tp, SRAM_10M_AMP1, data);
- data = sram_read(tp, SRAM_10M_AMP2);
- data |= AMP_DN;
- sram_write(tp, SRAM_10M_AMP2, data);
+ /* Adjust 10M Amplitude */
+ sram_write(tp, SRAM_10M_AMP1, 0x00af);
+ sram_write(tp, SRAM_10M_AMP2, 0x0208);
set_bit(PHY_RESET, &tp->flags);
}
@@ -3706,6 +3704,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_set_mac_address = rtl8152_set_mac_address,
.ndo_change_mtu = rtl8152_change_mtu,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_features_check = rtl8152_features_check,
};
static void r8152b_get_version(struct r8152 *tp)
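
The rtl8152_features_check() callback added above drops per-packet offloads when the transport header sits past what the TX descriptor can encode, or drops GSO when the packet would not fit the aggregation buffer. A simplified model with plain integers follows; the limits, flag bits and the descriptor-size constant are made up for the sketch, standing in for GTTCPHO_MAX/TCPHO_MAX, agg_buf_sz and netdev_features_t.

#define F_CSUM	0x1u
#define F_GSO	0x2u

unsigned int features_check_sketch(unsigned int features, int is_gso,
				   int needs_csum, int transport_offset,
				   int pkt_len)
{
	int max_offset = is_gso ? 127 : 255;	/* made-up descriptor offset limits */
	int agg_buf_size = 16384;
	int desc_size = 8;

	if ((is_gso || needs_csum) && transport_offset > max_offset)
		features &= ~(F_CSUM | F_GSO);	/* fall back to software */
	else if (pkt_len + desc_size > agg_buf_size)
		features &= ~F_GSO;		/* packet won't fit the TX buffer */

	return features;
}
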
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8bd7191572d..5ca97713bfb3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -760,7 +760,6 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
unsigned int r, received = 0;
-again:
received += virtnet_receive(rq, budget - received);
/* Out of packets? */
@@ -771,7 +770,6 @@ again:
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
- goto again;
}
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 49d9f2291998..7fbd89fbe107 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1579,8 +1579,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb))
- return -EINVAL;
+ if (IS_ERR(skb)) {
+ err = -EINVAL;
+ goto err;
+ }
skb_scrub_packet(skb, xnet);
@@ -1590,12 +1592,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
- return err;
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto err;
+ }
skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb))
- return -ENOMEM;
+ if (WARN_ON(!skb)) {
+ err = -ENOMEM;
+ goto err;
+ }
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1606,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
ttl, src_port, dst_port);
return 0;
+err:
+ dst_release(dst);
+ return err;
}
#endif
@@ -1621,7 +1630,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
skb = udp_tunnel_handle_offloads(skb, udp_sum);
if (IS_ERR(skb))
- return -EINVAL;
+ return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1629,8 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
+ }
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
@@ -1776,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
tos, ttl, df, src_port, dst_port,
htonl(vni << 8),
!net_eq(vxlan->net, dev_net(vxlan->dev)));
-
- if (err < 0)
+ if (err < 0) {
+ /* skb is already freed. */
+ skb = NULL;
goto rt_tx_error;
+ }
+
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
} else {
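
The vxlan hunks above tighten error-path ownership: once the transmit helper takes the skb it also frees it on failure, so the caller must not free it again (hence the "skb is already freed" note and skb = NULL before jumping to the error label). A small hedged sketch of that convention, with malloc'd buffers standing in for skbs:

#include <errno.h>
#include <stdlib.h>

struct buf { void *data; };

/* Consumes 'b' on success *and* on failure, like the patched xmit helpers. */
int xmit_sketch(struct buf *b, int simulate_error)
{
	if (simulate_error) {
		free(b->data);
		free(b);		/* callee owns the buffer: free it here */
		return -EINVAL;
	}
	free(b->data);			/* stands in for handing it to hardware */
	free(b);
	return 0;
}

/* Caller pattern mirrored from vxlan_xmit_one():
 *	err = xmit_sketch(b, ...);
 *	if (err < 0) {
 *		b = NULL;		already freed by the callee
 *		goto tx_error;		the error path must not touch b again
 *	}
 */
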
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9a72640237cb..62b0bf4fdf6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
__ath_cancel_work(sc);
+ disable_irq(sc->irq);
tasklet_disable(&sc->intr_tq);
tasklet_disable(&sc->bcon_tasklet);
spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
r = -EIO;
out:
+ enable_irq(sc->irq);
spin_unlock_bh(&sc->sc_pcu_lock);
tasklet_enable(&sc->bcon_tasklet);
tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
- if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
- return IRQ_NONE;
-
/* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
- if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return IRQ_HANDLED;
/*
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3c06e9365949..9880dae2a569 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -1070,7 +1070,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
*/
if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
- (sdiodev->pdata->oob_irq_supported)))
+ (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
bus_if->wowl_supported = true;
#endif
@@ -1167,7 +1167,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(SDIO, "Enter\n");
- if (sdiodev->pdata->oob_irq_supported)
+ if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
disable_irq_wake(sdiodev->pdata->oob_irq_nr);
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 91c0cb3c368e..21de4fe6cf2d 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -65,7 +65,8 @@ config IPW2100_DEBUG
config IPW2200
tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
- depends on PCI && CFG80211 && CFG80211_WEXT
+ depends on PCI && CFG80211
+ select CFG80211_WEXT
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index e5be2d21868f..a5f9198d5747 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,8 +69,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 10
-#define IWL3160_UCODE_API_MAX 10
+#define IWL7260_UCODE_API_MAX 12
+#define IWL3160_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10
@@ -105,7 +105,7 @@
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index bf0a95cb7153..3668fc57e770 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 10
+#define IWL8000_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 38de1513e4de..850b85a47806 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1323,10 +1323,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
try_again:
/* try next, if any */
- kfree(pieces);
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
+ kfree(pieces);
return;
out_free_fw:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 9564ae173d06..1f7f15eb86da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -310,6 +310,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+#define FH_MEM_TB_MAX_LENGTH (0x00020000)
/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index f2a047f6bb3e..660ddb1b7d8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -243,6 +243,10 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ * regardless of the band or the number of probes. FW will calculate
+ * the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
@@ -253,6 +257,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+ IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
+ IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
};
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 1f2acf47bfb2..cfc0e65b34a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
};
/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of scan iterations in which EBS is
+ * involved.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be a full scan (and so on).
*/
struct iwl_scan_channel_opt {
__le16 flags;
@@ -672,6 +675,7 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
*/
enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@@ -681,6 +685,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+ IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
};
enum iwl_scan_priority {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 31a5b3f4266c..20915587c820 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1004,8 +1004,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
- /* disallow low power states when the FW is down */
- iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ /*
+ * Disallow low power states when the FW is down by taking
+ * the UCODE_DOWN ref. In case of an ongoing hw restart the
+ * ref is already taken, so don't take it again.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
/* async_handlers_wk is now blocked */
@@ -1023,6 +1028,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ /*
+ * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+ * won't be called in this case).
+ */
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
mvm->ucode_loaded = false;
}
@@ -3332,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
msk |= mvmsta->tfd_queue_msk;
}
- if (drop) {
- if (iwl_mvm_flush_tx_path(mvm, msk, true))
- IWL_ERR(mvm, "flush request fail\n");
- mutex_unlock(&mvm->mutex);
- } else {
- mutex_unlock(&mvm->mutex);
+ msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
- /* this can take a while, and we may need/want other operations
- * to succeed while doing this, so do it without the mutex held
- */
- iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
- }
+ if (iwl_mvm_flush_tx_path(mvm, msk, true))
+ IWL_ERR(mvm, "flush request fail\n");
+ mutex_unlock(&mvm->mutex);
+
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
+ */
+ iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index e5294d01181e..844bf7c4c8de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,6 +72,8 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
struct iwl_mvm_scan_params {
u32 max_out_time;
@@ -171,15 +173,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
* already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit.
*/
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band, int n_ssids)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 10;
if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1);
return 10 + 2 * (n_ssids + 1);
}
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
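
For reference, a minimal user-space sketch of the dwell-time arithmetic above; the helpers are hypothetical and only mirror the formulas shown in these hunks (fixed 10/110 values when IWL_UCODE_TLV_API_BASIC_DWELL is set, band-dependent formulas otherwise):

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the dwell selection above: with BASIC_DWELL the values are
 * constant and the firmware computes the real dwell times itself. */
static int active_dwell(bool basic_dwell, bool band_2ghz, int n_ssids)
{
	if (basic_dwell)
		return 10;
	return band_2ghz ? 20 + 3 * (n_ssids + 1) : 10 + 2 * (n_ssids + 1);
}

static int passive_dwell(bool basic_dwell, bool band_2ghz)
{
	if (basic_dwell)
		return 110;
	return band_2ghz ? 100 + 20 : 100 + 10;
}

int main(void)
{
	printf("2.4 GHz, 2 SSIDs: active %d, passive %d\n",
	       active_dwell(false, true, 2), passive_dwell(false, true));   /* 29, 120 */
	printf("5 GHz,   2 SSIDs: active %d, passive %d\n",
	       active_dwell(false, false, 2), passive_dwell(false, false)); /* 16, 110 */
	printf("BASIC_DWELL:      active %d, passive %d\n",
	       active_dwell(true, true, 2), passive_dwell(true, true));     /* 10, 110 */
	return 0;
}
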
@@ -331,7 +339,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
u32 passive_dwell =
- iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+ iwl_mvm_get_passive_dwell(mvm,
+ IEEE80211_BAND_2GHZ);
params->max_out_time = passive_dwell;
} else {
params->passive_fragmented = true;
@@ -348,8 +357,8 @@ not_bound:
params->dwell[band].passive = frag_passive_dwell;
else
params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(band);
- params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+ iwl_mvm_get_passive_dwell(mvm, band);
+ params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids);
}
}
@@ -1098,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify);
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm))
+ goto out;
+
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1134,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_OS)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1290,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->iter_num = cpu_to_le32(1);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful) {
- cmd->channel_opt[0].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[1].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- }
-
if (iwl_mvm_rrm_scan_needed(mvm))
cmd->scan_flags |=
cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1376,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0;
cmd->schedule[1].full_scan_mul = 0;
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
for (i = 1; i <= req->req.n_ssids; i++)
ssid_bitmap |= BIT(i);
@@ -1448,6 +1468,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
@@ -1474,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0xff;
cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4f15d9decc81..c59d07567d90 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_probe_resp(fc))
tx_flags |= TX_CMD_FLG_TSF;
- else if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ } else if (ieee80211_is_back_req(fc)) {
+ struct ieee80211_bar *bar = (void *)skb->data;
+ u16 control = le16_to_cpu(bar->control);
+
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+ tx_cmd->tid_tspec = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+ WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -108,8 +115,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
- /* tid_tspec will default to 0 = BE when QOS isn't enabled */
- ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ else
+ ac = tid_to_mac80211_ac[0];
+
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS;
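
A small stand-alone sketch of the TID extraction added above. The mask and shift values are assumptions taken from include/linux/ieee80211.h of this era (0xf000 and 12), and the BAR control value is made up:

#include <stdio.h>
#include <stdint.h>

/* Assumed to match include/linux/ieee80211.h at the time of this patch. */
#define IEEE80211_BAR_CTRL_TID_INFO_MASK   0xf000
#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT  12

int main(void)
{
	uint16_t bar_control = 0x3004;	/* hypothetical BAR Control field */
	uint8_t tid = (bar_control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
		      IEEE80211_BAR_CTRL_TID_INFO_SHIFT;

	printf("TID %u\n", tid);	/* prints 3 for this example */
	return 0;
}
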
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index e56e77ef5d2e..917431e30f74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
return false;
- if (!mvm->cfg->rx_with_siso_diversity)
+ if (mvm->cfg->rx_with_siso_diversity)
return false;
ieee80211_iterate_active_interfaces_atomic(
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3ee8e3848876..d5aadb00dd9e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -523,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
- (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
+ }
#endif
pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d79a1f44b8e..523fe0c88dcb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -614,7 +614,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
{
u8 *v_addr;
dma_addr_t p_addr;
- u32 offset, chunk_sz = section->len;
+ u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
int ret = 0;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
@@ -1012,16 +1012,21 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans);
- /* Upon stop, the APM issues an interrupt if HW RF kill is set.
- * Clean again the interrupt here
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ udelay(20);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending HW RF kill interrupt all
+ * the time, unless the interrupt is ACKed even if the interrupt
+ * should be masked. Re-ACK all the interrupts here.
*/
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
- /* stop and reset the on-board processor */
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- udelay(20);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
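
The new FH_MEM_TB_MAX_LENGTH cap above limits each copy in iwl_pcie_load_section() to 128 KiB per chunk instead of one section-sized transfer. A minimal sketch of the resulting chunking arithmetic (the section length is invented):

#include <stdio.h>
#include <stddef.h>

#define FH_MEM_TB_MAX_LENGTH 0x00020000	/* from iwl-fh.h above */

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	size_t len = 0x48000;	/* hypothetical firmware section length */
	size_t chunk_sz = min_sz(FH_MEM_TB_MAX_LENGTH, len);
	size_t offset;

	for (offset = 0; offset < len; offset += chunk_sz)
		printf("copy 0x%zx bytes at offset 0x%zx\n",
		       min_sz(chunk_sz, len - offset), offset);
	return 0;
}
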
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 846a2e6e34d8..c70efb9a6e78 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -666,7 +666,8 @@ tx_status_ok:
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
- u8 *entry, int rxring_idx, int desc_idx)
+ struct sk_buff *new_skb, u8 *entry,
+ int rxring_idx, int desc_idx)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
u8 tmp_one = 1;
struct sk_buff *skb;
+ if (likely(new_skb)) {
+ skb = new_skb;
+ goto remap;
+ }
skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (!skb)
return 0;
- rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+remap:
/* just set skb->cb to mapping addr for pci_unmap_single use */
*((dma_addr_t *)skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
bufferaddress = *((dma_addr_t *)skb->cb);
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return 0;
+ rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RX_PREPARE,
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
rtlpci->rx_ring[rxring_idx].idx];
+ struct sk_buff *new_skb;
if (rtlpriv->use_new_trx_flow) {
rx_remained_cnt =
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ /* get a new skb - if allocation fails, the old one will be reused */
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ pr_err("Allocation of new skb failed in %s\n",
+ __func__);
+ goto no_new;
+ }
if (rtlpriv->use_new_trx_flow) {
buffer_desc =
&rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
schedule_work(&rtlpriv->works.lps_change_work);
}
end:
+ skb = new_skb;
+no_new:
if (rtlpriv->use_new_trx_flow) {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ rtlpci->rx_ring[rxring_idx].idx);
} else {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+ rxring_idx,
rtlpci->rx_ring[rxring_idx].idx);
-
if (rtlpci->rx_ring[rxring_idx].idx ==
rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
rtlpci->rx_ring[rxring_idx].idx = 0;
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index efbaf2ae1999..794204e34fba 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
}
queue->remaining_credit = credit_bytes;
+ queue->credit_usec = credit_usec;
err = connect_rings(be, queue);
if (err) {
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 22bcb4e12e2a..d8c10764f130 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -88,10 +88,8 @@ struct netfront_cb {
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
struct netfront_stats {
- u64 rx_packets;
- u64 tx_packets;
- u64 rx_bytes;
- u64 tx_bytes;
+ u64 packets;
+ u64 bytes;
struct u64_stats_sync syncp;
};
@@ -160,7 +158,8 @@ struct netfront_info {
struct netfront_queue *queues;
/* Statistics */
- struct netfront_stats __percpu *stats;
+ struct netfront_stats __percpu *rx_stats;
+ struct netfront_stats __percpu *tx_stats;
atomic_t rx_gso_checksum_fixup;
};
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
- struct netfront_stats *stats = this_cpu_ptr(np->stats);
+ struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
struct xen_netif_tx_request *tx;
char *data = skb->data;
RING_IDX i;
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (notify)
notify_remote_via_irq(queue->tx_irq);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->bytes += skb->len;
+ tx_stats->packets++;
+ u64_stats_update_end(&tx_stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue);
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
static int handle_incoming_queue(struct netfront_queue *queue,
struct sk_buff_head *rxq)
{
- struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+ struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
int packets_dropped = 0;
struct sk_buff *skb;
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
continue;
}
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += skb->len;
+ u64_stats_update_end(&rx_stats->syncp);
/* Pass it up. */
napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
int cpu;
for_each_possible_cpu(cpu) {
- struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+ struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+ struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ tx_packets = tx_stats->packets;
+ tx_bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
- rx_packets = stats->rx_packets;
- tx_packets = stats->tx_packets;
- rx_bytes = stats->rx_bytes;
- tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ rx_packets = rx_stats->packets;
+ rx_bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
#endif
};
+static void xennet_free_netdev(struct net_device *netdev)
+{
+ struct netfront_info *np = netdev_priv(netdev);
+
+ free_percpu(np->rx_stats);
+ free_percpu(np->tx_stats);
+ free_netdev(netdev);
+}
+
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
int err;
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->queues = NULL;
err = -ENOMEM;
- np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
- if (np->stats == NULL)
+ np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->rx_stats == NULL)
+ goto exit;
+ np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->tx_stats == NULL)
goto exit;
netdev->netdev_ops = &xennet_netdev_ops;
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return netdev;
exit:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
return ERR_PTR(err);
}
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
return 0;
fail:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return err;
}
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
info->queues = NULL;
}
- free_percpu(info->stats);
-
- free_netdev(info->netdev);
+ xennet_free_netdev(info->netdev);
return 0;
}
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index ea63fbd228ed..352b4f28f82c 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
ret = of_overlay_apply_one(ov, tchild, child);
if (ret)
return ret;
-
- /* The properties are already copied, now do the child nodes */
- for_each_child_of_node(child, grandchild) {
- ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
- if (ret) {
- pr_err("%s: Failed to apply single node @%s/%s\n",
- __func__, tchild->full_name,
- grandchild->name);
- return ret;
- }
- }
}
return ret;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b33c6a21807..b0d50d70a8a1 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev)
size = dev->coherent_dma_mask;
} else {
offset = PFN_DOWN(paddr - dma_addr);
- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
dev->dma_pfn_offset = offset;
@@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb,
if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
return NOTIFY_OK; /* not for us */
+ /* already populated? (driver using of_populate manually) */
+ if (of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
@@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb,
break;
case OF_RECONFIG_CHANGE_REMOVE:
+
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* find our device by node */
pdev = of_find_device_by_node(rd->dn);
if (pdev == NULL)
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 75976da22b2e..a2b687d5f324 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -176,5 +176,60 @@
};
};
+ overlay10 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest10 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <10>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest101 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
+
+ overlay11 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest11 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <11>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest111 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 844838e11ef1..41a4a138f53b 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev)
}
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
return 0;
}
@@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void)
selftest(1, "overlay test %d passed\n", 8);
}
+/* test insertion of a bus with parent devices */
+static void of_selftest_overlay_10(void)
+{
+ int ret;
+ char *child_path;
+
+ /* device should disable */
+ ret = of_selftest_apply_overlay_check(10, 10, 0, 1);
+ if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10))
+ return;
+
+ child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
+ selftest_path(10));
+ if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
+ return;
+
+ ret = of_path_platform_device_exists(child_path);
+ kfree(child_path);
+ if (selftest(ret, "overlay test %d failed; no child device\n", 10))
+ return;
+}
+
+/* test insertion of a bus with parent devices (and revert) */
+static void of_selftest_overlay_11(void)
+{
+ int ret;
+
+ /* device should disable */
+ ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1);
+ if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11))
+ return;
+}
+
static void __init of_selftest_overlay(void)
{
struct device_node *bus_np = NULL;
@@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void)
of_selftest_overlay_6();
of_selftest_overlay_8();
+ of_selftest_overlay_10();
+ of_selftest_overlay_11();
+
out:
of_node_put(bus_np);
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 37e71ff6408d..dceb9ddfd99a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus)
int i;
/* PCI-PCI Bridge */
pci_read_bridge_bases(bus);
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
- pci_claim_resource(bus->self, i);
- }
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
+ pci_claim_bridge_resource(bus->self, i);
} else {
/* Host-PCI Bridge */
int err;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 73aef51a28f0..8fb16188cd82 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
+/*
+ * The @idx resource of @dev should be a PCI-PCI bridge window. If this
+ * resource fits inside a window of an upstream bridge, do nothing. If it
+ * overlaps an upstream window but extends outside it, clip the resource so
+ * it fits completely inside.
+ */
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+{
+ struct pci_bus *bus = dev->bus;
+ struct resource *res = &dev->resource[idx];
+ struct resource orig_res = *res;
+ struct resource *r;
+ int i;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t start, end;
+
+ if (!r)
+ continue;
+
+ if (resource_type(res) != resource_type(r))
+ continue;
+
+ start = max(r->start, res->start);
+ end = min(r->end, res->end);
+
+ if (start > end)
+ continue; /* no overlap */
+
+ if (res->start == start && res->end == end)
+ return false; /* no change */
+
+ res->start = start;
+ res->end = end;
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+ &orig_res, res);
+
+ return true;
+ }
+
+ return false;
+}
+
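
The arithmetic in pci_bus_clip_resource() is just a max/min intersection of the bridge window with each upstream window. A tiny stand-alone model of that rule (the addresses are made up):

#include <stdio.h>
#include <stdint.h>

/* Minimal model of the clipping rule above: keep only the part of the
 * bridge window that overlaps the upstream window. */
struct win { uint64_t start, end; };

static int clip(struct win *res, const struct win *upstream)
{
	uint64_t start = res->start > upstream->start ? res->start : upstream->start;
	uint64_t end   = res->end   < upstream->end   ? res->end   : upstream->end;

	if (start > end)
		return 0;	/* no overlap, nothing to do */
	if (start == res->start && end == res->end)
		return 0;	/* already fits, no change */
	res->start = start;
	res->end = end;
	return 1;		/* clipped */
}

int main(void)
{
	struct win upstream = { 0xa0000000, 0xafffffff };	/* hypothetical */
	struct win bridge   = { 0xa8000000, 0xbfffffff };

	if (clip(&bridge, &upstream))
		printf("clipped to [%#llx-%#llx]\n",
		       (unsigned long long)bridge.start,
		       (unsigned long long)bridge.end);
	return 0;
}
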
void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
/**
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cab05f31223f..e9d4fd861ba1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
+ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
+ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (dev->subordinate || !dev->slot)
+ if (dev->subordinate || !dev->slot ||
+ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);
+/* Do any devices on or below this bus prevent a bus reset? */
+static bool pci_bus_resetable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
@@ -3607,6 +3623,22 @@ unlock:
return 0;
}
+/* Do any devices on or below this slot prevent a bus reset? */
+static bool pci_slot_resetable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
@@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
{
int rc;
- if (!slot)
+ if (!slot || !pci_slot_resetable(slot))
return -ENOTTY;
if (!probe)
@@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
- if (!bus->self)
+ if (!bus->self || !pci_bus_resetable(bus))
return -ENOTTY;
if (probe)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8aff29a804ff..d54632a1db43 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head);
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
/**
* pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ed6f89b6efe5..e52356aa09b8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
quirk_broken_intx_masking);
+static void quirk_no_bus_reset(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+}
+
+/*
+ * Atheros AR93xx chips do not behave correctly after a bus reset. The device
+ * will throw a Link Down error on AER-capable systems and, regardless of AER,
+ * the config space of the device is never accessible again; attempts to
+ * access it typically cause the system to hang or reset.
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0482235eee92..e3e17f3c0f0f 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
config space writes, so it's quite possible that an I/O window of
the bridge will have some undesirable address (e.g. 0) after the
first write. Ditto 64-bit prefetchable MMIO. */
-static void pci_setup_bridge_io(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
unsigned long io_mask;
@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
io_mask = PCI_IO_1K_RANGE_MASK;
/* Set up the top and bottom of the PCI I/O segment for this bus. */
- res = bus->resource[0];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
-static void pci_setup_bridge_mmio(struct pci_bus *bus)
+static void pci_setup_bridge_mmio(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus. */
- res = bus->resource[1];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
-static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l, bu, lu;
@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
/* Set up PREF base/limit. */
bu = lu = 0;
- res = bus->resource[2];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
&bus->busn_res);
if (type & IORESOURCE_IO)
- pci_setup_bridge_io(bus);
+ pci_setup_bridge_io(bridge);
if (type & IORESOURCE_MEM)
- pci_setup_bridge_mmio(bus);
+ pci_setup_bridge_mmio(bridge);
if (type & IORESOURCE_PREFETCH)
- pci_setup_bridge_mmio_pref(bus);
+ pci_setup_bridge_mmio_pref(bridge);
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
__pci_setup_bridge(bus, type);
}
+
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
+{
+ if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
+ return 0;
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed the window */
+
+ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return 0;
+
+ if (!pci_bus_clip_resource(bridge, i))
+ return -EINVAL; /* clipping didn't change anything */
+
+ switch (i - PCI_BRIDGE_RESOURCES) {
+ case 0:
+ pci_setup_bridge_io(bridge);
+ break;
+ case 1:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case 2:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed a smaller window */
+
+ return -EINVAL;
+}
+
/* Check whether the bridge supports optional I/O and
prefetchable memory ranges. If not, the respective
base/limit registers must be read-only and read as 0. */
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index e34da13885e8..27fa62ce6136 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1050,7 +1050,8 @@ static int miphy28lp_init(struct phy *phy)
ret = miphy28lp_init_usb3(miphy_phy);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
mutex_unlock(&miphy_dev->miphy_mutex);
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index c96e8183a8ff..efe724f97e02 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -29,10 +29,9 @@
/**
* omap_control_pcie_pcs - set the PCS delay count
* @dev: the control module device
- * @id: index of the pcie PHY (should be 1 or 2)
* @delay: 8 bit delay value
*/
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+void omap_control_pcie_pcs(struct device *dev, u8 delay)
{
u32 val;
struct omap_control_phy *control_phy;
@@ -55,8 +54,8 @@ void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
val = readl(control_phy->pcie_pcs);
val &= ~(OMAP_CTRL_PCIE_PCS_MASK <<
- (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT));
- val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+ OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+ val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
writel(val, control_phy->pcie_pcs);
}
EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index fb02a67c9181..a2b08f3ccb03 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -244,7 +244,8 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
else
data->num_phys = 3;
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
data->disc_thresh = 3;
else
data->disc_thresh = 2;
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 1387b4d4afe3..465de2c800f2 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -82,7 +82,6 @@ struct ti_pipe3 {
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
- u8 id;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -217,8 +216,13 @@ static int ti_pipe3_init(struct phy *x)
u32 val;
int ret = 0;
+ /*
+ * Set pcie_pcs register to 0x96 for proper functioning of phy
+ * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
+ * 18-1804.
+ */
if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
- omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1);
+ omap_control_pcie_pcs(phy->control_dev, 0x96);
return 0;
}
@@ -347,8 +351,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
}
if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
- if (of_property_read_u8(node, "id", &phy->id) < 0)
- phy->id = 1;
clk = devm_clk_get(phy->dev, "dpll_ref");
if (IS_ERR(clk)) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e4f65510c87e..89dca77ca038 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
if (pctldev == NULL)
return;
- mutex_lock(&pinctrldev_list_mutex);
mutex_lock(&pctldev->mutex);
-
pinctrl_remove_device_debugfs(pctldev);
+ mutex_unlock(&pctldev->mutex);
if (!IS_ERR(pctldev->p))
pinctrl_put(pctldev->p);
+ mutex_lock(&pinctrldev_list_mutex);
+ mutex_lock(&pctldev->mutex);
/* TODO: check that no pinmuxes are still active? */
list_del(&pctldev->node);
/* Destroy descriptor tree */
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index dfd021e8268f..f4cd0b9b2438 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -177,7 +177,7 @@ struct at91_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
- int nbanks;
+ int nactive_banks;
uint32_t *mux_mask;
int nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
int mux;
/* check if it's a valid config */
- if (pin->bank >= info->nbanks) {
+ if (pin->bank >= gpio_banks) {
dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
- name, index, pin->bank, info->nbanks);
+ name, index, pin->bank, gpio_banks);
return -EINVAL;
}
+ if (!gpio_chips[pin->bank]) {
+ dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+ name, index, pin->bank);
+ return -ENXIO;
+ }
+
if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
for_each_child_of_node(np, child) {
if (of_device_is_compatible(child, gpio_compat)) {
- info->nbanks++;
+ if (of_device_is_available(child))
+ info->nactive_banks++;
} else {
info->nfunctions++;
info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
}
size /= sizeof(*list);
- if (!size || size % info->nbanks) {
- dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+ if (!size || size % gpio_banks) {
+ dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
return -EINVAL;
}
- info->nmux = size / info->nbanks;
+ info->nmux = size / gpio_banks;
info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
at91_pinctrl_child_count(info, np);
- if (info->nbanks < 1) {
+ if (gpio_banks < 1) {
dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
return -EINVAL;
}
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
dev_dbg(&pdev->dev, "mux-mask\n");
tmp = info->mux_mask;
- for (i = 0; i < info->nbanks; i++) {
+ for (i = 0; i < gpio_banks; i++) {
for (j = 0; j < info->nmux; j++, tmp++) {
dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
}
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->groups)
return -ENOMEM;
- dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+ dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
{
struct at91_pinctrl *info;
struct pinctrl_pin_desc *pdesc;
- int ret, i, j, k;
+ int ret, i, j, k, ngpio_chips_enabled = 0;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
* to obtain references to the struct gpio_chip * for them, and we
* need this to proceed.
*/
- for (i = 0; i < info->nbanks; i++) {
- if (!gpio_chips[i]) {
- dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- devm_kfree(&pdev->dev, info);
- return -EPROBE_DEFER;
- }
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ ngpio_chips_enabled++;
+
+ if (ngpio_chips_enabled < info->nactive_banks) {
+ dev_warn(&pdev->dev,
+ "All GPIO chips are not registered yet (%d/%d)\n",
+ ngpio_chips_enabled, info->nactive_banks);
+ devm_kfree(&pdev->dev, info);
+ return -EPROBE_DEFER;
}
at91_pinctrl_desc.name = dev_name(&pdev->dev);
- at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+ at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
at91_pinctrl_desc.pins = pdesc =
devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
if (!at91_pinctrl_desc.pins)
return -ENOMEM;
- for (i = 0 , k = 0; i < info->nbanks; i++) {
+ for (i = 0, k = 0; i < gpio_banks; i++) {
for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
pdesc->number = k;
pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- for (i = 0; i < info->nbanks; i++)
- pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *at91_gpio)
{
+ struct gpio_chip *gpiochip_prev = NULL;
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
- int ret;
+ int ret, i;
at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
return ret;
}
- /* Setup chained handler */
- if (at91_gpio->pioc_idx)
- prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
/* The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
- if (prev && prev->next == at91_gpio)
+ gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+ if (!gpiochip_prev) {
+ /* Then register the chain on the parent IRQ */
+ gpiochip_set_chained_irqchip(&at91_gpio->chip,
+ &gpio_irqchip,
+ at91_gpio->pioc_virq,
+ gpio_irq_handler);
return 0;
+ }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- &gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
- return 0;
+ /* we can only have 2 banks before */
+ for (i = 0; i < 2; i++) {
+ if (prev->next) {
+ prev = prev->next;
+ } else {
+ prev->next = at91_gpio;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
/* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
.ngpio = MAX_NB_GPIO_PER_BANK,
};
-static void at91_gpio_probe_fixup(void)
-{
- unsigned i;
- struct at91_gpio_chip *at91_gpio, *last = NULL;
-
- for (i = 0; i < gpio_banks; i++) {
- at91_gpio = gpio_chips[i];
-
- /*
- * GPIO controller are grouped on some SoC:
- * PIOC, PIOD and PIOE can share the same IRQ line
- */
- if (last && last->pioc_virq == at91_gpio->pioc_virq)
- last->next = at91_gpio;
- last = at91_gpio;
- }
-}
-
static struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- at91_gpio_probe_fixup();
-
ret = at91_gpio_of_irq_setup(pdev, at91_chip);
if (ret)
goto irq_setup_err;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index ba74f0aa60c7..43eacc924b7e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -89,6 +89,7 @@ struct rockchip_iomux {
* @reg_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
* @irq: interrupt of the gpio bank
+ * @saved_enables: Saved content of GPIO_INTEN at suspend time.
* @pin_base: first pin number
* @nr_pins: number of pins in this bank
* @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
struct regmap *regmap_pull;
struct clk *clk;
int irq;
+ u32 saved_enables;
u32 pin_base;
u8 nr_pins;
char *name;
@@ -1396,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
- u32 polarity = 0, data = 0;
u32 pend;
- bool edge_changed = false;
- unsigned long flags;
dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1407,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
- if (bank->toggle_edge_mode) {
- polarity = readl_relaxed(bank->reg_base +
- GPIO_INT_POLARITY);
- data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
- }
-
while (pend) {
unsigned int virq;
@@ -1432,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
* needs manual intervention.
*/
if (bank->toggle_edge_mode & BIT(irq)) {
- if (data & BIT(irq))
- polarity &= ~BIT(irq);
- else
- polarity |= BIT(irq);
+ u32 data, data_old, polarity;
+ unsigned long flags;
- edge_changed = true;
- }
+ data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
+ do {
+ spin_lock_irqsave(&bank->slock, flags);
- generic_handle_irq(virq);
- }
+ polarity = readl_relaxed(bank->reg_base +
+ GPIO_INT_POLARITY);
+ if (data & BIT(irq))
+ polarity &= ~BIT(irq);
+ else
+ polarity |= BIT(irq);
+ writel(polarity,
+ bank->reg_base + GPIO_INT_POLARITY);
- if (bank->toggle_edge_mode && edge_changed) {
- /* Interrupt params should only be set with ints disabled */
- spin_lock_irqsave(&bank->slock, flags);
+ spin_unlock_irqrestore(&bank->slock, flags);
- data = readl_relaxed(bank->reg_base + GPIO_INTEN);
- writel_relaxed(0, bank->reg_base + GPIO_INTEN);
- writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
- writel(data, bank->reg_base + GPIO_INTEN);
+ data_old = data;
+ data = readl_relaxed(bank->reg_base +
+ GPIO_EXT_PORT);
+ } while ((data & BIT(irq)) != (data_old & BIT(irq)));
+ }
- spin_unlock_irqrestore(&bank->slock, flags);
+ generic_handle_irq(virq);
}
chained_irq_exit(chip, desc);
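
A rough model of what the reworked loop above is doing, with the line level reduced to a canned array of samples (the real code re-reads GPIO_EXT_PORT with bank->slock held each time): the controller only latches one edge polarity, so both-edge triggering is emulated by re-arming for the edge opposite to the current level until the level stops changing.

#include <stdio.h>

int main(void)
{
	int samples[] = { 1, 0, 0 };	/* hypothetical consecutive line-level reads */
	int i = 0, level = samples[0], old;

	do {
		int arm_rising = !level;	/* high line -> arm falling edge */

		printf("level=%d -> arm %s edge\n",
		       level, arm_rising ? "rising" : "falling");
		old = level;
		level = samples[++i];		/* re-read the line */
	} while (level != old);
	return 0;
}
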
@@ -1543,6 +1540,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
+ irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val &= ~d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val |= d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
static int rockchip_interrupts_register(struct platform_device *pdev,
struct rockchip_pinctrl *info)
{
@@ -1581,12 +1623,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc = irq_get_domain_generic_chip(bank->domain, 0);
gc->reg_base = bank->reg_base;
gc->private = bank;
- gc->chip_types[0].regs.mask = GPIO_INTEN;
+ gc->chip_types[0].regs.mask = GPIO_INTMASK;
gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+ gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+ gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+ gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 7c9d51382248..9e5ec00084bb 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
unsigned long config;
- st_pinconf_get(pctldev, pin_id, &config);
+ mutex_unlock(&pctldev->mutex);
+ st_pinconf_get(pctldev, pin_id, &config);
+ mutex_lock(&pctldev->mutex);
seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
"de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
static struct irq_chip st_gpio_irqchip = {
.name = "GPIO",
+ .irq_disable = st_gpio_irq_mask,
.irq_mask = st_gpio_irq_mask,
.irq_unmask = st_gpio_irq_unmask,
.irq_set_type = st_gpio_irq_set_type,
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index c5cef59f5965..779950c62e53 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* load the gpio chip */
xway_chip.dev = &pdev->dev;
- of_gpiochip_add(&xway_chip);
ret = gpiochip_add(&xway_chip);
if (ret) {
- of_gpiochip_remove(&xway_chip);
dev_err(&pdev->dev, "Failed to register gpio chip\n");
return ret;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e730935fa457..ed7017df065d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
{
- int i = 0;
+ int i;
const struct msm_function *func = pctrl->soc->functions;
- for (; i <= pctrl->soc->nfunctions; i++)
+ for (i = 0; i < pctrl->soc->nfunctions; i++)
if (!strcmp(func[i].name, "ps_hold")) {
pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
pctrl->restart_nb.priority = 128;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 9411eae39a4e..3d21efe11d7b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,11 +2,9 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
- * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package:
- * Copyright (C) 2005-2014 Dell Inc.
+ * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
+ * Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -34,13 +32,6 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -71,13 +62,6 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
-
- int needs_kbd_timeouts;
- /*
- * Ordered list of timeouts expressed in seconds.
- * The list must end with -1
- */
- int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
-/*
- * These values come from Windows utility provided by Dell. If any other value
- * is used then BIOS silently set timeout to 0 without any error message.
- */
-static struct quirk_entry quirk_dell_xps13_9333 = {
- .needs_kbd_timeouts = 1,
- .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
-};
-
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
- {
- .callback = dmi_matched,
- .ident = "Dell XPS13 9333",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
- },
- .driver_data = &quirk_dell_xps13_9333,
- },
{ }
};
@@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_id(int tokenid)
+static int find_token_location(int tokenid)
{
int i;
-
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return i;
+ return da_tokens[i].location;
}
return -1;
}
-static int find_token_location(int tokenid)
-{
- int id;
-
- id = find_token_id(tokenid);
- if (id == -1)
- return -1;
-
- return da_tokens[id].location;
-}
-
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
-static inline int dell_smi_error(int value)
-{
- switch (value) {
- case 0: /* Completed successfully */
- return 0;
- case -1: /* Completed with error */
- return -EIO;
- case -2: /* Function not supported */
- return -ENXIO;
- default: /* Unknown error */
- return -EINVAL;
- }
-}
-
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
- out:
+out:
release_buffer();
return ret;
}
@@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
- out:
+out:
release_buffer();
return ret;
}
@@ -849,984 +789,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-/*
- * Derived from information in smbios-keyboard-ctl:
- *
- * cbClass 4
- * cbSelect 11
- * Keyboard illumination
- * cbArg1 determines the function to be performed
- *
- * cbArg1 0x0 = Get Feature Information
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of user-selectable modes
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * cbRES2, byte2 Reserved for future use
- * cbRES2, byte3 Keyboard illumination type
- * 0 Reserved
- * 1 Tasklight
- * 2 Backlight
- * 3-255 Reserved for future use
- * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES3, byte1 Supported timeout unit bitmap
- * bit 0 Seconds
- * bit 1 Minutes
- * bit 2 Hours
- * bit 3 Days
- * bits 4-7 Reserved for future use
- * cbRES3, byte2 Number of keyboard light brightness levels
- * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
- * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
- * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
- * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
- *
- * cbArg1 0x1 = Get Current State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only One bit can be set
- * cbRES2, byte2 Currently active auto keyboard illumination triggers.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES2, byte3 Current Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
- * are set upon return from the [Get feature information] call.
- * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
- * cbRES3, byte1 Current ALS reading
- * cbRES3, byte2 Current keyboard light level.
- *
- * cbArg1 0x2 = Set New State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbArg2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only One bit can be set
- * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
- * keyboard to turn off automatically.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbArg2, byte3 Desired Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
- * cbArg3, byte2 Desired keyboard light level.
- */
-
-
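
The interface description being removed above encodes each timeout as one byte, bits 7:6 selecting the unit and bits 5:0 the value; that is also why the kbd_get_state() code below extracts timeout_value with a 0x3F mask and timeout_unit from the top two bits of the 32-bit word. A standalone sketch of that byte layout (illustration only, not driver code):

/* Standalone sketch of the timeout byte: bits 7:6 = unit, bits 5:0 = value. */
#include <stdint.h>
#include <stdio.h>

static uint8_t pack_timeout(uint8_t unit, uint8_t value)
{
	return (uint8_t)(((unit & 0x3) << 6) | (value & 0x3F));
}

int main(void)
{
	uint8_t b = pack_timeout(1 /* minutes */, 15);

	printf("packed=0x%02x unit=%u value=%u\n", b, b >> 6, b & 0x3F);
	return 0;
}
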
-enum kbd_timeout_unit {
- KBD_TIMEOUT_SECONDS = 0,
- KBD_TIMEOUT_MINUTES,
- KBD_TIMEOUT_HOURS,
- KBD_TIMEOUT_DAYS,
-};
-
-enum kbd_mode_bit {
- KBD_MODE_BIT_OFF = 0,
- KBD_MODE_BIT_ON,
- KBD_MODE_BIT_ALS,
- KBD_MODE_BIT_TRIGGER_ALS,
- KBD_MODE_BIT_TRIGGER,
- KBD_MODE_BIT_TRIGGER_25,
- KBD_MODE_BIT_TRIGGER_50,
- KBD_MODE_BIT_TRIGGER_75,
- KBD_MODE_BIT_TRIGGER_100,
-};
-
-#define kbd_is_als_mode_bit(bit) \
- ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
-#define kbd_is_trigger_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-#define kbd_is_level_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-
-struct kbd_info {
- u16 modes;
- u8 type;
- u8 triggers;
- u8 levels;
- u8 seconds;
- u8 minutes;
- u8 hours;
- u8 days;
-};
-
-struct kbd_state {
- u8 mode_bit;
- u8 triggers;
- u8 timeout_value;
- u8 timeout_unit;
- u8 als_setting;
- u8 als_value;
- u8 level;
-};
-
-static const int kbd_tokens[] = {
- KBD_LED_OFF_TOKEN,
- KBD_LED_AUTO_25_TOKEN,
- KBD_LED_AUTO_50_TOKEN,
- KBD_LED_AUTO_75_TOKEN,
- KBD_LED_AUTO_100_TOKEN,
- KBD_LED_ON_TOKEN,
-};
-
-static u16 kbd_token_bits;
-
-static struct kbd_info kbd_info;
-static bool kbd_als_supported;
-static bool kbd_triggers_supported;
-
-static u8 kbd_mode_levels[16];
-static int kbd_mode_levels_count;
-
-static u8 kbd_previous_level;
-static u8 kbd_previous_mode_bit;
-
-static bool kbd_led_present;
-
-/*
- * NOTE: there are three ways to set the keyboard backlight level.
- * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
- * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
- * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
- *
- * There are laptops which support only one of these methods. If we want to
- * support as many machines as possible we need to implement all three methods.
- * The first two methods use the kbd_state structure. The third uses SMBIOS
- * tokens. If kbd_info.levels == 0, the machine does not support setting the
- * keyboard backlight level via kbd_state.level.
- */
-
-static int kbd_get_info(struct kbd_info *info)
-{
- u8 units;
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x0;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- info->modes = buffer->output[1] & 0xFFFF;
- info->type = (buffer->output[1] >> 24) & 0xFF;
- info->triggers = buffer->output[2] & 0xFF;
- units = (buffer->output[2] >> 8) & 0xFF;
- info->levels = (buffer->output[2] >> 16) & 0xFF;
-
- if (units & BIT(0))
- info->seconds = (buffer->output[3] >> 0) & 0xFF;
- if (units & BIT(1))
- info->minutes = (buffer->output[3] >> 8) & 0xFF;
- if (units & BIT(2))
- info->hours = (buffer->output[3] >> 16) & 0xFF;
- if (units & BIT(3))
- info->days = (buffer->output[3] >> 24) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static unsigned int kbd_get_max_level(void)
-{
- if (kbd_info.levels != 0)
- return kbd_info.levels;
- if (kbd_mode_levels_count > 0)
- return kbd_mode_levels_count - 1;
- return 0;
-}
-
-static int kbd_get_level(struct kbd_state *state)
-{
- int i;
-
- if (kbd_info.levels != 0)
- return state->level;
-
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < kbd_mode_levels_count; ++i)
- if (kbd_mode_levels[i] == state->mode_bit)
- return i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_set_level(struct kbd_state *state, u8 level)
-{
- if (kbd_info.levels != 0) {
- if (level != 0)
- kbd_previous_level = level;
- if (state->level == level)
- return 0;
- state->level = level;
- if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
- state->mode_bit = kbd_previous_mode_bit;
- else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit = state->mode_bit;
- state->mode_bit = KBD_MODE_BIT_OFF;
- }
- return 0;
- }
-
- if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
- if (level != 0)
- kbd_previous_level = level;
- state->mode_bit = kbd_mode_levels[level];
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_get_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x1;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
- if (state->mode_bit != 0)
- state->mode_bit--;
-
- state->triggers = (buffer->output[1] >> 16) & 0xFF;
- state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
- state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
- state->als_setting = buffer->output[2] & 0xFF;
- state->als_value = (buffer->output[2] >> 8) & 0xFF;
- state->level = (buffer->output[2] >> 16) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static int kbd_set_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
- buffer->input[0] = 0x2;
- buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
- buffer->input[1] |= (state->triggers & 0xFF) << 16;
- buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
- buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
- buffer->input[2] = state->als_setting & 0xFF;
- buffer->input[2] |= (state->level & 0xFF) << 16;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
-{
- int ret;
-
- ret = kbd_set_state(state);
- if (ret == 0)
- return 0;
-
- /*
- * When setting the new state fails,try to restore the previous one.
- * This is needed on some machines where BIOS sets a default state when
- * setting a new state fails. This default state could be all off.
- */
-
- if (kbd_set_state(old))
- pr_err("Setting old previous keyboard state failed\n");
-
- return ret;
-}
-
-static int kbd_set_token_bit(u8 bit)
-{
- int id;
- int ret;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- buffer->input[1] = da_tokens[id].value;
- dell_send_request(buffer, 1, 0);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_get_token_bit(u8 bit)
-{
- int id;
- int ret;
- int val;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- dell_send_request(buffer, 0, 0);
- ret = buffer->output[0];
- val = buffer->output[1];
- release_buffer();
-
- if (ret)
- return dell_smi_error(ret);
-
- return (val == da_tokens[id].value);
-}
-
-static int kbd_get_first_active_token_bit(void)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
- ret = kbd_get_token_bit(i);
- if (ret == 1)
- return i;
- }
-
- return ret;
-}
-
-static int kbd_get_valid_token_counts(void)
-{
- return hweight16(kbd_token_bits);
-}
-
-static inline int kbd_init_info(void)
-{
- struct kbd_state state;
- int ret;
- int i;
-
- ret = kbd_get_info(&kbd_info);
- if (ret)
- return ret;
-
- kbd_get_state(&state);
-
- /* NOTE: timeout value is stored in 6 bits so max value is 63 */
- if (kbd_info.seconds > 63)
- kbd_info.seconds = 63;
- if (kbd_info.minutes > 63)
- kbd_info.minutes = 63;
- if (kbd_info.hours > 63)
- kbd_info.hours = 63;
- if (kbd_info.days > 63)
- kbd_info.days = 63;
-
- /* NOTE: On tested machines ON mode did not work and caused
- * problems (turned backlight off) so do not use it
- */
- kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
-
- kbd_previous_level = kbd_get_level(&state);
- kbd_previous_mode_bit = state.mode_bit;
-
- if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
- kbd_previous_level = 1;
-
- if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit =
- ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
- if (kbd_previous_mode_bit != 0)
- kbd_previous_mode_bit--;
- }
-
- if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
- BIT(KBD_MODE_BIT_TRIGGER_ALS)))
- kbd_als_supported = true;
-
- if (kbd_info.modes & (
- BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
- BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
- BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
- ))
- kbd_triggers_supported = true;
-
- /* kbd_mode_levels[0] is reserved, see below */
- for (i = 0; i < 16; ++i)
- if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
- kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
-
- /*
- * Find the first supported mode and assign to kbd_mode_levels[0].
- * This should be 0 (off), but we cannot depend on the BIOS to
- * support 0.
- */
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < 16; ++i) {
- if (BIT(i) & kbd_info.modes) {
- kbd_mode_levels[0] = i;
- break;
- }
- }
- kbd_mode_levels_count++;
- }
-
- return 0;
-
-}
-
-static inline void kbd_init_tokens(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
- if (find_token_id(kbd_tokens[i]) != -1)
- kbd_token_bits |= BIT(i);
-}
-
-static void kbd_init(void)
-{
- int ret;
-
- ret = kbd_init_info();
- kbd_init_tokens();
-
- if (kbd_token_bits != 0 || ret == 0)
- kbd_led_present = true;
-}
-
-static ssize_t kbd_led_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool convert;
- int value;
- int ret;
- char ch;
- u8 unit;
- int i;
-
- ret = sscanf(buf, "%d %c", &value, &ch);
- if (ret < 1)
- return -EINVAL;
- else if (ret == 1)
- ch = 's';
-
- if (value < 0)
- return -EINVAL;
-
- convert = false;
-
- switch (ch) {
- case 's':
- if (value > kbd_info.seconds)
- convert = true;
- unit = KBD_TIMEOUT_SECONDS;
- break;
- case 'm':
- if (value > kbd_info.minutes)
- convert = true;
- unit = KBD_TIMEOUT_MINUTES;
- break;
- case 'h':
- if (value > kbd_info.hours)
- convert = true;
- unit = KBD_TIMEOUT_HOURS;
- break;
- case 'd':
- if (value > kbd_info.days)
- convert = true;
- unit = KBD_TIMEOUT_DAYS;
- break;
- default:
- return -EINVAL;
- }
-
- if (quirks && quirks->needs_kbd_timeouts)
- convert = true;
-
- if (convert) {
- /* Convert value from current units to seconds */
- switch (unit) {
- case KBD_TIMEOUT_DAYS:
- value *= 24;
- case KBD_TIMEOUT_HOURS:
- value *= 60;
- case KBD_TIMEOUT_MINUTES:
- value *= 60;
- unit = KBD_TIMEOUT_SECONDS;
- }
-
- if (quirks && quirks->needs_kbd_timeouts) {
- for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
- if (value <= quirks->kbd_timeouts[i]) {
- value = quirks->kbd_timeouts[i];
- break;
- }
- }
- }
-
- if (value <= kbd_info.seconds && kbd_info.seconds) {
- unit = KBD_TIMEOUT_SECONDS;
- } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
- value /= 60;
- unit = KBD_TIMEOUT_MINUTES;
- } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
- value /= (60 * 60);
- unit = KBD_TIMEOUT_HOURS;
- } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
- value /= (60 * 60 * 24);
- unit = KBD_TIMEOUT_DAYS;
- } else {
- return -EINVAL;
- }
- }
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.timeout_value = value;
- new_state.timeout_unit = unit;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
-
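
The unit conversion in the store handler above relies on deliberate switch fall-through: a value given in days is multiplied by 24, then falls into the hours case (*60), then minutes (*60), ending up in seconds. A standalone sketch of that normalisation, with illustrative names rather than the driver's:

/* Standalone sketch of the intentional fall-through conversion to seconds. */
#include <stdio.h>

enum { T_SECONDS, T_MINUTES, T_HOURS, T_DAYS };

static int to_seconds(int value, int unit)
{
	switch (unit) {
	case T_DAYS:
		value *= 24;
		/* fall through */
	case T_HOURS:
		value *= 60;
		/* fall through */
	case T_MINUTES:
		value *= 60;
	}
	return value;
}

int main(void)
{
	printf("2 days = %d seconds\n", to_seconds(2, T_DAYS));
	return 0;
}
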
-static ssize_t kbd_led_timeout_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
- int len;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = sprintf(buf, "%d", state.timeout_value);
-
- switch (state.timeout_unit) {
- case KBD_TIMEOUT_SECONDS:
- return len + sprintf(buf+len, "s\n");
- case KBD_TIMEOUT_MINUTES:
- return len + sprintf(buf+len, "m\n");
- case KBD_TIMEOUT_HOURS:
- return len + sprintf(buf+len, "h\n");
- case KBD_TIMEOUT_DAYS:
- return len + sprintf(buf+len, "d\n");
- default:
- return -EINVAL;
- }
-
- return len;
-}
-
-static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
- kbd_led_timeout_show, kbd_led_timeout_store);
-
-static const char * const kbd_led_triggers[] = {
- "keyboard",
- "touchpad",
- /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
- "mouse",
-};
-
-static ssize_t kbd_led_triggers_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool triggers_enabled = false;
- bool als_enabled = false;
- bool disable_als = false;
- bool enable_als = false;
- int trigger_bit = -1;
- char trigger[21];
- int i, ret;
-
- ret = sscanf(buf, "%20s", trigger);
- if (ret != 1)
- return -EINVAL;
-
- if (trigger[0] != '+' && trigger[0] != '-')
- return -EINVAL;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- if (kbd_als_supported)
- als_enabled = kbd_is_als_mode_bit(state.mode_bit);
-
- if (kbd_triggers_supported)
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-
- if (kbd_als_supported) {
- if (strcmp(trigger, "+als") == 0) {
- if (als_enabled)
- return count;
- enable_als = true;
- } else if (strcmp(trigger, "-als") == 0) {
- if (!als_enabled)
- return count;
- disable_als = true;
- }
- }
-
- if (enable_als || disable_als) {
- new_state = state;
- if (enable_als) {
- if (triggers_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- } else {
- if (triggers_enabled) {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- } else {
- new_state.mode_bit = KBD_MODE_BIT_ON;
- }
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- if (kbd_triggers_supported) {
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
- continue;
- if (trigger[0] == '+' &&
- triggers_enabled && (state.triggers & BIT(i)))
- return count;
- if (trigger[0] == '-' &&
- (!triggers_enabled || !(state.triggers & BIT(i))))
- return count;
- trigger_bit = i;
- break;
- }
- }
-
- if (trigger_bit != -1) {
- new_state = state;
- if (trigger[0] == '+')
- new_state.triggers |= BIT(trigger_bit);
- else {
- new_state.triggers &= ~BIT(trigger_bit);
- /* NOTE: trackstick bit (2) must be disabled when
- * disabling touchpad bit (1), otherwise touchpad
- * bit (1) will not be disabled */
- if (trigger_bit == 1)
- new_state.triggers &= ~BIT(2);
- }
- if ((kbd_info.triggers & new_state.triggers) !=
- new_state.triggers)
- return -EINVAL;
- if (new_state.triggers && !triggers_enabled) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- }
- } else if (new_state.triggers == 0) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- else
- kbd_set_level(&new_state, 0);
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- if (new_state.mode_bit != KBD_MODE_BIT_OFF)
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- return -EINVAL;
-}
-
-static ssize_t kbd_led_triggers_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- bool triggers_enabled;
- int level, i, ret;
- int len = 0;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = 0;
-
- if (kbd_triggers_supported) {
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
- level = kbd_get_level(&state);
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if ((triggers_enabled || level <= 0) &&
- (state.triggers & BIT(i)))
- buf[len++] = '+';
- else
- buf[len++] = '-';
- len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
- }
- }
-
- if (kbd_als_supported) {
- if (kbd_is_als_mode_bit(state.mode_bit))
- len += sprintf(buf+len, "+als ");
- else
- len += sprintf(buf+len, "-als ");
- }
-
- if (len)
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
- kbd_led_triggers_show, kbd_led_triggers_store);
-
-static ssize_t kbd_led_als_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u8 setting;
- int ret;
-
- ret = kstrtou8(buf, 10, &setting);
- if (ret)
- return ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.als_setting = setting;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t kbd_led_als_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", state.als_setting);
-}
-
-static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
- kbd_led_als_show, kbd_led_als_store);
-
-static struct attribute *kbd_led_attrs[] = {
- &dev_attr_stop_timeout.attr,
- &dev_attr_start_triggers.attr,
- &dev_attr_als_setting.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(kbd_led);
-
-static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
-{
- int ret;
- u16 num;
- struct kbd_state state;
-
- if (kbd_get_max_level()) {
- ret = kbd_get_state(&state);
- if (ret)
- return 0;
- ret = kbd_get_level(&state);
- if (ret < 0)
- return 0;
- return ret;
- }
-
- if (kbd_get_valid_token_counts()) {
- ret = kbd_get_first_active_token_bit();
- if (ret < 0)
- return 0;
- for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return 0;
- return ffs(num) - 1;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
- return 0;
-}
-
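
kbd_led_level_get() above maps a brightness index onto the Nth set bit of kbd_token_bits by clearing the lowest set bit once per step (num &= num - 1) and finishing with ffs() - 1. A standalone sketch of that bit trick:

/* Standalone sketch: find the position of the Nth set bit of a bitmap. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int nth_set_bit(unsigned int bits, int n)
{
	while (bits && n-- > 0)
		bits &= bits - 1;	/* clear the lowest set bit */
	return bits ? ffs(bits) - 1 : -1;
}

int main(void)
{
	/* 0x2a has bits 1, 3 and 5 set: indices 0, 1, 2 map to 1, 3, 5 */
	printf("%d %d %d\n", nth_set_bit(0x2a, 0), nth_set_bit(0x2a, 1),
	       nth_set_bit(0x2a, 2));
	return 0;
}
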
-static void kbd_led_level_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u16 num;
-
- if (kbd_get_max_level()) {
- if (kbd_get_state(&state))
- return;
- new_state = state;
- if (kbd_set_level(&new_state, value))
- return;
- kbd_set_state_safe(&new_state, &state);
- return;
- }
-
- if (kbd_get_valid_token_counts()) {
- for (num = kbd_token_bits; num != 0 && value > 0; --value)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return;
- kbd_set_token_bit(ffs(num) - 1);
- return;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
-}
-
-static struct led_classdev kbd_led = {
- .name = "dell::kbd_backlight",
- .brightness_set = kbd_led_level_set,
- .brightness_get = kbd_led_level_get,
- .groups = kbd_led_groups,
-};
-
-static int __init kbd_led_init(struct device *dev)
-{
- kbd_init();
- if (!kbd_led_present)
- return -ENODEV;
- kbd_led.max_brightness = kbd_get_max_level();
- if (!kbd_led.max_brightness) {
- kbd_led.max_brightness = kbd_get_valid_token_counts();
- if (kbd_led.max_brightness)
- kbd_led.max_brightness--;
- }
- return led_classdev_register(dev, &kbd_led);
-}
-
-static void brightness_set_exit(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- /* Don't change backlight level on exit */
-};
-
-static void kbd_led_exit(void)
-{
- if (!kbd_led_present)
- return;
- kbd_led.brightness_set = brightness_set_exit;
- led_classdev_unregister(&kbd_led);
-}
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -1879,8 +841,6 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
- kbd_led_init(&platform_device->dev);
-
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -1948,7 +908,6 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
- kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -1965,7 +924,5 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index c71443c4f265..97b5e4ee1ca4 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+ RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
{}
};
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e225711bb8bc..9c48fb32f660 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
/* remove any sysfs entries */
if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+ mutex_lock(&rdev->mutex);
kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
rdev->open_count--;
rdev->exclusive = 0;
+ mutex_unlock(&rdev->mutex);
module_put(rdev->owner);
}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index c1444c3d84c2..ff828117798f 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;
.enable_mask = S2MPS14_ENABLE_MASK \
}
+#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
+#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C),
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10),
};
static int s2mps14_regulator_enable(struct regulator_dev *rdev)
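
The two macros added above differ only in how enable_reg is derived: buck7 uses S2MPS13_REG_B1CTRL + (num - 1) * 2, while buck8_10 uses S2MPS13_REG_B1CTRL + num * 2 - 1; both share the vsel_reg formula. Working the offsets through for bucks 7-10 shows why buck 7 needs its own variant (relative offsets only, taken from the macros, no datasheet assumed):

/* Standalone check of the register-offset arithmetic in the macros above. */
#include <stdio.h>

int main(void)
{
	int num;

	for (num = 7; num <= 10; num++) {
		int vsel = num * 2 - 1;				/* B1OUT-relative  */
		int enable = (num == 7) ? (num - 1) * 2		/* buck7 macro     */
					: num * 2 - 1;		/* buck8_10 macro  */

		printf("BUCK%d: vsel_reg = B1OUT + %d, enable_reg = B1CTRL + %d\n",
		       num, vsel, enable);
	}
	return 0;
}
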
@@ -570,7 +604,7 @@ static struct regulator_ops s2mps14_reg_ops = {
.enable_mask = S2MPS14_ENABLE_MASK \
}
-#define regulator_desc_s2mps14_buck(num, min, step) { \
+#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS14_BUCK##num, \
.ops = &s2mps14_reg_ops, \
@@ -579,7 +613,7 @@ static struct regulator_ops s2mps14_reg_ops = {
.min_uV = min, \
.uV_step = step, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
- .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
+ .linear_min_sel = min_sel, \
.ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
@@ -613,11 +647,16 @@ static const struct regulator_desc s2mps14_regulators[] = {
regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
- regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
- regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
+ S2MPS14_BUCK4_START_SEL),
+ regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
};
static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index eebc52cb6984..3d95c87160b3 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
goto err_alloc;
}
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = size * 32;
data->rcdev.ops = &sunxi_reset_ops;
@@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = resource_size(res) * 32;
data->rcdev.ops = &sunxi_reset_ops;
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b5e7c4670205..89ac1d5083c6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
{ "s2mps14-rtc", S2MPS14X },
+ { },
};
static struct platform_driver s5m_rtc_driver = {
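
The added { } entry terminates the platform_device_id table: such tables are walked until an empty sentinel entry is reached, so without it a lookup can run past the end of the array. A standalone sketch of the sentinel-terminated walk — the generic pattern, not the kernel's actual matcher:

/* Standalone sketch: why ID tables end with an empty sentinel entry. */
#include <stdio.h>
#include <string.h>

struct id { const char *name; int driver_data; };

static const struct id table[] = {
	{ "s5m-rtc", 1 },
	{ "s2mps14-rtc", 2 },
	{ NULL, 0 },			/* sentinel: stops the walk below */
};

static const struct id *find_id(const char *name)
{
	const struct id *p;

	for (p = table; p->name; p++)	/* relies on the sentinel entry */
		if (!strcmp(p->name, name))
			return p;
	return NULL;
}

int main(void)
{
	const struct id *m = find_id("s2mps14-rtc");

	printf("%s -> %d\n", m ? m->name : "none", m ? m->driver_data : -1);
	return 0;
}
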
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91e97ec01418..4d41bf75c233 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
*/
static inline int ap_test_config_domain(unsigned int domain)
{
- if (!ap_configuration)
- return 1;
- return ap_test_config(ap_configuration->aqm, domain);
+ if (!ap_configuration) /* QCI not supported */
+ if (domain < 16)
+ return 1; /* then domains 0...15 are configured */
+ else
+ return 0;
+ else
+ return ap_test_config(ap_configuration->aqm, domain);
}
/**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f407e3763432..642c77c76b84 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxanswr");
card = CARD_FROM_CDEV(channel->ccwdev);
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxactch");
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
+/**
+ * qeth_send_control_data() - send control command to the card
+ * @card: qeth_card structure pointer
+ * @len: size of the command buffer
+ * @iob: qeth_cmd_buffer pointer
+ * @reply_cb: callback function pointer
+ * @cb_card: pointer to the qeth_card structure
+ * @cb_reply: pointer to the qeth_reply structure
+ * @cb_cmd: pointer to the original iob for non-IPA
+ * commands, or to the qeth_ipa_cmd structure
+ * for the IPA commands.
+ * @reply_param: private pointer passed to the callback
+ *
+ * Returns the value of the `return_code' field of the response
+ * block returned by the hardware, or another error indication;
+ * a value of zero means the command completed successfully.
+ *
+ * The callback function is invoked one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. It must return
+ * non-zero while further reply blocks are expected and zero once the
+ * last (or only) reply block has been received. The callback can
+ * retrieve the reply_param pointer from the 'param' field of
+ * struct qeth_reply.
+ */
+
int qeth_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
+ int (*reply_cb)(struct qeth_card *cb_card,
+ struct qeth_reply *cb_reply,
+ unsigned long cb_cmd),
void *reply_param)
{
int rc;
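
The kernel-doc added in this hunk spells out the reply_cb contract. A hypothetical callback following that contract — a sketch written against the structures used in this file, assuming the qeth_core headers, not a function that exists in the tree:

static int example_reply_cb(struct qeth_card *cb_card,
			    struct qeth_reply *cb_reply,
			    unsigned long cb_cmd)
{
	/* for an IPA command, cb_cmd points at the qeth_ipa_cmd response */
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) cb_cmd;

	if (cmd->hdr.return_code)
		QETH_CARD_TEXT_(cb_card, 2, "err%04x", cmd->hdr.return_code);

	/* returning zero tells qeth_send_control_data() this was the last
	 * (or only) reply block; non-zero would ask for more blocks */
	return 0;
}
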
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- iob = qeth_wait_for_buffer(&card->write);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ iob = qeth_get_buffer(&card->write);
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ } else {
+ dev_warn(&card->gdev->dev,
+ "The qeth driver ran out of channel command buffers\n");
+ QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+ dev_name(&card->gdev->dev));
+ }
return iob;
}
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "strtlan");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
QETH_PROT_IPV4);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
- cmd->data.setadapterparms.hdr.command_code = command;
- cmd->data.setadapterparms.hdr.used_total = 1;
- cmd->data.setadapterparms.hdr.seq_no = 1;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+ cmd->data.setadapterparms.hdr.command_code = command;
+ cmd->data.setadapterparms.hdr.used_total = 1;
+ cmd->data.setadapterparms.hdr.seq_no = 1;
+ }
return iob;
}
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc;
}
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
return rc;
}
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return -ENOMEDIUM;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "qdiagass");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
QETH_DBF_TEXT(SETUP, 2, "diagtrap");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 80;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.mode = mode;
qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_set_access_ctrl));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
QETH_SNMP_SETADP_CMDLENGTH + req_len);
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
-
+out:
kfree(ureq);
kfree(qinfo.udata);
return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_query_oat));
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
oat_req = &cmd->data.setadapterparms.data.query_oat;
oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
return -EOPNOTSUPP;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
(void *)carrier_info);
}
@@ -5060,11 +5133,23 @@ retriable:
card->options.adp.supported_funcs = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
- qeth_query_ipassists(card, QETH_PROT_IPV4);
- if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
- qeth_query_setadapterparms(card);
- if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
- qeth_query_setdiagass(card);
+ rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+ if (rc == -ENOMEM)
+ goto out;
+ if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ rc = qeth_query_setadapterparms(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ goto out;
+ }
+ }
+ if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ rc = qeth_query_setdiagass(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ goto out;
+ }
+ }
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d02cd1a67943..ce87ae72edbd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
- enum qeth_ipa_cmds,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long));
+ enum qeth_ipa_cmds);
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
return ndev;
}
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Sgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- /* MAC already registered, needed in couple/uncouple case */
- if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
- QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
- mac, QETH_CARD_IFNAME(card));
- cmd->hdr.return_code = 0;
+ if (retcode)
+ QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+ switch (retcode) {
+ case IPA_RC_SUCCESS:
+ rc = 0;
+ break;
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ rc = -ENOSYS;
+ break;
+ case IPA_RC_L2_ADDR_TABLE_FULL:
+ rc = -ENOSPC;
+ break;
+ case IPA_RC_L2_DUP_MAC:
+ case IPA_RC_L2_DUP_LAYER3_MAC:
+ rc = -EEXIST;
+ break;
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ rc = -EPERM;
+ break;
+ case IPA_RC_L2_MAC_NOT_FOUND:
+ rc = -ENOENT;
+ break;
+ case -ENOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EIO;
+ break;
}
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ return rc;
}
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_CARD_TEXT(card, 2, "L2Sgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
- qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Dgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ QETH_CARD_TEXT(card, 2, "L2Sgmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETGMAC));
+ if (rc == -EEXIST)
+ QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+ mac, QETH_CARD_IFNAME(card));
+ else if (rc)
+ QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Dgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
- qeth_l2_send_delgroupmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELGMAC));
+ if (rc)
+ QETH_DBF_MESSAGE(2,
+ "Could not delete group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
mc->is_vmac = vmac;
if (vmac) {
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- NULL);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
} else {
- rc = qeth_l2_send_setgroupmac(card, mac);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setgroupmac(card, mac));
}
if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
if (del) {
if (mc->is_vmac)
qeth_l2_send_setdelmac(card, mc->mc_addr,
- IPA_CMD_DELVMAC, NULL);
+ IPA_CMD_DELVMAC);
else
qeth_l2_send_delgroupmac(card, mc->mc_addr);
}
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
+ int rc;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
id->vid = vid;
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ if (rc) {
+ kfree(id);
+ return rc;
+ }
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
+ int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
}
spin_unlock_bh(&card->vlanlock);
if (tmpid) {
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
- return 0;
+ return rc;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
}
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
- enum qeth_ipa_cmds ipacmd,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long))
+ enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+ return qeth_send_ipa_cmd(card, iob, NULL, NULL);
}
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
- struct qeth_ipa_cmd *cmd;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Smaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETVMAC));
+ if (rc == 0) {
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ dev_info(&card->gdev->dev,
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
+ } else {
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- switch (cmd->hdr.return_code) {
- case IPA_RC_L2_DUP_MAC:
- case IPA_RC_L2_DUP_LAYER3_MAC:
+ switch (rc) {
+ case -EEXIST:
dev_warn(&card->gdev->dev,
- "MAC address %pM already exists\n",
- cmd->data.setdelmac.mac);
+ "MAC address %pM already exists\n", mac);
break;
- case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
- case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ case -EPERM:
dev_warn(&card->gdev->dev,
- "MAC address %pM is not authorized\n",
- cmd->data.setdelmac.mac);
- break;
- default:
+ "MAC address %pM is not authorized\n", mac);
break;
}
- } else {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
- OSA_ADDR_LEN);
- dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
- }
- return 0;
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
- QETH_CARD_TEXT(card, 2, "L2Setmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 2, "L2Dmaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
- return 0;
}
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
- return 0;
+ return rc;
}
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Delmac");
if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
return 0;
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
- qeth_l2_send_delmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELVMAC));
+ if (rc == 0)
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ return rc;
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
if (rc) {
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
"device %s: x%x\n", CARD_BUS_ID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
return rc;
}
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+ if (!rc || (rc == -ENOENT))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (rc)
return rc;
rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
- if (rc)
- return rc;
- return 0;
+ return rc;
}
EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength = cmdlength;
cmd->data.sbp.hdr.command_code = setcmd;
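
The qeth_l2 hunks above stop comparing against raw IPA_RC_* hardware codes and instead have qeth_setdel_makerc() translate them into standard errnos (-EPERM, -ENOENT) before the result reaches the caller. A rough, self-contained illustration of that translation pattern in plain userspace C follows; the IPA_RC_* values and the -EEXIST mapping are placeholders for this sketch, not the real qeth definitions:

    /*
     * Illustrative sketch only: translate device-specific return codes into
     * errnos once, so callers can test rc == -ENOENT or rc == -EPERM instead
     * of IPA_RC_* constants.
     */
    #include <errno.h>
    #include <stdio.h>

    enum {
        IPA_RC_SUCCESS          = 0x0000,   /* placeholder codes */
        IPA_RC_L2_DUP_MAC       = 0x0001,
        IPA_RC_L2_MAC_NOT_AUTH  = 0x0002,
        IPA_RC_L2_MAC_NOT_FOUND = 0x0003,
    };

    static int setdel_makerc(int ipa_rc)
    {
        switch (ipa_rc) {
        case IPA_RC_SUCCESS:            return 0;
        case IPA_RC_L2_DUP_MAC:         return -EEXIST;
        case IPA_RC_L2_MAC_NOT_AUTH:    return -EPERM;
        case IPA_RC_L2_MAC_NOT_FOUND:   return -ENOENT;
        default:                        return -EIO;
        }
    }

    int main(void)
    {
        int rc = setdel_makerc(IPA_RC_L2_MAC_NOT_FOUND);

        /* callers now test errnos, as qeth_l2_set_mac_address does above */
        if (rc == -ENOENT)
            printf("MAC not registered, treat delete as non-fatal (rc=%d)\n", rc);
        return 0;
    }
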
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 625227ad16ee..e2a0ee845399 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (addr->proto == QETH_PROT_IPV6) {
memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setassparms.hdr.assist_no = ipa_func;
- cmd->data.setassparms.hdr.length = 8 + len;
- cmd->data.setassparms.hdr.command_code = cmd_code;
- cmd->data.setassparms.hdr.return_code = 0;
- cmd->data.setassparms.hdr.seq_no = 0;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setassparms.hdr.assist_no = ipa_func;
+ cmd->data.setassparms.hdr.length = 8 + len;
+ cmd->data.setassparms.hdr.command_code = cmd_code;
+ cmd->data.setassparms.hdr.return_code = 0;
+ cmd->data.setassparms.hdr.seq_no = 0;
+ }
return iob;
}
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
length, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, length, data,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
QETH_DBF_TEXT(SETUP, 2, "diagtrac");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
IPA_CMD_ASS_ARP_QUERY_INFO,
sizeof(struct qeth_arp_query_data) - sizeof(char),
prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_ADD_ENTRY,
sizeof(struct qeth_arp_cache_entry),
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_REMOVE_ENTRY,
12,
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
12, (unsigned long)buf,
qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ int rc;
+
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
- qeth_l3_iqd_read_initial_mac(card);
+ rc = qeth_l3_iqd_read_initial_mac(card);
+ if (rc)
+ return rc;
if (card->options.hsuid[0])
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
if (rc) {
@@ -3410,10 +3438,10 @@ contin:
}
rc = qeth_l3_setrouting_v4(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
netif_tx_disable(card->dev);
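
Both the qeth_l2 and qeth_l3 hunks add the same guard after every qeth_get_ipacmd_buffer()/qeth_l3_get_setassparms_cmd() call: if no command buffer could be allocated, bail out with -ENOMEM instead of dereferencing a NULL iob. A minimal stand-alone sketch of that pattern, with stand-in names rather than the real qeth API:

    #include <errno.h>
    #include <stdlib.h>

    struct cmd_buffer { unsigned char data[64]; };

    /* stand-in for qeth_get_ipacmd_buffer(): may return NULL */
    static struct cmd_buffer *get_cmd_buffer(void)
    {
        return calloc(1, sizeof(struct cmd_buffer));
    }

    static int send_command(unsigned char opcode)
    {
        struct cmd_buffer *iob = get_cmd_buffer();

        if (!iob)                   /* the check added at each call site */
            return -ENOMEM;

        iob->data[0] = opcode;      /* only filled once allocation succeeded */
        /* ... hand the buffer off to the transport here ... */
        free(iob);
        return 0;
    }

    int main(void)
    {
        return send_command(0x21) ? 1 : 0;
    }
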
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3b73b96619e2..26270c351624 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.16"
+#define DRV_VERSION "1.6.0.17"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2097de42a147..155b286f1a9d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
goto fnic_abort_cmd_end;
}
+ /* IO out of order */
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Issuing Host reset due to out of order IO\n");
+
+ if (fnic_host_reset(sc) == FAILED) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_host_reset failed.\n");
+ }
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
/*
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df4e27cd996a..9219953ee949 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
+ ipr_cmd->eh_comp = NULL;
ipr_cmd->fast_done = fast_done;
init_timer(&ipr_cmd->timer);
}
@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
@@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd: ipr command struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ipr_cmd: ipr command struct
+ * @device: device to match (sdev)
+ * @match: match function to use
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+ int (*match)(struct ipr_cmnd *, void *))
+{
+ struct ipr_cmnd *ipr_cmd;
+ int wait;
+ unsigned long flags;
+ struct ipr_hrr_queue *hrrq;
+ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+ DECLARE_COMPLETION_ONSTACK(comp);
+
+ ENTER;
+ do {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
+
+ if (!timeout) {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait)
+ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
+
+ LEAVE;
+ return SUCCESS;
+}
+
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
@@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
spin_lock_irq(cmd->device->host->host_lock);
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
return rc;
}
@@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
unsigned long flags;
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
ENTER;
+ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
rc = ipr_cancel_op(scsi_cmd);
spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
LEAVE;
return rc;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b4f3eec51bc9..ec03b42fa2b9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1606,6 +1606,7 @@ struct ipr_cmnd {
struct scsi_device *sdev;
} u;
+ struct completion *eh_comp;
struct ipr_hrr_queue *hrrq;
struct ipr_ioa_cfg *ioa_cfg;
};
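
The ipr changes give each command an eh_comp completion pointer: the abort/reset handlers tag every still-pending command that matches the device, wait for the completions with a timeout, and clear the tags (reporting FAILED) if anything is still outstanding. The single-threaded sketch below only mirrors that tag/wait/check structure; the real driver loops until no matches remain and uses struct completion, the hrrq locks and wait_for_completion_timeout():

    #include <stdio.h>

    #define SUCCESS 0
    #define FAILED  1
    #define N_CMDS  3

    struct cmd {
        void *device;
        int   done;     /* set by the normal completion path */
        int   tagged;   /* stands in for ipr_cmd->eh_comp being set */
    };

    static int match_lun(const struct cmd *c, void *device)
    {
        return c->device == device;
    }

    /* stand-in for wait_for_completion_timeout(); here everything "finishes" */
    static void fake_wait(struct cmd *cmds, int n)
    {
        for (int i = 0; i < n; i++)
            if (cmds[i].tagged)
                cmds[i].done = 1;
    }

    static int wait_for_ops(struct cmd *cmds, int n, void *device)
    {
        int tagged = 0, left = 0;

        for (int i = 0; i < n; i++)             /* 1) tag matching pending cmds */
            if (!cmds[i].done && match_lun(&cmds[i], device)) {
                cmds[i].tagged = 1;
                tagged++;
            }
        if (!tagged)
            return SUCCESS;

        fake_wait(cmds, n);                     /* 2) wait for their completions */

        for (int i = 0; i < n; i++) {           /* 3) anything still pending? */
            cmds[i].tagged = 0;
            if (!cmds[i].done && match_lun(&cmds[i], device))
                left++;
        }
        if (left) {
            fprintf(stderr, "timed out waiting for aborted commands\n");
            return FAILED;
        }
        return SUCCESS;
    }

    int main(void)
    {
        int dev;
        struct cmd cmds[N_CMDS] = { { &dev, 0, 0 }, { &dev, 1, 0 }, { NULL, 0, 0 } };

        return wait_for_ops(cmds, N_CMDS, &dev);    /* SUCCESS with the fake wait */
    }
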
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ca291c1380..cce1cbc1a927 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
* Return target busy if we've received a non-zero retry_delay_timer
* in a FCP_RSP.
*/
- if (time_after(jiffies, fcport->retry_delay_timestamp))
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0;
else
goto qc24_target_busy;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e02885451425..9b3829931f40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
return -ENXIO;
if (!get_device(&sdev->sdev_gendev))
return -ENXIO;
- /* We can fail this if we're doing SCSI operations
+ /* We can fail try_module_get if we're doing SCSI operations
* from module exit (like cache flush) */
- try_module_get(sdev->host->hostt->module);
+ __module_get(sdev->host->hostt->module);
return 0;
}
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
-#ifdef CONFIG_MODULE_UNLOAD
- struct module *module = sdev->host->hostt->module;
-
- /* The module refcount will be zero if scsi_device_get()
- * was called from a module removal routine */
- if (module && module_refcount(module) != 0)
- module_put(module);
-#endif
+ module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
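
The scsi.c hunks switch scsi_device_get() from try_module_get(), which can fail during module exit, to __module_get(), so a reference is always taken and scsi_device_put() can drop it unconditionally instead of peeking at module_refcount(). The toy sketch below shows the invariant this buys, using plain counters; it illustrates the get/put pairing, not the kernel's actual module refcounting:

    #include <assert.h>

    /* toy stand-ins for module/device reference counts */
    static int module_refs;
    static int device_refs;

    static int device_get(void)
    {
        device_refs++;
        module_refs++;          /* like __module_get(): never fails */
        return 0;
    }

    static void device_put(void)
    {
        module_refs--;          /* unconditional, mirroring module_put() */
        device_refs--;
    }

    int main(void)
    {
        /* every successful get is balanced by exactly one put */
        if (device_get() == 0)
            device_put();

        assert(module_refs == 0 && device_refs == 0);
        return 0;
    }
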
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b8b51bc29b4..4aca1b0378c2 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
req_opcode = cmd[3];
req_sa = get_unaligned_be16(cmd + 4);
alloc_len = get_unaligned_be32(cmd + 6);
- if (alloc_len < 4 && alloc_len > 0xffff) {
+ if (alloc_len < 4 || alloc_len > 0xffff) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
return check_condition_result;
}
@@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
a_len = 8192;
else
a_len = alloc_len;
- arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
+ arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
if (NULL == arr) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e42fff6e8c10..8afb01604d51 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1041,7 +1041,7 @@ retry:
}
/* signal not to enter either branch of the if () below */
timeleft = 0;
- rtn = NEEDS_RETRY;
+ rtn = FAILED;
} else {
timeleft = wait_for_completion_timeout(&done, timeout);
rtn = SUCCESS;
@@ -1081,7 +1081,7 @@ retry:
rtn = FAILED;
break;
}
- } else if (!rtn) {
+ } else if (rtn != FAILED) {
scsi_abort_eh_cmnd(scmd);
rtn = FAILED;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ea95dd3e260..17bb541f7cc2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -591,7 +591,6 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
struct scatterlist *first_chunk = NULL;
- gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
int ret;
BUG_ON(!nents);
@@ -606,7 +605,7 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
}
ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
- first_chunk, gfp_mask, scsi_sg_alloc);
+ first_chunk, GFP_ATOMIC, scsi_sg_alloc);
if (unlikely(ret))
scsi_free_sgtable(sdb, mq);
return ret;
@@ -1144,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd)
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
int ivecs, count;
- BUG_ON(prot_sdb == NULL);
+ if (prot_sdb == NULL) {
+ /*
+ * This can happen if someone (e.g. multipath)
+ * queues a command to a device on an adapter
+ * that does not support DIX.
+ */
+ WARN_ON_ONCE(1);
+ error = BLKPREP_KILL;
+ goto err_exit;
+ }
+
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fedab3c21ddf..399516925d80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sd_config_discard(sdkp, SD_LBP_WS16);
} else { /* LBP VPD page tells us what to use */
-
- if (sdkp->lbpws)
+ if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
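
Within the "LBP VPD page tells us what to use" branch shown above, the sd.c hunk changes which discard mechanism is preferred: UNMAP when the device reports it (lbpu set, a non-zero max_unmap_blocks, lbprz clear), otherwise WRITE SAME(16), then WRITE SAME(10); the tail of the branch falls outside this hunk. A small stand-alone restatement of that decision, with an enum standing in for the SD_LBP_* constants:

    enum lbp_mode { LBP_DISABLE, LBP_UNMAP, LBP_WS16, LBP_WS10 };

    struct disk_caps {
        int lbpu;                       /* UNMAP supported */
        int lbpws;                      /* WRITE SAME(16) with UNMAP bit */
        int lbpws10;                    /* WRITE SAME(10) with UNMAP bit */
        int lbprz;                      /* reads zeroes after discard */
        unsigned int max_unmap_blocks;
    };

    /* mirrors the order of checks in the sd_read_block_limits() hunk above */
    static enum lbp_mode pick_discard_mode(const struct disk_caps *c)
    {
        if (c->lbpu && c->max_unmap_blocks && !c->lbprz)
            return LBP_UNMAP;
        if (c->lbpws)
            return LBP_WS16;
        if (c->lbpws10)
            return LBP_WS10;
        return LBP_DISABLE;
    }

    int main(void)
    {
        struct disk_caps caps = { .lbpu = 1, .max_unmap_blocks = 4194304 };

        return pick_discard_mode(&caps) == LBP_UNMAP ? 0 : 1;
    }
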
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 7281316a5ecb..a67d37c7e3c0 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
iounmap(clk_reg);
dws->num_cs = 16;
- dws->fifo_len = 40; /* FIFO has 40 words buffer */
#ifdef CONFIG_SPI_DW_MID_DMA
dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d0d5542efc06..8edcd1b84562 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
if (!dws->fifo_len) {
u32 fifo;
- for (fifo = 2; fifo <= 257; fifo++) {
+ for (fifo = 2; fifo <= 256; fifo++) {
dw_writew(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
break;
}
- dws->fifo_len = (fifo == 257) ? 0 : fifo;
+ dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
dw_writew(dws, DW_SPI_TXFLTR, 0);
}
}
@@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
if (ret) {
- dev_warn(&master->dev, "DMA init failed\n");
+ dev_warn(dev, "DMA init failed\n");
dws->dma_inited = 0;
}
}
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index b410499cddca..aad6683db81b 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -341,7 +341,7 @@ static int img_spfi_start_dma(struct spi_master *master,
default:
rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
rxconf.src_addr_width = 1;
- rxconf.src_maxburst = 1;
+ rxconf.src_maxburst = 4;
}
dmaengine_slave_config(spfi->rx_ch, &rxconf);
@@ -368,7 +368,7 @@ static int img_spfi_start_dma(struct spi_master *master,
default:
txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
txconf.dst_addr_width = 1;
- txconf.dst_maxburst = 1;
+ txconf.dst_maxburst = 4;
break;
}
dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -390,14 +390,14 @@ static int img_spfi_start_dma(struct spi_master *master,
dma_async_issue_pending(spfi->rx_ch);
}
+ spfi_start(spfi);
+
if (xfer->tx_buf) {
spfi->tx_dma_busy = true;
dmaengine_submit(txdesc);
dma_async_issue_pending(spfi->tx_ch);
}
- spfi_start(spfi);
-
return 1;
stop_dma:
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 05c623cfb078..23822e7df6c1 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
- spi_finalize_current_message(drv_data->master);
drv_data->cur_chip = NULL;
+ spi_finalize_current_message(drv_data->master);
}
static void reset_sccr1(struct driver_data *drv_data)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 239be7cbe5a8..3ab7a21445fc 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,7 +82,7 @@ struct sh_msiof_spi_priv {
#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT 2
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
@@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ pm_runtime_get_sync(&p->pdev->dev);
+
if (!np) {
/*
* Use spi->controller_data for CS (same strategy as spi_gpio),
@@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
if (spi->cs_gpio >= 0)
gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ pm_runtime_put_sync(&p->pdev->dev);
+
return 0;
}
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 1bf891bd321a..4f361b77c749 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -264,7 +264,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
inode->i_sb->s_root != NULL &&
- is_root_inode(inode))
+ !is_root_inode(inode))
ll_invalidate_aliases(inode);
iput(inode);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f6010203e..65d610abe06e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
return 0;
}
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig
index 81784c6f7b88..77d8753f6ba4 100644
--- a/drivers/staging/media/tlg2300/Kconfig
+++ b/drivers/staging/media/tlg2300/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_TLG2300
tristate "Telegent TLG2300 USB video capture support (Deprecated)"
depends on VIDEO_DEV && I2C && SND && DVB_CORE
+ depends on MEDIA_USB_SUPPORT
select VIDEO_TUNER
select VIDEO_TVEEPROM
depends on RC_CORE
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 093535c6217b..120b70d72d79 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
static const struct mfd_cell nvec_devices[] = {
{
.name = "nvec-kbd",
- .id = 1,
},
{
.name = "nvec-mouse",
- .id = 1,
},
{
.name = "nvec-power",
- .id = 1,
+ .id = 0,
},
{
.name = "nvec-power",
- .id = 2,
+ .id = 1,
},
{
.name = "nvec-paz00",
- .id = 1,
},
};
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
nvec_msg_free(nvec, msg);
}
- ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+ ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
if (ret)
dev_err(nvec->dev, "error adding subdevices\n");
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 86c72ba0a0cd..f8c5fc371c4c 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -2177,7 +2177,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+ bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
/* }} */
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index c8f739dd346e..70f870541f92 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -182,6 +182,14 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
if (pDevice->byCurrentCh == uConnectionChannel)
return bResult;
+ /* Set VGA to max sensitivity */
+ if (pDevice->bUpdateBBVGA &&
+ pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
+ pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+
+ BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ }
+
/* clear NAV */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 83e4162c0094..cd1a277d853b 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1232,7 +1232,7 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
head_td = priv->apCurrTD[dma_idx];
- head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
+ head_td->m_td1TD1.byTCR = 0;
head_td->pTDInfo->skb = skb;
@@ -1257,6 +1257,11 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
priv->bPWBitOn = false;
+ /* Set TSR1 & ReqCount in TxDescHead */
+ head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+ head_td->m_td1TD1.wReqCount =
+ cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+
head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
if (dma_idx == TYPE_AC0DMA)
@@ -1500,9 +1505,11 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (conf->enable_beacon) {
vnt_beacon_enable(priv, vif, conf);
- MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
+ TCR_AUTOBCNTX);
} else {
- MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
+ TCR_AUTOBCNTX);
}
}
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 61c39dd7ad01..b5b0155961f2 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1204,13 +1204,10 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
ptdCurr = (PSTxDesc)pHeadTD;
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+ ptdCurr->pTDInfo->dwReqCount = cbReqCount;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- /* Set TSR1 & ReqCount in TxDescHead */
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
return cbHeaderLength;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 55f6774f706f..aebde3289c50 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
}
if (!strncmp("=All", text_ptr, 4)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+ cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+ cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
return -ENOMEM;
}
/*
- * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+ * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
* explicit case..
*/
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
text_ptr = strchr(text_in, '=');
if (!text_ptr) {
pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
- if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+ if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
strcmp(tiqn->tiqn, text_ptr)) {
continue;
}
@@ -3512,7 +3512,7 @@ eob:
if (end_of_buf)
break;
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
break;
}
spin_unlock(&tiqn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 09a522bae222..cbcff38ac9b7 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -135,8 +135,8 @@ enum cmd_flags_table {
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
- IFC_SENDTARGETS_ALL = 0x00000100,
- IFC_SENDTARGETS_SINGLE = 0x00000200,
+ ICF_SENDTARGETS_ALL = 0x00000100,
+ ICF_SENDTARGETS_SINGLE = 0x00000200,
};
/* struct iscsi_cmd->i_state */
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7653cfb027a2..58f49ff69b14 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
EXPORT_SYMBOL(se_dev_set_queue_depth);
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
- int block_size = dev->dev_attrib.block_size;
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (!fabric_max_sectors) {
- pr_err("dev[%p]: Illegal ZERO value for"
- " fabric_max_sectors\n", dev);
- return -EINVAL;
- }
- if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
- " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MIN);
- return -EINVAL;
- }
- if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- if (!block_size) {
- block_size = 512;
- pr_warn("Defaulting to 512 for zero block_size\n");
- }
- fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- block_size);
-
- dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
- pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
- dev, fabric_max_sectors);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, dev->export_count);
return -EINVAL;
}
- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+ " greater than hw_max_sectors: %u\n", dev,
+ optimal_sectors, dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
- dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
xcopy_lun = &dev->xcopy_lun;
xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
+ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c2aea099ea4a..d836de200a03 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct fd_prot fd_prot;
sense_reason_t rc;
int ret = 0;
-
+ /*
+ * We are currently limited by the number of iovecs (2048) per
+ * single vfs_[writev,readv] call.
+ */
+ if (cmd->data_length > FD_MAX_BYTES) {
+ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+ "FD_MAX_BYTES: %u iovec count limitiation\n",
+ cmd->data_length, FD_MAX_BYTES);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
&fileio_dev_attrib_hw_block_size.attr,
&fileio_dev_attrib_block_size.attr,
&fileio_dev_attrib_hw_max_sectors.attr,
- &fileio_dev_attrib_fabric_max_sectors.attr,
&fileio_dev_attrib_optimal_sectors.attr,
&fileio_dev_attrib_hw_queue_depth.attr,
&fileio_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 3efff94fbd97..78346b850968 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
q = bdev_get_queue(bd);
dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
- dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
&iblock_dev_attrib_hw_block_size.attr,
&iblock_dev_attrib_block_size.attr,
&iblock_dev_attrib_hw_max_sectors.attr,
- &iblock_dev_attrib_fabric_max_sectors.attr,
&iblock_dev_attrib_optimal_sectors.attr,
&iblock_dev_attrib_hw_queue_depth.attr,
&iblock_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d56f2aaba9af..283cf786ef98 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
return 0;
}
+ } else if (we && registered_nexus) {
+ /*
+ * Reads are allowed for Write Exclusive locks
+ * from all registrants.
+ */
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ pr_debug("Allowing READ CDB: 0x%02x for %s"
+ " reservation\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+
+ return 0;
+ }
}
pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 60ebd170a561..98e83ac5661b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
&rd_mcp_dev_attrib_hw_block_size.attr,
&rd_mcp_dev_attrib_block_size.attr,
&rd_mcp_dev_attrib_hw_max_sectors.attr,
- &rd_mcp_dev_attrib_fabric_max_sectors.attr,
&rd_mcp_dev_attrib_optimal_sectors.attr,
&rd_mcp_dev_attrib_hw_queue_depth.attr,
&rd_mcp_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 11bea1952435..cd4bed7b2757 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
-
- if (sectors > dev->dev_attrib.fabric_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds fabric_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.fabric_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
- if (sectors > dev->dev_attrib.hw_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds backend hw_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.hw_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
check_lba:
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 1307600fe726..4c71657da56a 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -505,7 +505,6 @@ static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- u32 max_sectors;
int have_tp = 0;
int opt, min;
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- put_unaligned_be32(max_sectors, &buf[8]);
+ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8bfa61c9693d..1157b559683b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr,
- &tcmu_dev_attrib_fabric_max_sectors.attr,
&tcmu_dev_attrib_optimal_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr,
&tcmu_dev_attrib_queue_depth.attr,
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index ad09e51ffae4..f65f0d109fc8 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -4,6 +4,8 @@
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
+ * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
+ *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,6 +30,20 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
+/*
+ * Cooling state <-> CPUFreq frequency
+ *
+ * Cooling states are translated to frequencies throughout this driver and this
+ * is the relation between them.
+ *
+ * Highest cooling state corresponds to lowest possible frequency.
+ *
+ * i.e.
+ * level 0 --> 1st Max Freq
+ * level 1 --> 2nd Max Freq
+ * ...
+ */
+
/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
@@ -38,25 +54,27 @@
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* frequency.
+ * @max_level: maximum cooling level. One less than total number of valid
+ * cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @node: list_head to link all cpufreq_cooling_devices together.
*
- * This structure is required for keeping information of each
- * cpufreq_cooling_device registered. In order to prevent corruption of this a
- * mutex lock cooling_cpufreq_lock is used.
+ * This structure is required for keeping information of each registered
+ * cpufreq_cooling_device.
*/
struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
unsigned int cpufreq_val;
+ unsigned int max_level;
+ unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
struct list_head node;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
-static unsigned int cpufreq_dev_count;
-
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -98,120 +116,30 @@ static void release_idr(struct idr *idr, int id)
/* Below code defines functions to be used for cpufreq as cooling device */
/**
- * is_cpufreq_valid - function to check frequency transitioning capability.
- * @cpu: cpu for which check is needed.
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @freq: Frequency
*
- * This function will check the current state of the system if
- * it is capable of changing the frequency for a given @cpu.
- *
- * Return: 0 if the system is not currently capable of changing
- * the frequency of given cpu. !0 in case the frequency is changeable.
+ * Return: level on success, THERMAL_CSTATE_INVALID on error.
*/
-static int is_cpufreq_valid(int cpu)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+ unsigned int freq)
{
- struct cpufreq_policy policy;
-
- return !cpufreq_get_policy(&policy, cpu);
-}
-
-enum cpufreq_cooling_property {
- GET_LEVEL,
- GET_FREQ,
- GET_MAXL,
-};
-
-/**
- * get_property - fetch a property of interest for a give cpu.
- * @cpu: cpu for which the property is required
- * @input: query parameter
- * @output: query return
- * @property: type of query (frequency, level, max level)
- *
- * This is the common function to
- * 1. get maximum cpu cooling states
- * 2. translate frequency to cooling state
- * 3. translate cooling state to frequency
- * Note that the code may be not in good shape
- * but it is written in this way in order to:
- * a) reduce duplicate code as most of the code can be shared.
- * b) make sure the logic is consistent when translating between
- * cooling states and frequencies.
- *
- * Return: 0 on success, -EINVAL when invalid parameters are passed.
- */
-static int get_property(unsigned int cpu, unsigned long input,
- unsigned int *output,
- enum cpufreq_cooling_property property)
-{
- int i;
- unsigned long max_level = 0, level = 0;
- unsigned int freq = CPUFREQ_ENTRY_INVALID;
- int descend = -1;
- struct cpufreq_frequency_table *pos, *table =
- cpufreq_frequency_get_table(cpu);
-
- if (!output)
- return -EINVAL;
-
- if (!table)
- return -EINVAL;
-
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* get the frequency order */
- if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
- descend = freq > pos->frequency;
-
- freq = pos->frequency;
- max_level++;
- }
-
- /* No valid cpu frequency entry */
- if (max_level == 0)
- return -EINVAL;
+ unsigned long level;
- /* max_level is an index, not a counter */
- max_level--;
+ for (level = 0; level <= cpufreq_dev->max_level; level++) {
+ if (freq == cpufreq_dev->freq_table[level])
+ return level;
- /* get max level */
- if (property == GET_MAXL) {
- *output = (unsigned int)max_level;
- return 0;
+ if (freq > cpufreq_dev->freq_table[level])
+ break;
}
- if (property == GET_FREQ)
- level = descend ? input : (max_level - input);
-
- i = 0;
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* now we have a valid frequency entry */
- freq = pos->frequency;
-
- if (property == GET_LEVEL && (unsigned int)input == freq) {
- /* get level by frequency */
- *output = descend ? i : (max_level - i);
- return 0;
- }
- if (property == GET_FREQ && level == i) {
- /* get frequency by level */
- *output = freq;
- return 0;
- }
- i++;
- }
-
- return -EINVAL;
+ return THERMAL_CSTATE_INVALID;
}
/**
- * cpufreq_cooling_get_level - for a give cpu, return the cooling level.
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
* @cpu: cpu for which the level is required
* @freq: the frequency of interest
*
@@ -223,77 +151,21 @@ static int get_property(unsigned int cpu, unsigned long input,
*/
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
- unsigned int val;
-
- if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
- return THERMAL_CSTATE_INVALID;
-
- return (unsigned long)val;
-}
-EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
-
-/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: cooling level
- *
- * This function matches cooling level with frequency. Based on a cooling level
- * of frequency, equals cooling state of cpu cooling device, it will return
- * the corresponding frequency.
- * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
- *
- * Return: 0 on error, the corresponding frequency otherwise.
- */
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
-{
- int ret = 0;
- unsigned int freq;
-
- ret = get_property(cpu, level, &freq, GET_FREQ);
- if (ret)
- return 0;
-
- return freq;
-}
-
-/**
- * cpufreq_apply_cooling - function to apply frequency clipping.
- * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
- * clipping data.
- * @cooling_state: value of the cooling state.
- *
- * Function used to make sure the cpufreq layer is aware of current thermal
- * limits. The limits are applied by updating the cpufreq policy.
- *
- * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
- * cooling state).
- */
-static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
- unsigned long cooling_state)
-{
- unsigned int cpuid, clip_freq;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu = cpumask_any(mask);
-
-
- /* Check if the old cooling action is same as new cooling action */
- if (cpufreq_device->cpufreq_state == cooling_state)
- return 0;
-
- clip_freq = get_cpu_frequency(cpu, cooling_state);
- if (!clip_freq)
- return -EINVAL;
-
- cpufreq_device->cpufreq_state = cooling_state;
- cpufreq_device->cpufreq_val = clip_freq;
+ struct cpufreq_cooling_device *cpufreq_dev;
- for_each_cpu(cpuid, mask) {
- if (is_cpufreq_valid(cpuid))
- cpufreq_update_policy(cpuid);
+ mutex_lock(&cooling_cpufreq_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+ mutex_unlock(&cooling_cpufreq_lock);
+ return get_level(cpufreq_dev, freq);
+ }
}
+ mutex_unlock(&cooling_cpufreq_lock);
- return 0;
+ pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
+ return THERMAL_CSTATE_INVALID;
}
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
/**
* cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
@@ -323,11 +195,6 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
&cpufreq_dev->allowed_cpus))
continue;
- if (!cpufreq_dev->cpufreq_val)
- cpufreq_dev->cpufreq_val = get_cpu_frequency(
- cpumask_any(&cpufreq_dev->allowed_cpus),
- cpufreq_dev->cpufreq_state);
-
max_freq = cpufreq_dev->cpufreq_val;
if (policy->max != max_freq)
@@ -354,19 +221,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu;
- unsigned int count = 0;
- int ret;
-
- cpu = cpumask_any(mask);
-
- ret = get_property(cpu, 0, &count, GET_MAXL);
- if (count > 0)
- *state = count;
-
- return ret;
+ *state = cpufreq_device->max_level;
+ return 0;
}
/**
@@ -403,8 +260,24 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+ unsigned int clip_freq;
+
+ /* Requested state must not exceed max_level */
+ if (WARN_ON(state > cpufreq_device->max_level))
+ return -EINVAL;
+
+ /* Check if the old cooling action is same as new cooling action */
+ if (cpufreq_device->cpufreq_state == state)
+ return 0;
- return cpufreq_apply_cooling(cpufreq_device, state);
+ clip_freq = cpufreq_device->freq_table[state];
+ cpufreq_device->cpufreq_state = state;
+ cpufreq_device->cpufreq_val = clip_freq;
+
+ cpufreq_update_policy(cpu);
+
+ return 0;
}
/* Bind cpufreq callbacks to thermal cooling device ops */
@@ -419,10 +292,25 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
.notifier_call = cpufreq_thermal_notifier,
};
+static unsigned int find_next_max(struct cpufreq_frequency_table *table,
+ unsigned int prev_max)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int max = 0;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (pos->frequency > max && pos->frequency < prev_max)
+ max = pos->frequency;
+ }
+
+ return max;
+}
+
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * Normally this should be the same as cpufreq policy->related_cpus.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -437,37 +325,42 @@ __cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
- struct cpufreq_cooling_device *cpufreq_dev = NULL;
- unsigned int min = 0, max = 0;
+ struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
- int ret = 0, i;
- struct cpufreq_policy policy;
+ struct cpufreq_frequency_table *pos, *table;
+ unsigned int freq, i;
+ int ret;
- /* Verify that all the clip cpus have same freq_min, freq_max limit */
- for_each_cpu(i, clip_cpus) {
- /* continue if cpufreq policy not found and not return error */
- if (!cpufreq_get_policy(&policy, i))
- continue;
- if (min == 0 && max == 0) {
- min = policy.cpuinfo.min_freq;
- max = policy.cpuinfo.max_freq;
- } else {
- if (min != policy.cpuinfo.min_freq ||
- max != policy.cpuinfo.max_freq)
- return ERR_PTR(-EINVAL);
- }
+ table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+ if (!table) {
+ pr_debug("%s: CPUFreq table not found\n", __func__);
+ return ERR_PTR(-EPROBE_DEFER);
}
- cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
- GFP_KERNEL);
+
+ cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
+ /* Find max levels */
+ cpufreq_for_each_valid_entry(pos, table)
+ cpufreq_dev->max_level++;
+
+ cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+ cpufreq_dev->max_level, GFP_KERNEL);
+ if (!cpufreq_dev->freq_table) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_cdev;
+ }
+
+ /* max_level is an index, not a counter */
+ cpufreq_dev->max_level--;
+
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
- kfree(cpufreq_dev);
- return ERR_PTR(-EINVAL);
+ cool_dev = ERR_PTR(ret);
+ goto free_table;
}
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,25 +368,44 @@ __cpufreq_cooling_register(struct device_node *np,
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
&cpufreq_cooling_ops);
- if (IS_ERR(cool_dev)) {
- release_idr(&cpufreq_idr, cpufreq_dev->id);
- kfree(cpufreq_dev);
- return cool_dev;
+ if (IS_ERR(cool_dev))
+ goto remove_idr;
+
+ /* Fill freq-table in descending order of frequencies */
+ for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+ freq = find_next_max(table, freq);
+ cpufreq_dev->freq_table[i] = freq;
+
+ /* Warn for duplicate entries */
+ if (!freq)
+ pr_warn("%s: table has duplicate entries\n", __func__);
+ else
+ pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
+
+ cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
- cpufreq_dev->cpufreq_state = 0;
+
mutex_lock(&cooling_cpufreq_lock);
/* Register the notifier for first cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- cpufreq_dev_count++;
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
+
+remove_idr:
+ release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_table:
+ kfree(cpufreq_dev->freq_table);
+free_cdev:
+ kfree(cpufreq_dev);
+
+ return cool_dev;
}
/**
@@ -554,16 +466,16 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_cpufreq_lock);
list_del(&cpufreq_dev->node);
- cpufreq_dev_count--;
/* Unregister the notifier for the last cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
+ kfree(cpufreq_dev->freq_table);
kfree(cpufreq_dev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
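
The cpu_cooling.c rework replaces the repeated table walks of get_property() with a per-device freq_table built once at registration: find_next_max() extracts frequencies in descending order, max_level is the last index, and get_level() becomes a simple scan (level 0 = highest frequency, max_level = lowest). The userspace sketch below rebuilds such a descending table from an unordered frequency list and maps a frequency back to its level; it assumes distinct, non-zero frequencies and is an illustration, not the driver code itself:

    #include <stdio.h>

    #define INVALID_LEVEL (~0u)

    /* pick the largest frequency strictly below prev_max (like find_next_max) */
    static unsigned int find_next_max(const unsigned int *freqs, int n,
                                      unsigned int prev_max)
    {
        unsigned int max = 0;

        for (int i = 0; i < n; i++)
            if (freqs[i] > max && freqs[i] < prev_max)
                max = freqs[i];
        return max;
    }

    /* level 0 = highest frequency ... max_level = lowest frequency */
    static unsigned int get_level(const unsigned int *table, int max_level,
                                  unsigned int freq)
    {
        for (int level = 0; level <= max_level; level++) {
            if (table[level] == freq)
                return (unsigned int)level;
            if (freq > table[level])
                break;          /* table is descending: freq cannot appear later */
        }
        return INVALID_LEVEL;
    }

    int main(void)
    {
        unsigned int freqs[] = { 800000, 1200000, 1000000, 600000 };    /* kHz */
        unsigned int table[4], prev = ~0u;
        int n = 4, max_level = n - 1;

        /* fill the table in descending order of frequencies */
        for (int i = 0; i <= max_level; i++) {
            table[i] = find_next_max(freqs, n, prev);
            prev = table[i];
        }

        printf("1000000 kHz -> cooling level %u\n",
               get_level(table, max_level, 1000000));   /* prints level 1 */
        return 0;
    }
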
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 000d53e934a0..607b62c7e611 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -18,7 +18,6 @@
*/
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -28,18 +27,17 @@
static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
- struct cpumask mask_val;
-
- /* make sure cpufreq driver has been initialized */
- if (!cpufreq_frequency_get_table(0))
- return -EPROBE_DEFER;
-
- cpumask_set_cpu(0, &mask_val);
- cdev = cpufreq_cooling_register(&mask_val);
+ cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
- return PTR_ERR(cdev);
+ int ret = PTR_ERR(cdev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register cooling device %d\n",
+ ret);
+
+ return ret;
}
platform_set_drvdata(pdev, cdev);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 88b32f942dcf..2ccbc0788353 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -454,15 +453,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
const struct of_device_id *of_id =
of_match_device(of_imx_thermal_match, &pdev->dev);
struct imx_thermal_data *data;
- struct cpumask clip_cpus;
struct regmap *map;
int measure_freq;
int ret;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(&pdev->dev, "no cpufreq driver!");
- return -EPROBE_DEFER;
- }
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -516,12 +510,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
- cpumask_set_cpu(0, &clip_cpus);
- data->cdev = cpufreq_cooling_register(&clip_cpus);
+ data->cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
- dev_err(&pdev->dev,
- "failed to register cpufreq cooling device: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n",
+ ret);
return ret;
}
@@ -613,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
data->mode = THERMAL_DEVICE_DISABLED;
+ clk_disable_unprepare(data->thermal_clk);
return 0;
}
@@ -622,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
+ clk_prepare_enable(data->thermal_clk);
/* Enabled thermal sensor after resume */
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ffe40bffaf1a..d4413698a85f 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index e4e61b3fb11e..2c2ec7666eb1 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -82,7 +82,7 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
if (!acpi_has_method(handle, "_TRT"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
continue;
result = acpi_bus_get_device(trt->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
result = acpi_bus_get_device(trt->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get target ACPI device\n");
}
@@ -167,7 +163,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
if (!acpi_has_method(handle, "_ART"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (art->source) {
result = acpi_bus_get_device(art->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
if (art->target) {
result = acpi_bus_get_device(art->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
}
@@ -321,8 +313,8 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
unsigned long length = 0;
int count = 0;
char __user *arg = (void __user *)__arg;
- struct trt *trts;
- struct art *arts;
+ struct trt *trts = NULL;
+ struct art *arts = NULL;
switch (cmd) {
case ACPI_THERMAL_GET_TRT_COUNT:
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index dcb306ea14a4..65a98a97df07 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -335,7 +335,6 @@ static struct platform_driver int3400_thermal_driver = {
.remove = int3400_thermal_remove,
.driver = {
.name = "int3400 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(int3400_thermal_match),
},
};
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index a5d08c14ba24..c5cbc3af3a05 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -231,7 +231,6 @@ static struct platform_driver int3402_thermal_driver = {
.remove = int3402_thermal_remove,
.driver = {
.name = "int3402 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3402_thermal_match,
},
};
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 1bfa6a69e77a..0faf500d8a77 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -301,6 +301,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
{
struct int3403_sensor *obj = priv->priv;
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, int3403_notify);
thermal_zone_device_unregister(obj->tzone);
return 0;
}
@@ -369,6 +371,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
p = buf.pointer;
if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
printk(KERN_WARNING "Invalid PPSS data\n");
+ kfree(buf.pointer);
return -EFAULT;
}
@@ -381,6 +384,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
priv->priv = obj;
+ kfree(buf.pointer);
/* TODO: add ACPI notification support */
return result;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
new file mode 100644
index 000000000000..0fe5dbbea968
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -0,0 +1,311 @@
+/*
+ * processor_thermal_device.c
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+/* Broadwell-U/HSB thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
+#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+
+/* Braswell thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
+
+struct power_config {
+ u32 index;
+ u32 min_uw;
+ u32 max_uw;
+ u32 tmin_us;
+ u32 tmax_us;
+ u32 step_uw;
+};
+
+struct proc_thermal_device {
+ struct device *dev;
+ struct acpi_device *adev;
+ struct power_config power_limits[2];
+};
+
+enum proc_thermal_emum_mode_type {
+ PROC_THERMAL_NONE,
+ PROC_THERMAL_PCI,
+ PROC_THERMAL_PLATFORM_DEV
+};
+
+/*
+ * We can have only one type of enumeration, PCI or Platform,
+ * not both. So we don't need instance specific data.
+ */
+static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
+ PROC_THERMAL_NONE;
+
+#define POWER_LIMIT_SHOW(index, suffix) \
+static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pci_dev; \
+ struct platform_device *pdev; \
+ struct proc_thermal_device *proc_dev; \
+\
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
+ pdev = to_platform_device(dev); \
+ proc_dev = platform_get_drvdata(pdev); \
+ } else { \
+ pci_dev = to_pci_dev(dev); \
+ proc_dev = pci_get_drvdata(pci_dev); \
+ } \
+ return sprintf(buf, "%lu\n",\
+ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
+}
+
+POWER_LIMIT_SHOW(0, min_uw)
+POWER_LIMIT_SHOW(0, max_uw)
+POWER_LIMIT_SHOW(0, step_uw)
+POWER_LIMIT_SHOW(0, tmin_us)
+POWER_LIMIT_SHOW(0, tmax_us)
+
+POWER_LIMIT_SHOW(1, min_uw)
+POWER_LIMIT_SHOW(1, max_uw)
+POWER_LIMIT_SHOW(1, step_uw)
+POWER_LIMIT_SHOW(1, tmin_us)
+POWER_LIMIT_SHOW(1, tmax_us)
+
+static DEVICE_ATTR_RO(power_limit_0_min_uw);
+static DEVICE_ATTR_RO(power_limit_0_max_uw);
+static DEVICE_ATTR_RO(power_limit_0_step_uw);
+static DEVICE_ATTR_RO(power_limit_0_tmin_us);
+static DEVICE_ATTR_RO(power_limit_0_tmax_us);
+
+static DEVICE_ATTR_RO(power_limit_1_min_uw);
+static DEVICE_ATTR_RO(power_limit_1_max_uw);
+static DEVICE_ATTR_RO(power_limit_1_step_uw);
+static DEVICE_ATTR_RO(power_limit_1_tmin_us);
+static DEVICE_ATTR_RO(power_limit_1_tmax_us);
+
+static struct attribute *power_limit_attrs[] = {
+ &dev_attr_power_limit_0_min_uw.attr,
+ &dev_attr_power_limit_1_min_uw.attr,
+ &dev_attr_power_limit_0_max_uw.attr,
+ &dev_attr_power_limit_1_max_uw.attr,
+ &dev_attr_power_limit_0_step_uw.attr,
+ &dev_attr_power_limit_1_step_uw.attr,
+ &dev_attr_power_limit_0_tmin_us.attr,
+ &dev_attr_power_limit_1_tmin_us.attr,
+ &dev_attr_power_limit_0_tmax_us.attr,
+ &dev_attr_power_limit_1_tmax_us.attr,
+ NULL
+};
+
+static struct attribute_group power_limit_attribute_group = {
+ .attrs = power_limit_attrs,
+ .name = "power_limits"
+};
+
+static int proc_thermal_add(struct device *dev,
+ struct proc_thermal_device **priv)
+{
+ struct proc_thermal_device *proc_priv;
+ struct acpi_device *adev;
+ acpi_status status;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *elements, *ppcc;
+ union acpi_object *p;
+ int i;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buf.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ dev_err(dev, "Invalid PPCC data\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ if (!p->package.count) {
+ dev_err(dev, "Invalid PPCC package size\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+
+ proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ proc_priv->dev = dev;
+ proc_priv->adev = adev;
+
+ for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
+ elements = &(p->package.elements[i+1]);
+ if (elements->type != ACPI_TYPE_PACKAGE ||
+ elements->package.count != 6) {
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ ppcc = elements->package.elements;
+ proc_priv->power_limits[i].index = ppcc[0].integer.value;
+ proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
+ proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
+ proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
+ proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
+ proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
+ }
+
+ *priv = proc_priv;
+
+ ret = sysfs_create_group(&dev->kobj,
+ &power_limit_attribute_group);
+
+free_buffer:
+ kfree(buf.pointer);
+
+ return ret;
+}
+
+void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+{
+ sysfs_remove_group(&proc_priv->dev->kobj,
+ &power_limit_attribute_group);
+}
+
+static int int3401_add(struct platform_device *pdev)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
+ dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
+ return -ENODEV;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
+
+ return 0;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+ proc_thermal_remove(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static int proc_thermal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
+ dev_err(&pdev->dev, "error: enumerated as platform dev\n");
+ return -ENODEV;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret) {
+ pci_disable_device(pdev);
+ return ret;
+ }
+
+ pci_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PCI;
+
+ return 0;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+ proc_thermal_remove(pci_get_drvdata(pdev));
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+ .name = "proc_thermal",
+ .probe = proc_thermal_pci_probe,
+ .remove = proc_thermal_pci_remove,
+ .id_table = proc_thermal_pci_ids,
+};
+
+static const struct acpi_device_id int3401_device_ids[] = {
+ {"INT3401", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static struct platform_driver int3401_driver = {
+ .probe = int3401_add,
+ .remove = int3401_remove,
+ .driver = {
+ .name = "int3401 thermal",
+ .acpi_match_table = int3401_device_ids,
+ },
+};
+
+static int __init proc_thermal_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&int3401_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&proc_thermal_pci_driver);
+
+ return ret;
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ platform_driver_unregister(&int3401_driver);
+ pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index e98b4249187c..6ceebd659dd4 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
{ X86_VENDOR_INTEL, 6, 0x4c},
+ { X86_VENDOR_INTEL, 6, 0x56},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index e145b66df444..d717f3dab6f1 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
*
* Return: pointer to trip points table, NULL otherwise
*/
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
struct __thermal_zone *data = tz->devdata;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 8803e693fe68..2580a4872f90 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
struct mutex lock;
struct list_head list;
int id;
- int ctemp;
+ u32 ctemp;
};
#define rcar_thermal_for_each_priv(pos, common) \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
struct device *dev = rcar_priv_to_dev(priv);
int i;
- int ctemp, old, new;
+ u32 ctemp, old, new;
int ret = -EINVAL;
mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
int i;
int ret = -ENODEV;
int idle = IDLE_INTERVAL;
+ u32 enr_bits = 0;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
/*
* platform has IRQ support.
- * Then, drier use common register
+ * Then, driver uses common registers
*/
ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (IS_ERR(common->base))
return PTR_ERR(common->base);
- /* enable temperature comparation */
- rcar_thermal_common_write(common, ENR, 0x00030303);
-
idle = 0; /* polling delay is not needed */
}
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
rcar_thermal_irq_enable(priv);
list_move_tail(&priv->list, &common->head);
+
+ /* update ENR bits */
+ enr_bits |= 3 << (i * 8);
}
+ /* enable temperature comparison */
+ if (irq)
+ rcar_thermal_common_write(common, ENR, enr_bits);
+
platform_set_drvdata(pdev, common);
dev_info(dev, "%d sensor probed\n", i);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 1bcddfc60e91..9c6ce548e363 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -677,7 +677,6 @@ static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
static struct platform_driver rockchip_thermal_driver = {
.driver = {
.name = "rockchip-thermal",
- .owner = THIS_MODULE,
.pm = &rockchip_thermal_pm_ops,
.of_match_table = of_rockchip_thermal_match,
},
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index f760389a204c..c43306ecc0ab 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on ARCH_HAS_BANDGAP && OF
+ depends on OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index b6be572704a4..6dc3815cc73f 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -347,7 +347,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
{
int ret;
- struct cpumask mask_val;
struct exynos_thermal_zone *th_zone;
if (!sensor_conf || !sensor_conf->read_temperature) {
@@ -367,13 +366,14 @@ int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
* sensor
*/
if (sensor_conf->cooling_data.freq_clip_count > 0) {
- cpumask_set_cpu(0, &mask_val);
th_zone->cool_dev[th_zone->cool_dev_size] =
- cpufreq_cooling_register(&mask_val);
+ cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
- dev_err(sensor_conf->dev,
- "Failed to register cpufreq cooling device\n");
- ret = -EINVAL;
+ ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(sensor_conf->dev,
+ "Failed to register cpufreq cooling device: %d\n",
+ ret);
goto err_unregister;
}
th_zone->cool_dev_size++;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d44d91d681d4..d2f1e62a4232 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -927,7 +927,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
/* Register the sensor with thermal management interface */
ret = exynos_register_thermal(sensor_conf);
if (ret) {
- dev_err(&pdev->dev, "Failed to register thermal interface\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register thermal interface: %d\n",
+ ret);
goto err_clk;
}
data->reg_conf = sensor_conf;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 84fdf0792e27..87e0b0782023 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -930,7 +930,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
struct thermal_zone_device *pos1;
struct thermal_cooling_device *pos2;
unsigned long max_state;
- int result;
+ int result, ret;
if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
return -EINVAL;
@@ -947,7 +947,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (tz != pos1 || cdev != pos2)
return -EINVAL;
- cdev->ops->get_max_state(cdev, &max_state);
+ ret = cdev->ops->get_max_state(cdev, &max_state);
+ if (ret)
+ return ret;
/* lower default 0, upper default max_state */
lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 9083e7520623..0531c752fbbb 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
void of_thermal_destroy_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
{
return 0;
}
-static inline const struct thermal_trip * const
+static inline const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
return NULL;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 5fd03865e396..3fb054a10f6a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/thermal.h>
-#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>
@@ -407,17 +406,17 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
if (!data)
return -EINVAL;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(bgp->dev, "no cpufreq driver yet\n");
- return -EPROBE_DEFER;
- }
-
/* Register cooling device */
data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cool_dev)) {
- dev_err(bgp->dev,
- "Failed to register cpufreq cooling device\n");
- return PTR_ERR(data->cool_dev);
+ int ret = PTR_ERR(data->cool_dev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(bgp->dev,
+ "Failed to register cpu cooling device %d\n",
+ ret);
+
+ return ret;
}
ti_bandgap_set_sensor_data(bgp, id, data);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d2b496750d59..4ddfa60c9222 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2399,17 +2399,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
- mask |= POLLHUP;
if (input_available_p(tty, 1))
mask |= POLLIN | POLLRDNORM;
- else if (mask & POLLHUP) {
- tty_flush_to_ldisc(tty);
- if (input_available_p(tty, 1))
- mask |= POLLIN | POLLRDNORM;
- }
if (tty->packet && tty->link->ctrl_status)
mask |= POLLPRI | POLLIN | POLLRDNORM;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= POLLHUP;
if (tty_hung_up_p(file))
mask |= POLLHUP;
if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 31feeb2d0a66..d1f8dc6aabcb 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1815,7 +1815,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
}
static int
-pci_wch_ch382_setup(struct serial_private *priv,
+pci_wch_ch38x_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
@@ -1880,6 +1880,7 @@ pci_wch_ch382_setup(struct serial_private *priv,
#define PCIE_VENDOR_ID_WCH 0x1c00
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
+#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -2571,13 +2572,21 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
- /* WCH CH382 2S1P card (16750 clone) */
+ /* WCH CH382 2S1P card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
.device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .setup = pci_wch_ch382_setup,
+ .setup = pci_wch_ch38x_setup,
+ },
+ /* WCH CH384 4S card (16850 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH384_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch38x_setup,
},
/*
* ASIX devices with FIFO bug
@@ -2876,6 +2885,7 @@ enum pci_board_num_t {
pbn_fintek_4,
pbn_fintek_8,
pbn_fintek_12,
+ pbn_wch384_4,
};
/*
@@ -3675,6 +3685,14 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.first_offset = 0x40,
},
+
+ [pbn_wch384_4] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0xC0,
+ },
};
static const struct pci_device_id blacklist[] = {
@@ -3687,6 +3705,7 @@ static const struct pci_device_id blacklist[] = {
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+ { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
};
/*
@@ -5400,6 +5419,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_wch384_4 },
+
/*
* Commtech, Inc. Fastcom adapters
*/
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 19273e31d224..107e80722575 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1757,32 +1757,43 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
#endif
#if defined(CONFIG_ARCH_EXYNOS)
+#define EXYNOS_COMMON_SERIAL_DRV_DATA \
+ .info = &(struct s3c24xx_uart_info) { \
+ .name = "Samsung Exynos UART", \
+ .type = PORT_S3C6400, \
+ .has_divslot = 1, \
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
+ .def_clk_sel = S3C2410_UCON_CLKSEL0, \
+ .num_clks = 1, \
+ .clksel_mask = 0, \
+ .clksel_shift = 0, \
+ }, \
+ .def_cfg = &(struct s3c2410_uartcfg) { \
+ .ucon = S5PV210_UCON_DEFAULT, \
+ .ufcon = S5PV210_UFCON_DEFAULT, \
+ .has_fracval = 1, \
+ } \
+
static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
- .info = &(struct s3c24xx_uart_info) {
- .name = "Samsung Exynos4 UART",
- .type = PORT_S3C6400,
- .has_divslot = 1,
- .rx_fifomask = S5PV210_UFSTAT_RXMASK,
- .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
- .rx_fifofull = S5PV210_UFSTAT_RXFULL,
- .tx_fifofull = S5PV210_UFSTAT_TXFULL,
- .tx_fifomask = S5PV210_UFSTAT_TXMASK,
- .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
- .def_clk_sel = S3C2410_UCON_CLKSEL0,
- .num_clks = 1,
- .clksel_mask = 0,
- .clksel_shift = 0,
- },
- .def_cfg = &(struct s3c2410_uartcfg) {
- .ucon = S5PV210_UCON_DEFAULT,
- .ufcon = S5PV210_UFCON_DEFAULT,
- .has_fracval = 1,
- },
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 256, 64, 16, 16 },
};
+
+static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
+ .fifosize = { 64, 256, 16, 256 },
+};
+
#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data)
#else
#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL
#endif
static struct platform_device_id s3c24xx_serial_driver_ids[] = {
@@ -1804,6 +1815,9 @@ static struct platform_device_id s3c24xx_serial_driver_ids[] = {
}, {
.name = "exynos4210-uart",
.driver_data = EXYNOS4210_SERIAL_DRV_DATA,
+ }, {
+ .name = "exynos5433-uart",
+ .driver_data = EXYNOS5433_SERIAL_DRV_DATA,
},
{ },
};
@@ -1823,6 +1837,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
.data = (void *)S5PV210_SERIAL_DRV_DATA },
{ .compatible = "samsung,exynos4210-uart",
.data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+ { .compatible = "samsung,exynos5433-uart",
+ .data = (void *)EXYNOS5433_SERIAL_DRV_DATA },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 57ca61b14670..984605bb5bf1 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2164,7 +2164,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
break;
}
- dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+ printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+ port->dev ? dev_name(port->dev) : "",
+ port->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + port->line,
address, port->irq, port->uartclk / 16, uart_type(port));
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4f35b43e2475..51f066aa375e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1464,6 +1464,9 @@ static int tty_reopen(struct tty_struct *tty)
driver->subtype == PTY_TYPE_MASTER)
return -EIO;
+ if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+ return -EBUSY;
+
tty->count++;
WARN_ON(!tty->ldisc);
@@ -2106,10 +2109,6 @@ retry_open:
retval = -ENODEV;
filp->f_flags = saved_flags;
- if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
- !capable(CAP_SYS_ADMIN))
- retval = -EBUSY;
-
if (retval) {
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5b9825a4538a..a57dc8866fc5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -669,7 +669,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci)
return -ENOMEM;
- platform_set_drvdata(pdev, ci);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -783,6 +782,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
}
+ platform_set_drvdata(pdev, ci);
ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
ci->platdata->name, ci);
if (ret)
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index c1694cff1eaf..48731d0bab35 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -91,6 +91,7 @@ static int host_start(struct ci_hdrc *ci)
if (!hcd)
return -ENOMEM;
+ dev_set_drvdata(ci->dev, ci);
hcd->rsrc_start = ci->hw_bank.phys;
hcd->rsrc_len = ci->hw_bank.size;
hcd->regs = ci->hw_bank.abs;
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index de0c9c9d7091..a6315abe7b7c 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
return 0;
+ /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+ return 1;
+
/* NOTE: can't use usb_match_id() since interface caches
* aren't set up yet. this is cut/paste from that code.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0ffb4ed0a945..41e510ae8c83 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Protocol and OTG Electrical Test Device */
+ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index ad43c5bc1ef1..02e3e2d4ea56 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(&hsotg->lock);
+
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
- spin_lock(&hsotg->lock);
-
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
}
}
- spin_unlock(&hsotg->lock);
out:
+ spin_unlock(&hsotg->lock);
return retval;
}
EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 200168ec2d75..79242008085b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2567,7 +2567,7 @@ error:
* s3c_hsotg_ep_disable - disable given endpoint
* @ep: The endpoint to disable.
*/
-static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
@@ -2588,7 +2588,7 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
spin_lock_irqsave(&hsotg->lock, flags);
/* terminate all requests with shutdown */
- kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+ kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, force);
hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
hs_ep->fifo_index = 0;
@@ -2609,6 +2609,10 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
return 0;
}
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+ return s3c_hsotg_ep_disable_force(ep, false);
+}
/**
* on_list - check request is on the given endpoint
* @ep: The endpoint to check.
@@ -2924,7 +2928,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
/* all endpoints should be shutdown */
for (ep = 1; ep < hsotg->num_of_eps; ep++)
- s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+ s3c_hsotg_ep_disable_force(&hsotg->eps[ep].ep, true);
spin_lock_irqsave(&hsotg->lock, flags);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7c4faf738747..b642a2f998f9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -33,6 +33,8 @@
#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
#define PCI_DEVICE_ID_INTEL_BSW 0x22B7
+#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
+#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
struct dwc3_pci {
struct device *dev;
@@ -219,6 +221,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f03b136ecfce..8f65ab3a3b92 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (i == (request->num_mapped_sgs - 1) ||
sg_is_last(s)) {
- if (list_is_last(&req->list,
- &dep->request_list))
+ if (list_empty(&dep->request_list))
last_one = true;
chain = false;
}
@@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (last_one)
break;
}
+
+ if (last_one)
+ break;
} else {
dma = req->request.dma;
length = req->request.length;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 6e04e302dc3a..a1bc3e3a0b09 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -399,8 +399,9 @@ static int hidg_setup(struct usb_function *f,
value = __le16_to_cpu(ctrl->wValue);
length = __le16_to_cpu(ctrl->wLength);
- VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x "
- "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+ VDBG(cdev,
+ "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
+ __func__, ctrl->bRequestType, ctrl->bRequest, value);
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a90440300735..259b656c0b3e 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -520,7 +520,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
req = midi_alloc_ep_req(ep, midi->buflen);
if (!req) {
- ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+ ERROR(midi, "%s: alloc_ep_request failed\n", __func__);
return;
}
req->length = 0;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index f7b203293205..e9715845f82e 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -897,7 +897,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
struct f_uac1_opts *opts;
opts = container_of(f, struct f_uac1_opts, func_inst);
- gaudio_cleanup(opts->card);
if (opts->fn_play_alloc)
kfree(opts->fn_play);
if (opts->fn_cap_alloc)
@@ -935,6 +934,7 @@ static void f_audio_free(struct usb_function *f)
struct f_audio *audio = func_to_audio(f);
struct f_uac1_opts *opts;
+ gaudio_cleanup(&audio->card);
opts = container_of(f->fi, struct f_uac1_opts, func_inst);
kfree(audio);
mutex_lock(&opts->lock);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index c744e4975d74..db49ec4c748e 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -441,6 +441,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
kbuf = memdup_user(buf, len);
if (IS_ERR(kbuf)) {
value = PTR_ERR(kbuf);
+ kbuf = NULL;
goto free1;
}
@@ -449,6 +450,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
data->name, len, (int) value);
free1:
mutex_unlock(&data->lock);
+ kfree (kbuf);
return value;
}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index ce882371786b..9f93bed42052 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
req->using_dma = 1;
req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
- | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
+ | USBA_DMA_END_BUF_EN;
- if (ep->is_in)
- req->ctrl |= USBA_DMA_END_BUF_EN;
+ if (!ep->is_in)
+ req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
/*
* Add this request to the queue and submit for DMA if
@@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
- struct usba_request *req = to_usba_req(_req);
+ struct usba_request *req;
unsigned long flags;
u32 status;
@@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
spin_lock_irqsave(&udc->lock, flags);
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req)
+ break;
+ }
+
+ if (&req->req != _req) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+
if (req->using_dma) {
/*
* If this request is currently being transferred,
@@ -1563,7 +1573,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
receive_data(ep);
- usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
}
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index ff67ceac77c4..d4fe8d769bd6 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -718,10 +718,11 @@ static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
struct bdc *bdc;
int ret = 0;
- bdc = ep->bdc;
if (!req || !ep || !ep->usb_ep.desc)
return -EINVAL;
+ bdc = ep->bdc;
+
req->usb_req.actual = 0;
req->usb_req.status = -EINPROGRESS;
req->epnum = ep->ep_num;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index e113fd73aeae..f9a332775c47 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1581,6 +1581,10 @@ iso_stream_schedule (
else
next = (now + 2 + 7) & ~0x07; /* full frame cache */
+ /* If needed, initialize last_iso_frame so that this URB will be seen */
+ if (ehci->isoc_count == 0)
+ ehci->last_iso_frame = now >> 3;
+
/*
* Use ehci->last_iso_frame as the base. There can't be any
* TDs scheduled for earlier than that.
@@ -1600,11 +1604,11 @@ iso_stream_schedule (
*/
now2 = (now - base) & (mod - 1);
- /* Is the schedule already full? */
+ /* Is the schedule about to wrap around? */
if (unlikely(!empty && start < period)) {
- ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+ ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
urb, stream->next_uframe, base, period, mod);
- status = -ENOSPC;
+ status = -EFBIG;
goto fail;
}
@@ -1671,10 +1675,6 @@ iso_stream_schedule (
urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
-
- /* Make sure scan_isoc() sees these */
- if (ehci->isoc_count == 0)
- ehci->last_iso_frame = now >> 3;
return status;
fail:
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 19a9af1b4d74..ff9af29b4e9f 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -451,7 +451,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
- err = PTR_ERR(u_phy);
+ err = -EPROBE_DEFER;
goto cleanup_clk_en;
}
hcd->usb_phy = u_phy;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index dd483c13565b..ce636466edb7 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -567,7 +567,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
u32 control;
- u32 fminterval;
+ u32 fminterval = 0;
+ bool no_fminterval = false;
int cnt;
if (!mmio_resource_enabled(pdev, 0))
@@ -577,6 +578,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
if (base == NULL)
return;
+ /*
+ * ULi M5237 OHCI controller locks the whole system when accessing
+ * the OHCI_FMINTERVAL offset.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
+ no_fminterval = true;
+
control = readl(base + OHCI_CONTROL);
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
@@ -615,7 +623,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
}
/* software reset of the controller, preserving HcFmInterval */
- fminterval = readl(base + OHCI_FMINTERVAL);
+ if (!no_fminterval)
+ fminterval = readl(base + OHCI_FMINTERVAL);
+
writel(OHCI_HCR, base + OHCI_CMDSTATUS);
/* reset requires max 10 us delay */
@@ -624,7 +634,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
break;
udelay(1);
}
- writel(fminterval, base + OHCI_FMINTERVAL);
+
+ if (!no_fminterval)
+ writel(fminterval, base + OHCI_FMINTERVAL);
/* Now the controller is safely in SUSPEND and nothing can wake it up */
iounmap(base);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 142b601f9563..7f76c8a12f89 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"must be suspended extra slowly",
pdev->revision);
}
+ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
/* Fresco Logic confirms: all revisions of this chip do not
* support MSI, even though some of them claim to in their PCI
* capabilities.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 01fcbb5eb06e..c50d8d202618 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
return -EINVAL;
}
+ if (setup == SETUP_CONTEXT_ONLY) {
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+ SLOT_STATE_DEFAULT) {
+ xhci_dbg(xhci, "Slot already in default state\n");
+ return 0;
+ }
+ }
+
command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 9d68372dd9aa..b005010240e5 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -72,6 +72,8 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
@@ -85,6 +87,7 @@ config USB_MUSB_AM35X
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
select USB_MUSB_AM335X_CHILD
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on OF_IRQ
config USB_MUSB_BLACKFIN
@@ -93,6 +96,7 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "Ux500 platforms"
+ depends on ARCH_U8500 || COMPILE_TEST
config USB_MUSB_JZ4740
tristate "JZ4740"
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index a441a2de8619..178250145613 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -63,7 +63,7 @@ static void bfin_writew(void __iomem *addr, unsigned offset, u16 data)
bfin_write16(addr + offset, data);
}
-static void binf_writel(void __iomem *addr, unsigned offset, u32 data)
+static void bfin_writel(void __iomem *addr, unsigned offset, u32 data)
{
bfin_write16(addr + offset, (u16)data);
}
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index f64fd964dc6d..c39a16ad7832 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -628,9 +628,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
ret = of_property_read_string_index(np, "dma-names", i, &str);
if (ret)
goto err;
- if (!strncmp(str, "tx", 2))
+ if (strstarts(str, "tx"))
is_tx = 1;
- else if (!strncmp(str, "rx", 2))
+ else if (strstarts(str, "rx"))
is_tx = 0;
else {
dev_err(dev, "Wrong dmatype %s\n", str);
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index ad3701a97389..48131aa8472c 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -59,20 +59,12 @@ static const struct musb_register_map musb_regmap[] = {
{ "RxMaxPp", MUSB_RXMAXP, 16 },
{ "RxCSR", MUSB_RXCSR, 16 },
{ "RxCount", MUSB_RXCOUNT, 16 },
- { "ConfigData", MUSB_CONFIGDATA,8 },
{ "IntrRxE", MUSB_INTRRXE, 16 },
{ "IntrTxE", MUSB_INTRTXE, 16 },
{ "IntrUsbE", MUSB_INTRUSBE, 8 },
{ "DevCtl", MUSB_DEVCTL, 8 },
- { "BabbleCtl", MUSB_BABBLE_CTL,8 },
- { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
- { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
- { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
- { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
{ "VControl", 0x68, 32 },
{ "HWVers", 0x69, 16 },
- { "EPInfo", MUSB_EPINFO, 8 },
- { "RAMInfo", MUSB_RAMINFO, 8 },
{ "LinkInfo", MUSB_LINKINFO, 8 },
{ "VPLen", MUSB_VPLEN, 8 },
{ "HS_EOF1", MUSB_HS_EOF1, 8 },
@@ -103,6 +95,16 @@ static const struct musb_register_map musb_regmap[] = {
{ "DMA_CNTLch7", 0x274, 16 },
{ "DMA_ADDRch7", 0x278, 32 },
{ "DMA_COUNTch7", 0x27C, 32 },
+#ifndef CONFIG_BLACKFIN
+ { "ConfigData", MUSB_CONFIGDATA,8 },
+ { "BabbleCtl", MUSB_BABBLE_CTL,8 },
+ { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
+ { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
+ { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
+ { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
+ { "EPInfo", MUSB_EPINFO, 8 },
+ { "RAMInfo", MUSB_RAMINFO, 8 },
+#endif
{ } /* Terminating Entry */
};
@@ -197,30 +199,30 @@ static ssize_t musb_test_mode_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- if (!strncmp(buf, "force host", 9))
+ if (strstarts(buf, "force host"))
test = MUSB_TEST_FORCE_HOST;
- if (!strncmp(buf, "fifo access", 11))
+ if (strstarts(buf, "fifo access"))
test = MUSB_TEST_FIFO_ACCESS;
- if (!strncmp(buf, "force full-speed", 15))
+ if (strstarts(buf, "force full-speed"))
test = MUSB_TEST_FORCE_FS;
- if (!strncmp(buf, "force high-speed", 15))
+ if (strstarts(buf, "force high-speed"))
test = MUSB_TEST_FORCE_HS;
- if (!strncmp(buf, "test packet", 10)) {
+ if (strstarts(buf, "test packet")) {
test = MUSB_TEST_PACKET;
musb_load_testpacket(musb);
}
- if (!strncmp(buf, "test K", 6))
+ if (strstarts(buf, "test K"))
test = MUSB_TEST_K;
- if (!strncmp(buf, "test J", 6))
+ if (strstarts(buf, "test J"))
test = MUSB_TEST_J;
- if (!strncmp(buf, "test SE0 NAK", 12))
+ if (strstarts(buf, "test SE0 NAK"))
test = MUSB_TEST_SE0_NAK;
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 23d474d3d7f4..883a9adfdfff 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
if (musb->port_mode == MUSB_PORT_MODE_GADGET)
return;
usb_remove_hcd(musb->hcd);
- musb->hcd = NULL;
}
void musb_host_free(struct musb *musb)
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 699e38c73d82..697a741a0cb1 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -338,7 +338,6 @@ static void mv_otg_update_inputs(struct mv_otg *mvotg)
static void mv_otg_update_state(struct mv_otg *mvotg)
{
struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
- struct usb_phy *phy = &mvotg->phy;
int old_state = mvotg->phy.otg->state;
switch (old_state) {
@@ -858,10 +857,10 @@ static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
- if (mvotg->phy.state != OTG_STATE_B_IDLE) {
+ if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
dev_info(&pdev->dev,
"OTG state is not B_IDLE, it is %d!\n",
- mvotg->phy.state);
+ mvotg->phy.otg->state);
return -EAGAIN;
}
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index b4066a001ba0..2f9735b35338 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -59,6 +59,9 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
{
struct usb_phy *phy;
+ if (!of_device_is_available(node))
+ return ERR_PTR(-ENODEV);
+
list_for_each_entry(phy, &phy_list, head) {
if (node != phy->dev->of_node)
continue;
@@ -66,7 +69,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
return phy;
}
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EPROBE_DEFER);
}
static void devm_usb_phy_release(struct device *dev, void *res)
@@ -190,10 +193,13 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
spin_lock_irqsave(&phy_lock, flags);
phy = __of_usb_find_phy(node);
- if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- if (!IS_ERR(phy))
- phy = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(phy)) {
+ devres_free(ptr);
+ goto err1;
+ }
+ if (!try_module_get(phy->dev->driver->owner)) {
+ phy = ERR_PTR(-ENODEV);
devres_free(ptr);
goto err1;
}
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 8d7fc48b1f30..29fa1c3d0089 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -46,6 +46,8 @@ static struct console usbcons;
* ------------------------------------------------------------
*/
+static const struct tty_operations usb_console_fake_tty_ops = {
+};
/*
* The parsing of the command line works exactly like the
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
goto reset_open_count;
}
kref_init(&tty->kref);
- tty_port_tty_set(&port->port, tty);
tty->driver = usb_serial_tty_driver;
tty->index = co->index;
+ init_ldsem(&tty->ldisc_sem);
+ INIT_LIST_HEAD(&tty->tty_files);
+ kref_get(&tty->driver->kref);
+ tty->ops = &usb_console_fake_tty_ops;
if (tty_init_termios(tty)) {
retval = -ENOMEM;
- goto free_tty;
+ goto put_tty;
}
+ tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
- kfree(tty);
+ tty_kref_put(tty);
}
set_bit(ASYNCB_INITIALIZED, &port->port.flags);
}
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
fail:
tty_port_tty_set(&port->port, NULL);
- free_tty:
- kfree(tty);
+ put_tty:
+ tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
usb_autopm_put_interface(serial->interface);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 6c4eb3cf5efd..f4c56fc1a9f6 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
- { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 1bd192290b08..ccf1df7c4b80 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
res = usb_submit_urb(port->read_urbs[index], mem_flags);
if (res) {
- if (res != -EPERM) {
+ if (res != -EPERM && res != -ENODEV) {
dev_err(&port->dev,
"%s - usb_submit_urb failed: %d\n",
__func__, res);
@@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
__func__, urb->status);
return;
default:
- dev_err(&port->dev, "%s - nonzero urb status: %d\n",
+ dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
goto resubmit;
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 077c714f1285..e07b15ed5814 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
}
port = serial->port[msg->portNumber];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
port = serial->port[0];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7a4c21b4f676..efdcee15b520 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
#define QUALCOMM_VENDOR_ID 0x05C6
+#define SIERRA_VENDOR_ID 0x1199
+
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
#define CMOTECH_PRODUCT_CMU_300 0x6002
@@ -512,7 +514,7 @@ enum option_blacklist_reason {
OPTION_BLACKLIST_RESERVED_IF = 2
};
-#define MAX_BL_NUM 8
+#define MAX_BL_NUM 11
struct option_blacklist_info {
/* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
const unsigned long sendsetup;
@@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+ .sendsetup = BIT(0) | BIT(2),
+ .reserved = BIT(8) | BIT(10) | BIT(11),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index cb3e14780a7e..9c63897b3a56 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
{DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
- {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 8a6f371ed6e7..9893d696fc97 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -69,16 +69,39 @@ static int uas_use_uas_driver(struct usb_interface *intf,
return 0;
/*
- * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
- * broken on the ASM1051, use the number of streams to differentiate.
- * New ASM1053-s also support 32 streams, but have a different prod-id.
+ * ASMedia has a number of usb3 to sata bridge chips, at the time of
+ * this writing the following versions exist:
+ * ASM1051 - no uas support version
+ * ASM1051 - with broken (*) uas support
+ * ASM1053 - with working uas support
+ * ASM1153 - with working uas support
+ *
+ * Devices with these chips re-use a number of device-ids over the
+ * entire line, so the device-id is useless to determine if we're
+ * dealing with an ASM1051 (which we want to avoid).
+ *
+ * The ASM1153 can be identified by config.MaxPower == 0,
+ * whereas the ASM105x models have config.MaxPower == 36.
+ *
+ * Differentiating between the ASM1053 and ASM1051 is trickier; when
+ * connected over USB-3 we can look at the number of streams supported:
+ * ASM1051 supports 32 streams, whereas early ASM1053 versions support
+ * 16 streams. Newer ASM1053-s also support 32 streams, but have a
+ * different prod-id.
+ *
+ * (*) ASM1051 chips do work with UAS with some disks (with the
+ * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks
*/
if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
- le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
- if (udev->speed < USB_SPEED_SUPER) {
+ (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 ||
+ le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) {
+ if (udev->actconfig->desc.bMaxPower == 0) {
+ /* ASM1153, do nothing */
+ } else if (udev->speed < USB_SPEED_SUPER) {
/* No streams info, assume ASM1051 */
flags |= US_FL_IGNORE_UAS;
} else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+ /* Possibly an ASM1051, disable uas */
flags |= US_FL_IGNORE_UAS;
}
}
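
Restated as a standalone sketch (hypothetical function name, signature and parameters; the real logic is folded into the flags handling of uas_use_uas_driver() shown above), the detection heuristic described in the comment reduces to:

/* Sketch only: assuming the ASMedia vendor id 0x174c has already matched,
 * decide whether UAS should be avoided for a shared product-id. */
static bool asmedia_avoid_uas(u16 product_id, u8 max_power,
			      bool super_speed, unsigned int streams)
{
	if (product_id != 0x5106 && product_id != 0x55aa)
		return false;		/* not one of the shared device-ids */
	if (max_power == 0)
		return false;		/* ASM1153: uas known to work */
	if (!super_speed)
		return true;		/* no stream info, assume ASM1051 */
	return streams == 32;		/* 32: possibly ASM1051, avoid uas;
					 * 16: early ASM1053, uas is fine */
}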
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 11c7a9676441..d684b4b8108f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999,
"SCM Microsystems",
"eUSB SCSI Adapter (Bus Powered)",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+ USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 18a283d6de1c..dbc00e56c7f5 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -40,6 +40,16 @@
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
+/*
+ * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+ * commands in UAS mode. Observed with the 1.28 firmware; are there others?
+ */
+UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
+ "Apricorn",
+ "",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
"Seagate",
@@ -68,6 +78,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
+UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
+ "Seagate",
+ "Backup Plus",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
+ "Seagate",
+ "Backup Plus Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
"Seagate",
@@ -82,6 +106,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
+UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ "Seagate",
+ "BUP Fast HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
"JMicron",
@@ -89,14 +120,6 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES),
-/* Most ASM1051 based devices have issues with uas, blacklist them all */
-/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
- "ASMedia",
- "ASM1051",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_IGNORE_UAS),
-
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
@@ -104,9 +127,23 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
+ "JMicron",
+ "JMS566",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
"Hitachi",
"External HDD",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+
+/* Reported-by: Richard Henderson <rth@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+ "SimpleTech",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 255201f22126..7cc0122a18ce 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- u8 type;
struct vfio_pci_device *vdev;
struct iommu_group *group;
int ret;
- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
- if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
group = iommu_group_get(&pdev->dev);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 14419a8ccbb6..d415d69dc237 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
++headcount;
seg += in;
}
- heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
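
A worked example of the corrected length (assuming, as in the upstream get_rx_bufs() loop, that datalen starts at the full packet length and has each buffer's iov length subtracted from it, so it is zero or negative when the loop exits): for a 1000-byte packet spread over two 600-byte buffers, the loop ends with len == 600 and datalen == -200, so len + datalen == 400 is the number of bytes actually used in the last buffer, while the previous len - datalen expression would have reported 800.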
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 01c01cb3933f..d695b1673ae5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
return 0;
}
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+ switch (attr) {
+ case VIRTIO_SCSI_S_SIMPLE:
+ return TCM_SIMPLE_TAG;
+ case VIRTIO_SCSI_S_ORDERED:
+ return TCM_ORDERED_TAG;
+ case VIRTIO_SCSI_S_HEAD:
+ return TCM_HEAD_TAG;
+ case VIRTIO_SCSI_S_ACA:
+ return TCM_ACA_TAG;
+ default:
+ break;
+ }
+ return TCM_SIMPLE_TAG;
+}
+
static void tcm_vhost_submission_work(struct work_struct *work)
{
struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
- cmd->tvc_task_attr, cmd->tvc_data_direction,
- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+ sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+ cmd->tvc_prot_sgl_count);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ed71b5347a76..cb807d0ea498 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -713,9 +713,13 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EFAULT;
break;
}
- if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
- (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
- (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+
+ /* Make sure it's safe to cast pointers to vring types. */
+ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+ BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+ if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+ (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+ (a.log_guest_addr & (sizeof(u64) - 1))) {
r = -EINVAL;
break;
}
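
A minimal sketch of the alignment test used above (power-of-two masking; VRING_AVAIL_ALIGN_SIZE and VRING_USED_ALIGN_SIZE are the vring alignment constants from the virtio ring header, assumed here to be small powers of two):

/* Sketch: addr is 'align'-aligned iff its low bits below 'align' are zero.
 * Only valid when align is a power of two. */
static inline bool is_pow2_aligned(u64 addr, u64 align)
{
	return (addr & (align - 1)) == 0;
}

The BUILD_BUG_ONs in the hunk then guarantee that the C types behind vq->avail and vq->used never require stricter alignment than those constants, so casting the userspace addresses to the vring types stays safe.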
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 1c29bd19e3d5..0e5fde1d3ffb 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, start_sector_addr,
data_start_addr, sector_buffer);
if (err)
- return err;
+ goto out;
}
/* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, tail_start_addr,
tail_len, sector_buffer + tail_start_addr);
if (err)
- return err;
+ goto out;
}
/* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
/* first erase the sector */
err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
if (err)
- return err;
+ goto out;
/* now write it */
err = broadsheet_spiflash_write_sector(par, start_sector_addr,
sector_buffer, sector_size);
+out:
+ kfree(sector_buffer);
return err;
}
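
The shape of the fix above is the usual single-exit cleanup pattern: every early error return becomes a goto so the sector buffer is always freed. A minimal sketch, with placeholder helpers standing in for the spiflash read/erase/write calls:

static int rewrite_sector_sketch(struct broadsheetfb_par *par, int sector_size)
{
	char *sector_buffer;
	int err;

	sector_buffer = kzalloc(sector_size, GFP_KERNEL);
	if (!sector_buffer)
		return -ENOMEM;

	err = read_step(par, sector_buffer);	/* placeholder helpers */
	if (err)
		goto out;
	err = erase_step(par);
	if (err)
		goto out;
	err = write_step(par, sector_buffer);
out:
	kfree(sector_buffer);			/* freed on every path now */
	return err;
}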
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 900aa4ecd617..d6cab1fd9a47 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
cancel_delayed_work_sync(&info->deferred_work);
/* Run it immediately */
- err = schedule_delayed_work(&info->deferred_work, 0);
+ schedule_delayed_work(&info->deferred_work, 0);
mutex_unlock(&inode->i_mutex);
- return err;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 87accdb59c81..ac83ef5cfd7d 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -132,7 +132,6 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
.mX_max = 127,
.fint_min = 500000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 500000000,
.clkdco_low = 1000000000,
@@ -156,7 +155,6 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.mX_max = 127,
.fint_min = 620000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 750000000,
.clkdco_low = 1500000000,
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
index 50bc62c5d367..335ffac224b9 100644
--- a/drivers/video/fbdev/omap2/dss/pll.c
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -97,7 +97,8 @@ int dss_pll_enable(struct dss_pll *pll)
return 0;
err_enable:
- regulator_disable(pll->regulator);
+ if (pll->regulator)
+ regulator_disable(pll->regulator);
err_reg:
clk_disable_unprepare(pll->clkin);
return r;
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index d51a983075bc..5c2ccab5a958 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -342,6 +342,8 @@ static void sdi_init_output(struct platform_device *pdev)
out->output_type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+ /* We have SDI only on OMAP3, where it's on port 1 */
+ out->port_num = 1;
out->ops.sdi = &sdi_ops;
out->owner = THIS_MODULE;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 92cac803dee3..1085c0432158 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+ if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
for_each_child_of_node(of_chosen, np) {
if (of_device_is_compatible(np, "simple-framebuffer"))
of_platform_device_create(np, NULL, NULL);
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 940cd196eef5..10fbfd8ab963 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,6 +21,21 @@ static bool nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
+/*
+ * Logos are located in the initdata, and will be freed in kernel_init.
+ * Use late_init to mark the logos as freed to prevent any further use.
+ */
+
+static bool logos_freed;
+
+static int __init fb_logo_late_init(void)
+{
+ logos_freed = true;
+ return 0;
+}
+
+late_initcall(fb_logo_late_init);
+
/* logos are marked __initdata. Use __init_refok to tell
* modpost that it is intended that this function uses data
* marked __initdata.
@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
{
const struct linux_logo *logo = NULL;
- if (nologo)
+ if (nologo || logos_freed)
return NULL;
if (depth >= 1) {
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 2ef9529809d8..9756f21b809e 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_free_vectors(vdev);
kfree(vp_dev->vqs);
+ vp_dev->vqs = NULL;
}
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
return 0;
}
-void virtio_pci_release_dev(struct device *_d)
-{
- /*
- * No need for a release method as we allocate/free
- * all devices together with the pci devices.
- * Provide an empty one to avoid getting a warning from core.
- */
-}
-
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index adddb647b21d..5a497289b7e9 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
* - ignore the affinity request if we're using INTX
*/
int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
-void virtio_pci_release_dev(struct device *);
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6c76f0f5658c..a5486e65e04b 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
};
+static void virtio_pci_release_dev(struct device *_d)
+{
+ struct virtio_device *vdev = dev_to_virtio(_d);
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ /* As struct device is a kobject, it's not safe to
+ * free the memory (including the reference counter itself)
+ * until its release callback. */
+ kfree(vp_dev);
+}
+
/* the PCI probing function */
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
- kfree(vp_dev);
}
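
The move of kfree() above follows the usual struct device lifetime rule: memory embedding a struct device may only be freed from its .release callback, since the PCI remove path can run while other code still holds a reference. As a minimal generic sketch (hypothetical names, not the virtio code itself):

struct foo_device {
	struct device dev;
	/* ... driver state ... */
};

static void foo_release(struct device *d)
{
	/* Called by the driver core only after the last reference is dropped. */
	kfree(container_of(d, struct foo_device, dev));
}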
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 5927c0a98a74..bcfd2a22208f 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = {
.shutdown = cdns_wdt_shutdown,
.driver = {
.name = "cdns-wdt",
- .owner = THIS_MODULE,
.of_match_table = cdns_wdt_of_match,
.pm = &cdns_wdt_pm_ops,
},
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index d6add516a7a7..5142bbabe027 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -52,6 +52,8 @@
#define IMX2_WDT_WRSR 0x04 /* Reset Status Register */
#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */
+#define IMX2_WDT_WMCR 0x08 /* Misc Register */
+
#define IMX2_WDT_MAX_TIME 128
#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
@@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
imx2_wdt_ping_if_active(wdog);
+ /*
+ * Disable the watchdog power down counter at boot. Otherwise the power
+ * down counter will pull down the #WDOG interrupt line for one clock
+ * cycle.
+ */
+ regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
+
ret = watchdog_register_device(wdog);
if (ret) {
dev_err(&pdev->dev, "cannot register watchdog device\n");
@@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-/* Disable watchdog if it is active during suspend */
+/* Disable the watchdog if it is active, or inactive but still running */
static int imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
- imx2_wdt_ping(wdog);
+ /* The watchdog IP block is running */
+ if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping(wdog);
- /* Watchdog has been stopped but IP block is still running */
- if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
- del_timer_sync(&wdev->timer);
+ /* The watchdog is not active */
+ if (!watchdog_active(wdog))
+ del_timer_sync(&wdev->timer);
+ }
clk_disable_unprepare(wdev->clk);
@@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev)
clk_prepare_enable(wdev->clk);
if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
- /* Resumes from deep sleep we need restart
- * the watchdog again.
+ /*
+ * If the watchdog is still active and resumes
+ * from deep sleep state, we need to restart the
+ * watchdog again.
*/
imx2_wdt_setup(wdog);
imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
} else if (imx2_wdt_is_running(wdev)) {
+ /* Resuming from non-deep sleep state. */
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
- mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+ /*
+ * If the watchdog is not active, start the
+ * timer again.
+ */
+ if (!watchdog_active(wdog))
+ mod_timer(&wdev->timer,
+ jiffies + wdog->timeout * HZ / 2);
}
return 0;
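
Restating the combined suspend/resume policy of the hunks above as a short reference (editor's sketch; the wording is not from the patch):

/*
 *   suspend, IP block running:     stretch timeout to IMX2_WDT_MAX_TIME and
 *                                  ping; if the watchdog device itself is
 *                                  not active, also stop the ping timer
 *   resume, active but IP stopped: re-setup, restore timeout, ping
 *   resume, IP still running:      restore timeout, ping; restart the ping
 *                                  timer only if the device is not active
 */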
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index ef6a298e8c45..1f4155ee3404 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = {
.remove = meson_wdt_remove,
.shutdown = meson_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = meson_wdt_dt_ids,
},