-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-ibm-rtl | 4
-rw-r--r--  Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/pci/rockchip-pcie.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt | 10
-rw-r--r--  Documentation/filesystems/Locking | 1
-rw-r--r--  Documentation/filesystems/vfs.txt | 1
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  Makefile | 12
-rw-r--r--  arch/arc/Makefile | 7
-rw-r--r--  arch/arc/boot/dts/axc001.dtsi | 2
-rw-r--r--  arch/arc/boot/dts/nsim_700.dts | 2
-rw-r--r--  arch/arc/boot/dts/nsimosci.dts | 4
-rw-r--r--  arch/arc/configs/nsim_700_defconfig | 1
-rw-r--r--  arch/arc/configs/nsim_hs_defconfig | 1
-rw-r--r--  arch/arc/configs/nsim_hs_smp_defconfig | 1
-rw-r--r--  arch/arc/configs/nsimosci_defconfig | 1
-rw-r--r--  arch/arc/configs/nsimosci_hs_defconfig | 1
-rw-r--r--  arch/arc/configs/nsimosci_hs_smp_defconfig | 3
-rw-r--r--  arch/arc/include/asm/arcregs.h | 2
-rw-r--r--  arch/arc/include/asm/smp.h | 4
-rw-r--r--  arch/arc/kernel/devtree.c | 2
-rw-r--r--  arch/arc/kernel/mcip.c | 32
-rw-r--r--  arch/arc/kernel/process.c | 20
-rw-r--r--  arch/arc/kernel/smp.c | 23
-rw-r--r--  arch/arc/kernel/time.c | 19
-rw-r--r--  arch/arc/mm/dma.c | 26
-rw-r--r--  arch/arc/plat-eznps/smp.c | 6
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h | 1
-rw-r--r--  arch/arm/kvm/arm.c | 27
-rw-r--r--  arch/arm/kvm/hyp/tlb.c | 15
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 7
-rw-r--r--  arch/arm64/include/asm/alternative.h | 2
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 40
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 20
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 2
-rw-r--r--  arch/arm64/include/asm/lse.h | 1
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c | 15
-rw-r--r--  arch/nios2/kernel/time.c | 1
-rw-r--r--  arch/openrisc/include/asm/cache.h | 2
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c | 6
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/s390/pci/pci_dma.c | 2
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 4
-rw-r--r--  arch/x86/include/asm/kvm_page_track.h | 14
-rw-r--r--  arch/x86/kernel/apm_32.c | 5
-rw-r--r--  arch/x86/kvm/mmu.c | 11
-rw-r--r--  arch/x86/kvm/page_track.c | 31
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  drivers/acpi/acpi_apd.c | 10
-rw-r--r--  drivers/acpi/acpi_lpss.c | 10
-rw-r--r--  drivers/acpi/acpi_platform.c | 5
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 4
-rw-r--r--  drivers/acpi/scan.c | 2
-rw-r--r--  drivers/base/dd.c | 5
-rw-r--r--  drivers/base/power/main.c | 8
-rw-r--r--  drivers/block/aoe/aoecmd.c | 41
-rw-r--r--  drivers/block/drbd/drbd_main.c | 2
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/char/ppdev.c | 3
-rw-r--r--  drivers/clk/clk-qoriq.c | 13
-rw-r--r--  drivers/clk/clk-xgene.c | 10
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 8
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 2
-rw-r--r--  drivers/clk/mmp/clk-of-pxa168.c | 2
-rw-r--r--  drivers/clk/mmp/clk-of-pxa910.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-ddr.c | 5
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 70
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 6
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 13
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 6
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 19
-rw-r--r--  drivers/gpu/drm/armada/Makefile | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 121
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 10
-rw-r--r--  drivers/gpu/drm/armada/armada_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 236
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 10
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 65
-rw-r--r--  drivers/gpu/drm/armada/armada_trace.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_trace.h | 66
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 38
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 26
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 856
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 26
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 202
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 40
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 63
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 73
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 597
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 55
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 34
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 180
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 543
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 608
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c (renamed from drivers/gpu/drm/i915/i915_gem_fence.c) | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.h | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 579
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 235
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 338
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 206
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 74
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 638
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 341
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 598
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 187
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 139
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 170
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 121
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 3
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 9
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 14
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 46
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 69
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 13
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 16
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 23
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-sensor-custom.c | 6
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 15
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/ipc.c | 102
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 6
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hv/vmbus_drv.c | 2
-rw-r--r--  drivers/hwmon/hwmon.c | 6
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 12
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 56
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c | 8
-rw-r--r--  drivers/iio/orientation/hid-sensor-rotation.c | 1
-rw-r--r--  drivers/iio/temperature/maxim_thermocouple.c | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 54
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 25
-rw-r--r--  drivers/iommu/arm-smmu.c | 16
-rw-r--r--  drivers/iommu/intel-iommu.c | 14
-rw-r--r--  drivers/media/dvb-frontends/Kconfig | 5
-rw-r--r--  drivers/media/dvb-frontends/Makefile | 1
-rw-r--r--  drivers/media/dvb-frontends/gp8psk-fe.c (renamed from drivers/media/usb/dvb-usb/gp8psk-fe.c) | 139
-rw-r--r--  drivers/media/dvb-frontends/gp8psk-fe.h | 82
-rw-r--r--  drivers/media/i2c/ir-kbd-i2c.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb/Makefile | 2
-rw-r--r--  drivers/media/usb/dvb-usb/af9005.c | 33
-rw-r--r--  drivers/media/usb/dvb-usb/cinergyT2-core.c | 33
-rw-r--r--  drivers/media/usb/dvb-usb/cxusb.c | 39
-rw-r--r--  drivers/media/usb/dvb-usb/cxusb.h | 1
-rw-r--r--  drivers/media/usb/dvb-usb/dib0700_core.c | 5
-rw-r--r--  drivers/media/usb/dvb-usb/dtt200u.c | 40
-rw-r--r--  drivers/media/usb/dvb-usb/dvb-usb-init.c | 1
-rw-r--r--  drivers/media/usb/dvb-usb/dvb-usb.h | 9
-rw-r--r--  drivers/media/usb/dvb-usb/gp8psk.c | 111
-rw-r--r--  drivers/media/usb/dvb-usb/gp8psk.h | 63
-rw-r--r--  drivers/misc/mei/bus-fixup.c | 2
-rw-r--r--  drivers/mmc/card/mmc_test.c | 8
-rw-r--r--  drivers/mmc/core/mmc.c | 3
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 2
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 4
-rw-r--r--  drivers/mmc/host/sdhci.c | 36
-rw-r--r--  drivers/nfc/mei_phy.c | 2
-rw-r--r--  drivers/nvme/host/lightnvm.c | 2
-rw-r--r--  drivers/of/base.c | 2
-rw-r--r--  drivers/pci/host/pcie-rockchip.c | 62
-rw-r--r--  drivers/pci/setup-res.c | 8
-rw-r--r--  drivers/pcmcia/soc_common.c | 2
-rw-r--r--  drivers/phy/phy-da8xx-usb.c | 5
-rw-r--r--  drivers/phy/phy-rockchip-pcie.c | 13
-rw-r--r--  drivers/phy/phy-sun4i-usb.c | 2
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 2
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-iproc-gpio.c | 2
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-nsp-gpio.c | 2
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx.c | 1
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 17
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 2
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 8
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 7
-rw-r--r--  drivers/platform/x86/intel-hid.c | 2
-rw-r--r--  drivers/platform/x86/intel-vbtn.c | 2
-rw-r--r--  drivers/platform/x86/toshiba-wmi.c | 26
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 16
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 5
-rw-r--r--  drivers/scsi/vmw_pvscsi.h | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_tio.c | 3
-rw-r--r--  drivers/staging/greybus/arche-platform.c | 1
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 17
-rw-r--r--  drivers/staging/nvec/nvec_ps2.c | 8
-rw-r--r--  drivers/staging/sm750fb/ddk750_reg.h | 8
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/dwc3/core.c | 5
-rw-r--r--  drivers/usb/dwc3/dwc3-st.c | 1
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 8
-rw-r--r--  drivers/usb/host/pci-quirks.c | 8
-rw-r--r--  drivers/usb/musb/da8xx.c | 3
-rw-r--r--  drivers/usb/musb/musb_core.c | 5
-rw-r--r--  drivers/uwb/lc-rc.c | 16
-rw-r--r--  drivers/uwb/pal.c | 2
-rw-r--r--  fs/aio.c | 207
-rw-r--r--  fs/ceph/file.c | 1
-rw-r--r--  fs/coredump.c | 3
-rw-r--r--  fs/nfs/client.c | 3
-rw-r--r--  fs/nfs/namespace.c | 2
-rw-r--r--  fs/nfs/nfs4session.c | 12
-rw-r--r--  fs/nfs/pnfs.c | 2
-rw-r--r--  fs/ntfs/dir.c | 2
-rw-r--r--  fs/ocfs2/dir.c | 2
-rw-r--r--  fs/orangefs/orangefs-debugfs.c | 147
-rw-r--r--  fs/orangefs/orangefs-mod.c | 6
-rw-r--r--  fs/splice.c | 5
-rw-r--r--  fs/xfs/libxfs/xfs_defer.c | 17
-rw-r--r--  include/asm-generic/percpu.h | 4
-rw-r--r--  include/asm-generic/sections.h | 3
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 5
-rw-r--r--  include/drm/drmP.h | 1
-rw-r--r--  include/drm/drm_drv.h | 3
-rw-r--r--  include/drm/drm_edid.h | 1
-rw-r--r--  include/linux/acpi.h | 3
-rw-r--r--  include/linux/ceph/osd_client.h | 2
-rw-r--r--  include/linux/console.h | 6
-rw-r--r--  include/linux/frontswap.h | 5
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/phy/phy.h | 7
-rw-r--r--  include/uapi/drm/i915_drm.h | 5
-rw-r--r--  include/uapi/sound/asoc.h | 6
-rw-r--r--  kernel/power/suspend_test.c | 4
-rw-r--r--  kernel/printk/printk.c | 13
-rw-r--r--  lib/stackdepot.c | 2
-rw-r--r--  mm/cma.c | 3
-rw-r--r--  mm/filemap.c | 3
-rw-r--r--  mm/hugetlb.c | 66
-rw-r--r--  mm/kmemleak.c | 1
-rw-r--r--  mm/memory-failure.c | 12
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/shmem.c | 2
-rw-r--r--  mm/slab_common.c | 4
-rw-r--r--  mm/swapfile.c | 2
-rw-r--r--  net/ceph/ceph_fs.c | 3
-rw-r--r--  net/ceph/osd_client.c | 1
-rw-r--r--  net/sunrpc/clnt.c | 7
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c | 37
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 3
-rw-r--r--  scripts/Makefile.extrawarn | 1
-rw-r--r--  scripts/Makefile.ubsan | 4
-rwxr-xr-x  scripts/bloat-o-meter | 3
-rw-r--r--  sound/core/info.c | 9
-rw-r--r--  sound/soc/codecs/cs4270.c | 8
-rw-r--r--  sound/soc/codecs/da7219.c | 3
-rw-r--r--  sound/soc/codecs/hdmi-codec.c | 7
-rw-r--r--  sound/soc/codecs/rt298.c | 5
-rw-r--r--  sound/soc/codecs/rt5663.c | 4
-rw-r--r--  sound/soc/codecs/sti-sas.c | 2
-rw-r--r--  sound/soc/codecs/tas571x.c | 37
-rw-r--r--  sound/soc/intel/Kconfig | 3
-rw-r--r--  sound/soc/intel/atom/sst/sst_acpi.c | 1
-rw-r--r--  sound/soc/intel/boards/bxt_da7219_max98357a.c | 4
-rw-r--r--  sound/soc/intel/skylake/skl.c | 8
-rw-r--r--  sound/soc/pxa/Kconfig | 2
-rw-r--r--  sound/soc/qcom/lpass-cpu.c | 3
-rw-r--r--  sound/soc/qcom/lpass-platform.c | 165
-rw-r--r--  sound/soc/qcom/lpass.h | 1
-rw-r--r--  sound/soc/samsung/ac97.c | 10
-rw-r--r--  sound/soc/samsung/i2s.c | 19
-rw-r--r--  sound/soc/samsung/pcm.c | 19
-rw-r--r--  sound/soc/samsung/s3c2412-i2s.c | 16
-rw-r--r--  sound/soc/samsung/s3c24xx-i2s.c | 14
-rw-r--r--  sound/soc/samsung/spdif.c | 19
-rw-r--r--  sound/soc/sti/uniperif_player.c | 6
-rw-r--r--  sound/soc/sunxi/sun4i-codec.c | 19
-rw-r--r--  tools/power/cpupower/utils/cpufreq-set.c | 7
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c | 41
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.h | 14
-rw-r--r--  virt/kvm/arm/vgic/vgic.c | 12
365 files changed, 7219 insertions(+), 4686 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
index b82deeaec314..470def06ab0a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
+++ b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
@@ -1,4 +1,4 @@
-What: state
+What: /sys/devices/system/ibm_rtl/state
Date: Sep 2010
KernelVersion: 2.6.37
Contact: Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description: The state file allows a means by which to change in and
Users: The ibm-prtm userspace daemon uses this interface.
-What: version
+What: /sys/devices/system/ibm_rtl/version
Date: Sep 2010
KernelVersion: 2.6.37
Contact: Vernon Mauery <vernux@us.ibm.com>
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 4e00e859e885..bfa461aaac99 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -43,6 +43,9 @@ Optional properties:
reset signal present internally in some host controller IC designs.
See Documentation/devicetree/bindings/reset/reset.txt for details.
+* reset-names: request name for using "resets" property. Must be "reset".
+ (It will be used together with "resets" property.)
+
* clocks: from common clock binding: handle to biu and ciu clocks for the
bus interface unit clock and the card interface unit clock.
@@ -103,6 +106,8 @@ board specific portions as listed below.
interrupts = <0 75 0>;
#address-cells = <1>;
#size-cells = <0>;
+ resets = <&rst 20>;
+ reset-names = "reset";
};
[board specific internal DMA resources]
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
index ba67b39939c1..71aeda1ca055 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
@@ -26,13 +26,16 @@ Required properties:
- "sys"
- "legacy"
- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: Must include the following names
- "core"
- "mgmt"
- "mgmt-sticky"
- "pipe"
+ - "pm"
+ - "aclk"
+ - "pclk"
- pinctrl-names : The pin control state names
- pinctrl-0: The "default" pinctrl state
- #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
phys = <&pcie_phy>;
phy-names = "pcie-phy";
pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index f9753c416974..b24583aa34c3 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -14,11 +14,6 @@ Required properies:
- #size-cells : The value of this property must be 1
- ranges : defines mapping between pin controller node (parent) to
gpio-bank node (children).
- - interrupt-parent: phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
- which includes IRQ mux selection register, and the offset of the IRQ mux
- selection register.
- pins-are-numbered: Specify the subnodes are using numbered pinmux to
specify pins.
@@ -37,6 +32,11 @@ Required properties:
Optional properties:
- reset: : Reference to the reset controller
+ - interrupt-parent: phandle of the interrupt parent to which the external
+ GPIO interrupts are forwarded to.
+ - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+ which includes IRQ mux selection register, and the offset of the IRQ mux
+ selection register.
Example:
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 14cdc101d165..1b5f15653b1b 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -447,7 +447,6 @@ prototypes:
int (*flush) (struct file *);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index d619c8d71966..b5039a00caaf 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -828,7 +828,6 @@ struct file_operations {
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/MAINTAINERS b/MAINTAINERS
index 520002480058..30c873192458 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9373,7 +9373,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M: Keith Busch <keith.busch@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: arch/x86/pci/vmd.c
+F: drivers/pci/host/vmd.c
PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com>
diff --git a/Makefile b/Makefile
index f97f786de58d..247430abfc73 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
@@ -370,7 +370,7 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
@@ -620,7 +620,6 @@ ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +628,18 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
endif
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os
+KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
else
ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS += -O2
+KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
else
KBUILD_CFLAGS += -O2
endif
endif
+KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+ $(call cc-disable-warning,maybe-uninitialized,))
+
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 864adad52280..19cce226d1a8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,6 +50,9 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors
+cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
# Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
endif
# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 6ae2c476ad82..53ce226f77a5 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -71,7 +71,7 @@
reg-io-width = <4>;
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index ce0ccd20b5bf..5ee96b067c08 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -69,7 +69,7 @@
};
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index bcf603142a33..3c391ba565ed 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -83,5 +83,9 @@
reg = <0xf0003000 0x44>;
interrupts = <7>;
};
+
+ arcpct0: pct {
+ compatible = "snps,arc700-pct";
+ };
};
};
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 7314f538847b..b0066a749d4c 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 65ab9fbf83f2..ebe9ebb92933 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 3b3990cddbe1..4bde43278be6 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 98cf20933bbb..f6fb3d26557e 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ddf8b96d494e..b9f0fe00044b 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index ceb90745326e..6da71ba253a9 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7f3f9f63708c..1bd24ec3e350 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -43,12 +43,14 @@
#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_Z_BIT 11
#define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */
#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_Z_MASK (1<<STATUS_Z_BIT)
#define STATUS_L_MASK (1<<STATUS_L_BIT)
/*
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 89fdd1b0a76e..0861007d9ef3 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
* API expected BY platform smp code (FROM arch smp code)
*
* smp_ipi_irq_setup:
- * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ * Takes @cpu and @hwirq to which the arch-common ISR is hooked up
*/
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
/*
* struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index f1e07c2344f8..3b67f538f142 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */
else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */
+ else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+ arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */
else
arc_base_baud = 50000000; /* Fixed default 50MHz */
}
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index c424d5abc318..f39142acc89e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
{
unsigned long flags;
cpumask_t online;
+ unsigned int destination_bits;
+ unsigned int distribution_mode;
/* errout if no online cpu per @cpumask */
if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
- idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+ destination_bits = cpumask_bits(&online)[0];
+ idu_set_dest(data->hwirq, destination_bits);
+
+ if (ffs(destination_bits) == fls(destination_bits))
+ distribution_mode = IDU_M_DISTRI_DEST;
+ else
+ distribution_mode = IDU_M_DISTRI_RR;
+
+ idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
};
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
- struct irq_domain *domain = irq_desc_get_handler_data(desc);
- unsigned int core_irq = irq_desc_get_irq(desc);
- unsigned int idu_irq;
+ struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+ irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
- idu_irq = core_irq - idu_first_irq;
- generic_handle_irq(irq_find_mapping(domain, idu_irq));
+ generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
struct irq_domain *domain;
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
- int i, irq;
+ int i, virq;
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
* however we need it to get the parent virq and set IDU handler
* as first level isr
*/
- irq = irq_of_parse_and_map(intc, i);
+ virq = irq_of_parse_and_map(intc, i);
if (!i)
- idu_first_irq = irq;
+ idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
- irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+ irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
}
__mcip_cmd(CMD_IDU_ENABLE, 0);
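The ffs() == fls() comparison introduced above is a single-bit test: the lowest and highest set bits of the online-destination mask coincide exactly when one CPU is targeted, in which case the IDU can deliver directly to that CPU instead of round-robin distribution. A minimal standalone C sketch of the same predicate (the function name and test values are illustrative, not taken from the patch):

    #include <assert.h>

    /* True iff exactly one bit of mask is set; for a non-zero mask
     * this is equivalent to ffs(mask) == fls(mask). */
    static int single_destination(unsigned int mask)
    {
            return mask && !(mask & (mask - 1));
    }

    int main(void)
    {
            assert(single_destination(0x4));   /* one CPU in mask: direct mode */
            assert(!single_destination(0x6));  /* several CPUs: round-robin mode */
            assert(!single_destination(0x0));  /* empty mask was rejected earlier */
            return 0;
    }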
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 59aa43cb146e..a41a79a4f4fe 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
- int uval;
- int ret;
+ struct pt_regs *regs = current_pt_regs();
+ int uval = -EFAULT;
/*
* This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+ /* Z indicates to userspace if operation succeded */
+ regs->status32 &= ~STATUS_Z_MASK;
+
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
preempt_disable();
- ret = __get_user(uval, uaddr);
- if (ret)
+ if (__get_user(uval, uaddr))
goto done;
- if (uval != expected)
- ret = -EAGAIN;
- else
- ret = __put_user(new, uaddr);
+ if (uval == expected) {
+ if (!__put_user(new, uaddr))
+ regs->status32 |= STATUS_Z_MASK;
+ }
done:
preempt_enable();
- return ret;
+ return uval;
}
void arch_cpu_idle(void)
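The reworked arc_usr_cmpxchg above stops multiplexing success and the old value into a single return code: the syscall now always returns the value it observed at *uaddr, and sets or clears the Z bit in the saved STATUS32 so userspace can test success directly. A hedged standalone C model of that contract (the function and its z_flag out-parameter are stand-ins for the real Z-bit plumbing):

    /* Model of the revised cmpxchg contract: return the observed old
     * value unconditionally; report success out of band, as the real
     * syscall does via STATUS_Z_MASK in the saved registers. */
    static int usr_cmpxchg_model(int *addr, int expected, int new_val, int *z_flag)
    {
            int old = *addr;

            *z_flag = 0;                /* Z cleared up front, as in the patch */
            if (old == expected) {
                    *addr = new_val;
                    *z_flag = 1;        /* store happened: Z set */
            }
            return old;                 /* old value, success or not */
    }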
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f183cc648851..88674d972c9d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -22,6 +22,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
+#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int i;
/*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
+ * if platform didn't set the present map already, do it now
+ * boot cpu is set to present already by init/main.c
*/
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
+ if (num_present_cpus() <= 1) {
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
*/
static DEFINE_PER_CPU(int, ipi_dev);
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
int *dev = per_cpu_ptr(&ipi_dev, cpu);
+ unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+ if (!virq)
+ panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
/* Boot cpu calls request, all call enable */
if (!cpu) {
int rc;
- rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+ rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
if (rc)
- panic("Percpu IRQ request failed for %d\n", irq);
+ panic("Percpu IRQ request failed for %u\n", virq);
}
- enable_percpu_irq(irq, 0);
+ enable_percpu_irq(virq, 0);
return 0;
}
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index f927b8dc6edd..c10390d1ddb6 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
cycle_t full;
} stamp;
-
- __asm__ __volatile(
- "1: \n"
- " lr %0, [AUX_RTC_LOW] \n"
- " lr %1, [AUX_RTC_HIGH] \n"
- " lr %2, [AUX_RTC_CTRL] \n"
- " bbit0.nt %2, 31, 1b \n"
- : "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+ /*
+ * hardware has an internal state machine which tracks readout of
+ * low/high and updates the CTRL.status if
+ * - interrupt/exception taken between the two reads
+ * - high increments after low has been read
+ */
+ do {
+ stamp.low = read_aux_reg(AUX_RTC_LOW);
+ stamp.high = read_aux_reg(AUX_RTC_HIGH);
+ status = read_aux_reg(AUX_RTC_CTRL);
+ } while (!(status & _BITUL(31)));
return stamp.full;
}
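The rewritten loop is the usual recipe for sampling a 64-bit counter exposed as two 32-bit halves: read low, then high, then a status bit that the hardware clears if the snapshot could be torn, and retry until the bit reads set. A standalone C sketch of the pattern, where read_reg(), the REG_* indices and VALID_BIT are hypothetical stand-ins for the AUX_RTC_* accessors:

    #include <stdint.h>

    #define VALID_BIT (1u << 31)
    enum { REG_LO, REG_HI, REG_CTRL };

    /* Fake register file so the sketch compiles and runs standalone. */
    static uint32_t regs[3] = { 0x12345678, 0x1, VALID_BIT };
    static uint32_t read_reg(int reg) { return regs[reg]; }

    static uint64_t read_split_counter(void)
    {
            uint32_t lo, hi, status;

            do {
                    lo = read_reg(REG_LO);
                    hi = read_reg(REG_HI);
                    status = read_reg(REG_CTRL);
            } while (!(status & VALID_BIT));  /* retry until an untorn pair */

            return ((uint64_t)hi << 32) | lo;
    }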
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 60aab5a7522b..cd8aad8226dd 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
__free_pages(page, get_order(size));
}
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+ unsigned long off = vma->vm_pgoff;
+ int ret = -ENXIO;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
/*
* streaming DMA Mapping API...
* CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
struct dma_map_ops arc_dma_ops = {
.alloc = arc_dma_alloc,
.free = arc_dma_free,
+ .mmap = arc_dma_mmap,
.map_page = arc_dma_map_page,
.map_sg = arc_dma_map_sg,
.sync_single_for_device = arc_dma_sync_single_for_device,
diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c
index 5e901f86e4bd..56a4c8522f11 100644
--- a/arch/arc/plat-eznps/smp.c
+++ b/arch/arc/plat-eznps/smp.c
@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
mtm_enable_core(cpu);
}
-static void eznps_ipi_clear(int irq)
-{
- write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
struct plat_smp_ops plat_smp_ops = {
.info = smp_cpuinfo_buf,
.init_early_smp = eznps_init_cpumasks,
.cpu_kick = eznps_smp_wakeup_cpu,
.ipi_send = eznps_ipi_send,
.init_per_cpu = eznps_init_per_cpu,
- .ipi_clear = eznps_ipi_clear,
};
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
/* VTTBR value associated with below pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* Timer */
struct arch_timer_kvm timer;
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f2ad58..19b5f5c1c0ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
*/
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- int ret = 0;
+ int ret, cpu;
if (type)
return -EINVAL;
+ kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+ if (!kvm->arch.last_vcpu_ran)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
ret = kvm_alloc_stage2_pgd(kvm);
if (ret)
goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
out_fail_alloc:
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
return ret;
}
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
+
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ int *last_ran;
+
+ last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+ /*
+ * We might get preempted before the vCPU actually runs, but
+ * over-invalidation doesn't affect correctness.
+ */
+ if (*last_ran != vcpu->vcpu_id) {
+ kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+ *last_ran = vcpu->vcpu_id;
+ }
+
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
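The per-CPU last_vcpu_ran bookkeeping added above guards against two vCPUs of the same VM running back to back on one physical CPU, where TLB entries tagged with the shared VMID could leak from one vCPU to the other; flushing on every observed switch over-invalidates but, as the comment notes, stays correct. The pattern reduced to a hedged standalone C sketch (flush_local_tlb() and the plain array are stand-ins for the hypercall and the real percpu allocation):

    #define NR_CPUS 4

    static int last_vcpu_ran[NR_CPUS];       /* models the __percpu variable */

    static void flush_local_tlb(void)
    {
            /* stand-in for kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu) */
    }

    static void init_last_ran(void)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    last_vcpu_ran[cpu] = -1; /* -1: no vCPU has run here yet */
    }

    static void vcpu_load(int cpu, int vcpu_id)
    {
            if (last_vcpu_ran[cpu] != vcpu_id) {
                    flush_local_tlb();       /* different vCPU last ran here */
                    last_vcpu_ran[cpu] = vcpu_id;
            }
    }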
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 729652854f90..6d810af2d9fd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
__kvm_tlb_flush_vmid(kvm);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, VTTBR);
+ isb();
+
+ write_sysreg(0, TLBIALL);
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, VTTBR);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
write_sysreg(0, TLBIALLNSNHIS);
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index b65c193dc64e..7afbfb0f96a3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -300,8 +300,11 @@
ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+ <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
status = "disabled";
pcie0_intc: interrupt-controller {
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 39feb85a6931..6e1cb8c5af4d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,7 +1,7 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000000..87b446535185
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,40 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+#define ARM64_WORKAROUND_845719 2
+#define ARM64_HAS_SYSREG_GIC_CPUIF 3
+#define ARM64_HAS_PAN 4
+#define ARM64_HAS_LSE_ATOMICS 5
+#define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
+#define ARM64_HAS_NO_HW_PREFETCH 8
+#define ARM64_HAS_UAO 9
+#define ARM64_ALT_PAN_NOT_UAO 10
+#define ARM64_HAS_VIRT_HOST_EXTN 11
+#define ARM64_WORKAROUND_CAVIUM_27456 12
+#define ARM64_HAS_32BIT_EL0 13
+#define ARM64_HYP_OFFSET_LOW 14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+
+#define ARM64_NCAPS 16
+
+#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a27c3245ba21..0bc0b1de90c4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
+#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -24,25 +25,6 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_HYP_OFFSET_LOW 14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
-
-#define ARM64_NCAPS 16
-
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 18f746551bf6..ec3553eb9349 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bd94e6766759..e5050388e062 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -62,6 +62,9 @@ struct kvm_arch {
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* The maximum number of vCPUs depends on the used GIC model */
int max_vcpus;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a79b969c26fc..6f72fe8b0e3e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
return v;
}
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
* We currently only support a 40bit IPA.
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..fc756e22c84c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -5,7 +5,6 @@
#include <linux/stringify.h>
#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#ifdef __ASSEMBLER__
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9cc0ea784ae6..88e2f2b938f0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, vttbr_el2);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ asm volatile("tlbi vmalle1" : : );
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
dsb(ishst);
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index d9563ddb337e..746bf5caaffc 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
ret = nios2_clocksource_init(timer);
break;
default:
+ ret = 0;
break;
}
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
index 4ce7a01a252d..5f55da9cbfd5 100644
--- a/arch/openrisc/include/asm/cache.h
+++ b/arch/openrisc/include/asm/cache.h
@@ -23,6 +23,8 @@
* they shouldn't be hard-coded!
*/
+#define __ro_after_init __read_mostly
+
#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 28f03ca60100..794bebb43d23 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -363,11 +363,11 @@ out:
static int diag224_get_name_table(void)
{
/* memory must be below 2GB */
- diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_cpu_names)
return -ENOMEM;
if (diag224(diag224_cpu_names)) {
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
return -EOPNOTSUPP;
}
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
static void diag224_delete_name_table(void)
{
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
}
static int diag224_idx2name(int index, char *name)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 000e6e91f6a0..3667d20e997f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -62,9 +62,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__start_ro_after_init = .;
+ __start_data_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
}
+ __end_data_ro_after_init = .;
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7350c8bc13a2..6b2f72f523b9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
- unsigned long pa;
+ unsigned long pa = 0;
int ret;
size = PAGE_ALIGN(size);
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 0ab5ee1c26af..aa8b0672f87a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
/* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 authTag[16];
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index c2b8d24a235c..d74747b031ec 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -29,9 +29,20 @@ struct kvm_page_track_notifier_node {
* @gpa: the physical address written by guest.
* @new: the data was written to the address.
* @bytes: the written length.
+ * @node: this node
*/
void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
- int bytes);
+ int bytes, struct kvm_page_track_notifier_node *node);
+ /*
+ * It is called when memory slot is being moved or removed
+ * users can drop write-protection for the pages in that memory slot
+ *
+ * @kvm: the kvm where memory slot being moved or removed
+ * @slot: the memory slot being moved or removed
+ * @node: this node
+ */
+ void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node);
};
void kvm_page_track_init(struct kvm *kvm);
@@ -58,4 +69,5 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
#endif
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index c7364bd633e1..51287cd90bf6 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
if (apm_info.get_power_status_broken)
return APM_32_UNSUPPORTED;
- if (apm_bios_call(&call))
+ if (apm_bios_call(&call)) {
+ if (!call.err)
+ return APM_NO_ERROR;
return call.err;
+ }
*status = call.ebx;
*bat = call.ecx;
if (apm_info.get_power_status_swabinminutes) {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7e986b4e4..87c5880ba3b7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4405,7 +4405,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
}
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
+ const u8 *new, int bytes,
+ struct kvm_page_track_notifier_node *node)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
@@ -4617,11 +4618,19 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
init_kvm_mmu(vcpu);
}
+static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+}
+
void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
node->track_write = kvm_mmu_pte_write;
+ node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
}
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index b431539c3714..4a1c13eaa518 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -106,6 +106,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
kvm_flush_remote_tlbs(kvm);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
/*
* remove the guest page from the tracking pool which stops the interception
@@ -135,6 +136,7 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
*/
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
/*
* check if the corresponding access on the specified guest page is tracked.
@@ -181,6 +183,7 @@ kvm_page_track_register_notifier(struct kvm *kvm,
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
spin_unlock(&kvm->mmu_lock);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
/*
* stop receiving the event interception. It is the opposed operation of
@@ -199,6 +202,7 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
/*
* Notify the node that write access is intercepted and write emulation is
@@ -222,6 +226,31 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
if (n->track_write)
- n->track_write(vcpu, gpa, new, bytes);
+ n->track_write(vcpu, gpa, new, bytes, n);
+ srcu_read_unlock(&head->track_srcu, idx);
+}
+
+/*
+ * Notify the node that memory slot is being removed or moved so that it can
+ * drop write-protection for the pages in the memory slot.
+ *
+ * The node should figure out it has any write-protected pages in this slot
+ * by itself.
+ */
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ struct kvm_page_track_notifier_head *head;
+ struct kvm_page_track_notifier_node *n;
+ int idx;
+
+ head = &kvm->arch.track_notifier_head;
+
+ if (hlist_empty(&head->track_notifier_list))
+ return;
+
+ idx = srcu_read_lock(&head->track_srcu);
+ hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
+ if (n->track_flush_slot)
+ n->track_flush_slot(kvm, slot, n);
srcu_read_unlock(&head->track_srcu, idx);
}
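
Both callbacks now receive the notifier node itself, so a consumer can recover its own state with container_of(). A minimal sketch of a consumer following the kvm_mmu_init_vm() hookup in the mmu.c hunk above (my_tracker and both handlers are hypothetical names, not part of this patch):

	static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
				   int bytes, struct kvm_page_track_notifier_node *node)
	{
		/* emulate or log the intercepted guest write */
	}

	static void my_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
					struct kvm_page_track_notifier_node *node)
	{
		/* drop write-protection for any pages tracked in @slot */
	}

	static struct kvm_page_track_notifier_node my_tracker = {
		.track_write	  = my_track_write,
		.track_flush_slot = my_track_flush_slot,
	};

	/* somewhere in VM init */
	kvm_page_track_register_notifier(kvm, &my_tracker);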
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3017de0431bd..7e30c720d0c5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8155,7 +8155,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_invalidate_zap_all_pages(kvm);
+ kvm_page_track_flush_slot(kvm, slot);
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d58fbf7f04e6..7dd70927991e 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev,
int ret;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
@@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev))
return 1;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 552010288135..373657f7e35a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
dev_desc = (const struct lpss_device_desc *)id->driver_data;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
return 1;
}
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b200ae1f3c6f..b4c1a6a51da4 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
+ * @properties: Optional collection of built-in properties.
*
* Check if the given @adev can be represented as a platform device and, if
* that's the case, create and register a platform device, populate its common
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
*
* Name of the platform device will be the same as @adev's.
*/
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
+ struct property_entry *properties)
{
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
+ pdevinfo.properties = properties;
if (acpi_dma_supported(adev))
pdevinfo.dma_mask = DMA_BIT_MASK(32);
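
Callers that previously paired acpi_create_platform_device() with a separate device_add_properties() call can now hand the table over directly, as the acpi_apd/acpi_lpss hunks above do. A minimal sketch under the same convention (my_props, my_create_device and the property name are illustrative only):

	#include <linux/property.h>

	static struct property_entry my_props[] = {
		PROPERTY_ENTRY_U32("clock-frequency", 48000000),	/* illustrative */
		{ }	/* sentinel */
	};

	static int my_create_device(struct acpi_device *adev)
	{
		struct platform_device *pdev;

		pdev = acpi_create_platform_device(adev, my_props);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}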
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 33505c651f62..86364097e236 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
if (IS_ENABLED(CONFIG_INT340X_THERMAL))
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
id->driver_data == INT3401_DEVICE)
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
return 1;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 035ac646d8db..3d1856f1f4d0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
&is_spi_i2c_slave);
acpi_dev_free_resource_list(&resource_list);
if (!is_spi_i2c_slave) {
- acpi_create_platform_device(device);
+ acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
blocking_notifier_call_chain(&acpi_reconfig_chain,
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d22a7260f42b..d76cd97a98b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
- bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
+ bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+ !drv->suppress_bind_attrs;
if (defer_all_probes) {
/*
@@ -383,7 +384,7 @@ re_probe:
if (test_remove) {
test_remove = false;
- if (dev->bus && dev->bus->remove)
+ if (dev->bus->remove)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e44944f4be77..2932a5bd892f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ab19adb07a12..3c606c09fd5a 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
return n;
}
-/* This can be removed if we are certain that no users of the block
- * layer will ever use zero-count pages in bios. Otherwise we have to
- * protect against the put_page sometimes done by the network layer.
- *
- * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
- * discussion.
- *
- * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition. So we use _refcount directly.
- */
-static void
-bio_pageinc(struct bio *bio)
-{
- struct bio_vec bv;
- struct page *page;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- /* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel.
- */
- page = compound_head(bv.bv_page);
- page_ref_inc(page);
- }
-}
-
-static void
-bio_pagedec(struct bio *bio)
-{
- struct page *page;
- struct bio_vec bv;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- page = compound_head(bv.bv_page);
- page_ref_dec(page);
- }
-}
-
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
buf->rq = rq;
buf->bio = bio;
buf->iter = bio->bi_iter;
- bio_pageinc(bio);
}
static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
if (buf == d->ip.buf)
d->ip.buf = NULL;
rq = buf->rq;
- bio_pagedec(buf->bio);
mempool_free(buf, d->bufpool);
n = (unsigned long) rq->special;
rq->special = (void *) --n;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 100be556e613..83482721bc01 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
drbd_update_congested(connection);
}
do {
- rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
if (rv == -EAGAIN) {
if (we_should_drop_the_connection(connection, sock))
break;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 19a16b2dbb91..7a1048755914 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -599,7 +599,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return -EINVAL;
sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
- if (!sreq)
+ if (IS_ERR(sreq))
return -ENOMEM;
mutex_unlock(&nbd->tx_lock);
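
blk_mq_alloc_request() signals failure through ERR_PTR() and never returns NULL, so the old !sreq test could never fire and a failed allocation would slip through. The usual pattern, sketched (the patch keeps a fixed -ENOMEM return; PTR_ERR() would propagate the exact code):

	struct request *rq;

	rq = blk_mq_alloc_request(q, WRITE, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -ENOMEM or -EWOULDBLOCK */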
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d23368874710..6af1ce04b3da 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file)
}
if (pp->pdev) {
- const char *name = pp->pdev->name;
-
parport_unregister_device(pp->pdev);
- kfree(name);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 20b105584f82..80ae2a51452d 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
struct mux_hwclock *hwc,
const struct clk_ops *ops,
unsigned long min_rate,
+ unsigned long max_rate,
unsigned long pct80_rate,
const char *fmt, int idx)
{
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
continue;
if (rate < min_rate)
continue;
+ if (rate > max_rate)
+ continue;
parent_names[j] = div->name;
hwc->parent_to_clksel[j] = i;
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
struct mux_hwclock *hwc;
const struct clockgen_pll_div *div;
unsigned long plat_rate, min_rate;
- u64 pct80_rate;
+ u64 max_rate, pct80_rate;
u32 clksel;
hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
return NULL;
}
- pct80_rate = clk_get_rate(div->clk);
- pct80_rate *= 8;
+ max_rate = clk_get_rate(div->clk);
+ pct80_rate = max_rate * 8;
do_div(pct80_rate, 10);
plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
else
min_rate = plat_rate / 2;
- return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+ return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
pct80_rate, "cg-cmux%d", idx);
}
@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
hwc->reg = cg->regs + 0x20 * idx + 0x10;
hwc->info = cg->info.hwaccel[idx];
- return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+ return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
"cg-hwaccel%d", idx);
}
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 5daddf5ecc4b..bc37030e38ba 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long flags = 0;
u32 data;
- phys_addr_t reg;
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg != NULL) {
pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
- reg = __pa(pclk->param.csr_reg);
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
data |= pclk->param.reg_clk_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_clk_offset);
- pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
data);
@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
data &= ~pclk->param.reg_csr_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_csr_offset);
- pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
data);
}
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 19f9b622981a..7a6acc3e4a92 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
temp64 *= mfn;
do_div(temp64, mfd);
- return (parent_rate * div) + (u32)temp64;
+ return parent_rate * div + (unsigned long)temp64;
}
static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
do_div(temp64, parent_rate);
mfn = temp64;
- return parent_rate * div + parent_rate * mfn / mfd;
+ temp64 = (u64)parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+ return parent_rate * div + (unsigned long)temp64;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 3a51fff1b0e7..9adaf48aea23 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 87f2317b2a00..f110c02e83cb 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index e22a67f76d93..64d1ef49caeb 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apbcp_base = of_iomap(np, 3);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apbcp_base) {
pr_err("failed to map apbcp registers\n");
return;
}
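
All three mmp hunks fix the same copy-and-paste slip: each NULL check tested the region mapped in an earlier step rather than the one just assigned. The intended shape is simply:

	pxa_unit->apmu_base = of_iomap(np, 1);
	if (!pxa_unit->apmu_base) {	/* test the pointer just assigned */
		pr_err("failed to map apmu registers\n");
		return;
	}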
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 8feba93672c5..e8075359366b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
ddrclk->ddr_flag = ddr_flag;
clk = clk_register(NULL, &ddrclk->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: could not register ddrclk %s\n", __func__, name);
+ if (IS_ERR(clk))
kfree(ddrclk);
- return NULL;
- }
return clk;
}
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 96fab6cfb202..6c6afb87b4ce 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -132,28 +132,34 @@ free_clkout:
pr_err("%s: failed to register clkout clock\n", __func__);
}
+/*
+ * We use the CLK_OF_DECLARE_DRIVER initialization method to avoid setting
+ * the OF_POPULATED flag on the PMU device tree node, so that the Exynos PMU
+ * platform device can later be properly probed by the PMU driver.
+ */
+
static void __init exynos4_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
exynos4_clkout_init);
static void __init exynos5_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
exynos5_clkout_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 2f9f96cc9f65..06879d1dcabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
{
int i, ret;
struct device *dev;
-
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* return early if no ACP */
+ if (!adev->acp.acp_genpd)
+ return 0;
+
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 017556ca22e6..7ded61e6dd81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -809,10 +809,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TOPAZ:
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+ strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
+ ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+ strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 3af8ffb45b64..8d1cf2d3e663 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->ddc_bus->has_aux) {
+ if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 6bb4d9e9afe4..42da6163b893 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -742,8 +742,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {
static int __init amdgpu_init(void)
{
- amdgpu_sync_init();
- amdgpu_fence_slab_init();
+ int r;
+
+ r = amdgpu_sync_init();
+ if (r)
+ goto error_sync;
+
+ r = amdgpu_fence_slab_init();
+ if (r)
+ goto error_fence;
+
+ r = amd_sched_fence_slab_init();
+ if (r)
+ goto error_sched;
+
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -755,6 +767,15 @@ static int __init amdgpu_init(void)
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
+
+error_sched:
+ amdgpu_fence_slab_fini();
+
+error_fence:
+ amdgpu_sync_fini();
+
+error_sync:
+ return r;
}
static void __exit amdgpu_exit(void)
@@ -763,6 +784,7 @@ static void __exit amdgpu_exit(void)
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
+ amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
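
The init path now unwinds with gotos in reverse order of setup, the usual kernel idiom for multi-step initialization; condensed, with hypothetical step names:

	static int __init my_init(void)
	{
		int r;

		r = step_a_init();
		if (r)
			goto err_a;
		r = step_b_init();
		if (r)
			goto err_b;
		return 0;

	err_b:
		step_a_fini();	/* undo in reverse order of init */
	err_a:
		return r;
	}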
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ad908612aff9..d1cf9ac0dff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((amdgpu_runtime_pm != 0) &&
amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0))
flags |= AMD_IS_PX;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 52d0a83e6ad1..0b21e7beda91 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -80,7 +80,9 @@
#include "dce_virtual.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 14f8c1f4da3d..0723758ed065 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
PHM_FUNC_CHECK(hwmgr);
if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
- return -EINVAL;
+ return false;
return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 2ba7937d2545..e03dcb6ea9c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -710,8 +710,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t vol;
int ret = 0;
- if (hwmgr->chip_id < CHIP_POLARIS10) {
- atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+ if (hwmgr->chip_id < CHIP_TONGA) {
+ ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+ } else if (hwmgr->chip_id < CHIP_POLARIS10) {
+ ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
if (*voltage >= 2000 || *voltage == 0)
*voltage = 1150;
} else {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 9e49f2777143..28e748d688e2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1474,19 +1474,19 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
- if (table_info == NULL)
- return -EINVAL;
-
- sclk_table = table_info->vdd_dep_on_sclk;
-
for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
- if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+ if ((hwmgr->pp_table_version == PP_TABLE_V1)
+ && !phm_get_sclk_for_voltage_evv(hwmgr,
table_info->vddgfx_lookup_table, vv_id, &sclk)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
+ if (table_info == NULL)
+ return -EINVAL;
+ sclk_table = table_info->vdd_dep_on_sclk;
+
for (j = 1; j < sclk_table->count; j++) {
if (sclk_table->entries[j].clk == sclk &&
sclk_table->entries[j].cks_enable == 0) {
@@ -1512,12 +1512,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
}
}
} else {
-
if ((hwmgr->pp_table_version == PP_TABLE_V0)
|| !phm_get_sclk_for_voltage_evv(hwmgr,
table_info->vddc_lookup_table, vv_id, &sclk)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
+ if (table_info == NULL)
+ return -EINVAL;
+ sclk_table = table_info->vdd_dep_on_sclk;
+
for (j = 1; j < sclk_table->count; j++) {
if (sclk_table->entries[j].clk == sclk &&
sclk_table->entries[j].cks_enable == 0) {
@@ -2147,9 +2150,11 @@ static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (tab) {
+ vddc = tab->vddc;
smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
&data->vddc_leakage);
tab->vddc = vddc;
+ vddci = tab->vddci;
smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
&data->vddci_leakage);
tab->vddci = vddci;
@@ -4247,18 +4252,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
+ struct phm_clock_voltage_dependency_table *sclk_table;
int i;
- if (table_info == NULL)
- return -EINVAL;
-
- dep_sclk_table = table_info->vdd_dep_on_sclk;
-
- for (i = 0; i < dep_sclk_table->count; i++) {
- clocks->clock[i] = dep_sclk_table->entries[i].clk;
- clocks->count++;
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
+ return -EINVAL;
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ for (i = 0; i < sclk_table->count; i++) {
+ clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->count++;
+ }
}
+
return 0;
}
@@ -4280,17 +4293,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
(struct phm_ppt_v1_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
int i;
+ struct phm_clock_voltage_dependency_table *mclk_table;
- if (table_info == NULL)
- return -EINVAL;
-
- dep_mclk_table = table_info->vdd_dep_on_mclk;
-
- for (i = 0; i < dep_mclk_table->count; i++) {
- clocks->clock[i] = dep_mclk_table->entries[i].clk;
- clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL)
+ return -EINVAL;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->latency[i] = smu7_get_mem_latency(hwmgr,
dep_mclk_table->entries[i].clk);
- clocks->count++;
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+ for (i = 0; i < mclk_table->count; i++) {
+ clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->count++;
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index fb6c6f6106d5..29d0319b22e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0))
- return 0;
+ return -ENODEV;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD);
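
Returning -ENODEV lets callers distinguish "no fan fitted" from a successfully read speed of zero; a hypothetical caller might react like this:

	struct phm_fan_speed_info info;
	int ret;

	ret = smu7_fan_ctrl_get_fan_speed_info(hwmgr, &info);
	if (ret == -ENODEV)
		return 0;	/* no fan: skip fan attributes entirely */
	if (ret)
		return ret;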
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 09b2cf6ccfa4..1bf83ed113b3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
-
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
@@ -619,13 +616,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&sched->ring_mirror_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
- if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
- sched_fence_slab = kmem_cache_create(
- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!sched_fence_slab)
- return -ENOMEM;
- }
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -646,7 +636,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
- rcu_barrier();
- if (atomic_dec_and_test(&sched_fence_slab_ref))
- kmem_cache_destroy(sched_fence_slab);
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 876aa43b57df..d8dc681bcda6 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,9 +30,6 @@
struct amd_gpu_scheduler;
struct amd_sched_rq;
-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
/**
* A scheduler entity is a wrapper around a job queue or a group
* of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 91530e25aaff..33f54d0a5c4f 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,6 +27,25 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+ sched_fence_slab = kmem_cache_create(
+ "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sched_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(sched_fence_slab);
+}
+
struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
void *owner)
{
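
The fences are freed through call_rcu(), so the new fini helper must drain outstanding RCU callbacks with rcu_barrier() before the cache is destroyed, exactly as amd_sched_fence_slab_fini() does above. The general lifetime rule, sketched with hypothetical names:

	static void my_fence_free_rcu(struct rcu_head *head)
	{
		struct my_fence *f = container_of(head, struct my_fence, rcu);

		kmem_cache_free(my_fence_slab, f);	/* runs after a grace period */
	}

	/* module unload: flush pending call_rcu() frees, then drop the cache */
	rcu_barrier();
	kmem_cache_destroy(my_fence_slab);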
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ffd673615772..a18f156c8b66 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,5 +1,5 @@
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o
+ armada_gem.o armada_overlay.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index a51f8cbcfe26..95cb3966b2ca 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -18,6 +18,7 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_trace.h"
struct armada_frame_work {
struct armada_plane_work work;
@@ -164,19 +165,37 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
}
}
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ u32 pixel_format = fb->pixel_format;
+ int num_planes = drm_format_num_planes(pixel_format);
+ int i;
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ for (i = 0; i < num_planes; i++)
+ addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * drm_format_plane_cpp(pixel_format, i);
+ for (; i < 3; i++)
+ addrs[i] = 0;
+}
+
static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
int x, int y, struct armada_regs *regs, bool interlaced)
{
- struct armada_gem_object *obj = drm_fb_obj(fb);
unsigned pitch = fb->pitches[0];
- unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
- uint32_t addr_odd, addr_even;
+ u32 addrs[3], addr_odd, addr_even;
unsigned i = 0;
DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
pitch, x, y, fb->bits_per_pixel);
- addr_odd = addr_even = obj->dev_addr + offset;
+ armada_drm_plane_calc_addrs(addrs, fb, x, y);
+
+ addr_odd = addr_even = addrs[0];
if (interlaced) {
addr_even += pitch;
@@ -192,17 +211,18 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
}
static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct armada_plane *plane)
+ struct drm_plane *plane)
{
- struct armada_plane_work *work = xchg(&plane->work, NULL);
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_plane_work *work = xchg(&dplane->work, NULL);
/* Handle any pending frame work. */
if (work) {
- work->fn(dcrtc, plane, work);
+ work->fn(dcrtc, dplane, work);
drm_crtc_vblank_put(&dcrtc->crtc);
}
- wake_up(&plane->frame_wait);
+ wake_up(&dplane->frame_wait);
}
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
@@ -307,14 +327,12 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
{
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
-
/*
* Tell the DRM core that vblank IRQs aren't going to happen for
* a while. This cleans up any pending vblank events for us.
*/
drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, plane);
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
@@ -416,10 +434,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
- if (ovl_plane) {
- struct armada_plane *plane = drm_to_armada_plane(ovl_plane);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (ovl_plane)
+ armada_drm_plane_work_run(dcrtc, ovl_plane);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -449,10 +465,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ) {
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (stat & GRA_FRAME_IRQ)
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -466,6 +480,8 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ trace_armada_drm_irq(&dcrtc->crtc, stat);
+
/* Mask out those interrupts we haven't enabled */
v = stat & dcrtc->irq_ena;
@@ -531,6 +547,35 @@ static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
return val;
}
+static void armada_drm_primary_set(struct drm_crtc *crtc,
+ struct drm_plane *plane, int x, int y)
+{
+ struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[8];
+ bool interlaced = dcrtc->interlaced;
+ unsigned i;
+ u32 ctrl0;
+
+ i = armada_drm_crtc_calc_fb(plane->fb, x, y, regs, interlaced);
+
+ armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
+
+ ctrl0 = state->ctrl0;
+ if (interlaced)
+ ctrl0 |= CFG_GRA_FTOGGLE;
+
+ armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_end(regs, i);
+ armada_drm_crtc_update_regs(dcrtc, regs);
+}
+
/* The mode_config.mutex will be held for this call */
static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *adj,
@@ -547,9 +592,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
- i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb,
- x, y, regs, interlaced);
+ val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
+
+ if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+
+ drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
+ drm_to_armada_plane(crtc->primary)->state.src_hw =
+ drm_to_armada_plane(crtc->primary)->state.dst_hw =
+ adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+ drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
lm = adj->crtc_htotal - adj->crtc_hsync_end;
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
@@ -625,8 +681,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
@@ -638,22 +692,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
}
- val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- if (interlaced)
- val |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
- LCD_SPU_DMA_CTRL0);
-
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
@@ -662,6 +700,8 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
+
+ armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
armada_drm_crtc_update(dcrtc);
@@ -1038,7 +1078,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
* interrupt, so complete it now.
*/
if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, drm_to_armada_plane(dcrtc->crtc.primary));
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
return 0;
}
@@ -1172,7 +1212,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
- writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 04fdd22d483b..b08043e8cc3b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -41,10 +41,18 @@ struct armada_plane_work {
struct armada_plane_work *);
};
+struct armada_plane_state {
+ u32 src_hw;
+ u32 dst_hw;
+ u32 dst_yx;
+ u32 ctrl0;
+};
+
struct armada_plane {
struct drm_plane base;
wait_queue_head_t frame_wait;
struct armada_plane_work *work;
+ struct armada_plane_state state;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
@@ -54,6 +62,8 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
struct armada_plane_work *armada_drm_plane_work_cancel(
struct armada_crtc *dcrtc, struct armada_plane *plane);
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y);
struct armada_crtc {
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index d4f7ab0a30d4..90222e60d2d6 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -113,7 +113,7 @@ static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
struct drm_info_node *node;
node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (node == NULL) {
+ if (!node) {
debugfs_remove(ent);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index 3b2bb6128d40..77952d559a3c 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -53,6 +53,7 @@ struct armada_variant {
extern const struct armada_variant armada510_ops;
struct armada_private {
+ struct drm_device drm;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 94e46da9a758..07086b427c22 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -49,106 +49,6 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static int armada_drm_load(struct drm_device *dev, unsigned long flags)
-{
- struct armada_private *priv;
- struct resource *mem = NULL;
- int ret, n;
-
- for (n = 0; ; n++) {
- struct resource *r = platform_get_resource(dev->platformdev,
- IORESOURCE_MEM, n);
- if (!r)
- break;
-
- /* Resources above 64K are graphics memory */
- if (resource_size(r) > SZ_64K)
- mem = r;
- else
- return -EINVAL;
- }
-
- if (!mem)
- return -ENXIO;
-
- if (!devm_request_mem_region(dev->dev, mem->start,
- resource_size(mem), "armada-drm"))
- return -EBUSY;
-
- priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- DRM_ERROR("failed to allocate private\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(dev->platformdev, dev);
- dev->dev_private = priv;
-
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
- /* Mode setting support */
- drm_mode_config_init(dev);
- dev->mode_config.min_width = 320;
- dev->mode_config.min_height = 200;
-
- /*
- * With vscale enabled, the maximum width is 1920 due to the
- * 1920 by 3 lines RAM
- */
- dev->mode_config.max_width = 1920;
- dev->mode_config.max_height = 2048;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.funcs = &armada_drm_mode_config_funcs;
- drm_mm_init(&priv->linear, mem->start, resource_size(mem));
- mutex_init(&priv->linear_lock);
-
- ret = component_bind_all(dev->dev, dev);
- if (ret)
- goto err_kms;
-
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- goto err_comp;
-
- dev->irq_enabled = true;
-
- ret = armada_fbdev_init(dev);
- if (ret)
- goto err_comp;
-
- drm_kms_helper_poll_init(dev);
-
- return 0;
-
- err_comp:
- component_unbind_all(dev->dev, dev);
- err_kms:
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
-
- return ret;
-}
-
-static int armada_drm_unload(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- drm_kms_helper_poll_fini(dev);
- armada_fbdev_fini(dev);
-
- component_unbind_all(dev->dev, dev);
-
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- dev->dev_private = NULL;
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
@@ -186,16 +86,10 @@ static const struct file_operations armada_drm_fops = {
};
static struct drm_driver armada_drm_driver = {
- .load = armada_drm_load,
.lastclose = armada_drm_lastclose,
- .unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = armada_drm_debugfs_init,
- .debugfs_cleanup = armada_drm_debugfs_cleanup,
-#endif
.gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -218,12 +112,138 @@ static struct drm_driver armada_drm_driver = {
static int armada_drm_bind(struct device *dev)
{
- return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+ struct armada_private *priv;
+ struct resource *mem = NULL;
+ int ret, n;
+
+ for (n = 0; ; n++) {
+ struct resource *r = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM, n);
+ if (!r)
+ break;
+
+ /* Resources above 64K are graphics memory */
+ if (resource_size(r) > SZ_64K)
+ mem = r;
+ else
+ return -EINVAL;
+ }
+
+ if (!mem)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(dev, mem->start, resource_size(mem),
+ "armada-drm"))
+ return -EBUSY;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The drm_device structure must be at the start of
+ * armada_private for drm_dev_unref() to work correctly.
+ */
+ BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
+
+ ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev);
+ if (ret) {
+ dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n",
+ __func__, ret);
+ kfree(priv);
+ return ret;
+ }
+
+ priv->drm.platformdev = to_platform_device(dev);
+ priv->drm.dev_private = priv;
+
+ platform_set_drvdata(priv->drm.platformdev, &priv->drm);
+
+ INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+ INIT_KFIFO(priv->fb_unref);
+
+ /* Mode setting support */
+ drm_mode_config_init(&priv->drm);
+ priv->drm.mode_config.min_width = 320;
+ priv->drm.mode_config.min_height = 200;
+
+ /*
+ * With vscale enabled, the maximum width is 1920 due to the
+	 * 1920-by-3-line RAM
+ */
+ priv->drm.mode_config.max_width = 1920;
+ priv->drm.mode_config.max_height = 2048;
+
+ priv->drm.mode_config.preferred_depth = 24;
+ priv->drm.mode_config.funcs = &armada_drm_mode_config_funcs;
+ drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+ mutex_init(&priv->linear_lock);
+
+ ret = component_bind_all(dev, &priv->drm);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_vblank_init(&priv->drm, priv->drm.mode_config.num_crtc);
+ if (ret)
+ goto err_comp;
+
+ priv->drm.irq_enabled = true;
+
+ ret = armada_fbdev_init(&priv->drm);
+ if (ret)
+ goto err_comp;
+
+ drm_kms_helper_poll_init(&priv->drm);
+
+ ret = drm_dev_register(&priv->drm, 0);
+ if (ret)
+ goto err_poll;
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_init(priv->drm.primary);
+#endif
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ armada_drm_driver.name, armada_drm_driver.major,
+ armada_drm_driver.minor, armada_drm_driver.patchlevel,
+ armada_drm_driver.date, dev_name(dev),
+ priv->drm.primary->index);
+
+ return 0;
+
+ err_poll:
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+ err_comp:
+ component_unbind_all(dev, &priv->drm);
+ err_kms:
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+ drm_dev_unref(&priv->drm);
+ return ret;
}
static void armada_drm_unbind(struct device *dev)
{
- drm_put_dev(dev_get_drvdata(dev));
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct armada_private *priv = drm->dev_private;
+
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_cleanup(priv->drm.primary);
+#endif
+ drm_dev_unregister(&priv->drm);
+
+ component_unbind_all(dev, &priv->drm);
+
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+
+ drm_dev_unref(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 806791897304..768087ddb046 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -212,7 +212,7 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
return obj;
}
-struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct armada_gem_object *obj;
@@ -419,7 +419,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/* Prime support */
-struct sg_table *
+static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
@@ -594,11 +594,7 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
int ret;
dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
- DMA_TO_DEVICE);
- if (!dobj->sgt) {
- DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
- return -EINVAL;
- }
+ DMA_TO_DEVICE);
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 152b4e716269..6743615232f5 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -15,6 +15,7 @@
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_trace.h"
struct armada_ovl_plane_properties {
uint32_t colorkey_yr;
@@ -32,10 +33,6 @@ struct armada_ovl_plane_properties {
struct armada_ovl_plane {
struct armada_plane base;
struct drm_framebuffer *old_fb;
- uint32_t src_hw;
- uint32_t dst_hw;
- uint32_t dst_yx;
- uint32_t ctrl0;
struct {
struct armada_plane_work work;
struct armada_regs regs[13];
@@ -87,6 +84,8 @@ static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
{
struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
+ trace_armada_ovl_plane_work(&dcrtc->crtc, &plane->base);
+
armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
armada_ovl_retire_fb(dplane, NULL);
}
@@ -120,6 +119,10 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
bool visible;
int ret;
+ trace_armada_ovl_plane_update(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
DRM_ROTATE_0,
0, INT_MAX, true, false, &visible);
@@ -141,22 +144,22 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+ if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
- dplane->src_hw = val;
+ dplane->base.state.src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- dplane->dst_hw = val;
+ dplane->base.state.dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
val = dest.y1 << 16 | dest.x1;
- dplane->dst_yx = val;
+ dplane->base.state.dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
return 0;
- } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ } else if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
dcrtc->base + LCD_SPU_SRAM_PARA1);
@@ -166,9 +169,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
if (plane->fb != fb) {
- struct armada_gem_object *obj = drm_fb_obj(fb);
- uint32_t addr[3], pixel_format;
- int i, num_planes, hsub;
+ u32 addrs[3], pixel_format;
+ int num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -182,6 +184,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
src_y = src.y1 >> 16;
src_x = src.x1 >> 16;
+ armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
+
pixel_format = fb->pixel_format;
hsub = drm_format_horz_chroma_subsampling(pixel_format);
num_planes = drm_format_num_planes(pixel_format);
@@ -194,24 +198,17 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (src_x & (hsub - 1) && num_planes == 1)
ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
- for (i = 0; i < num_planes; i++)
- addr[i] = obj->dev_addr + fb->offsets[i] +
- src_y * fb->pitches[i] +
- src_x * drm_format_plane_cpp(pixel_format, i);
- for (; i < ARRAY_SIZE(addr); i++)
- addr[i] = 0;
-
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -223,28 +220,28 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
}
val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
- if (dplane->src_hw != val) {
- dplane->src_hw = val;
+ if (dplane->base.state.src_hw != val) {
+ dplane->base.state.src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- if (dplane->dst_hw != val) {
- dplane->dst_hw = val;
+ if (dplane->base.state.dst_hw != val) {
+ dplane->base.state.dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
val = dest.y1 << 16 | dest.x1;
- if (dplane->dst_yx != val) {
- dplane->dst_yx = val;
+ if (dplane->base.state.dst_yx != val) {
+ dplane->base.state.dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
- if (dplane->ctrl0 != ctrl0) {
- dplane->ctrl0 = ctrl0;
+ if (dplane->base.state.ctrl0 != ctrl0) {
+ dplane->base.state.ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
@@ -275,7 +272,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane)
armada_drm_crtc_plane_disable(dcrtc, plane);
dcrtc->plane = NULL;
- dplane->ctrl0 = 0;
+ dplane->base.state.ctrl0 = 0;
fb = xchg(&dplane->old_fb, NULL);
if (fb)
diff --git a/drivers/gpu/drm/armada/armada_trace.c b/drivers/gpu/drm/armada/armada_trace.c
new file mode 100644
index 000000000000..068b336ba75f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.c
@@ -0,0 +1,4 @@
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "armada_trace.h"
+#endif
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
new file mode 100644
index 000000000000..dc0cba70fd1a
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -0,0 +1,66 @@
+#if !defined(ARMADA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ARMADA_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM armada
+#define TRACE_INCLUDE_FILE armada_trace
+
+TRACE_EVENT(armada_drm_irq,
+ TP_PROTO(struct drm_crtc *crtc, u32 stat),
+ TP_ARGS(crtc, stat),
+ TP_STRUCT__entry(
+ __field(struct drm_crtc *, crtc)
+ __field(u32, stat)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->stat = stat;
+ ),
+ TP_printk("crtc %p stat 0x%08x",
+ __entry->crtc, __entry->stat)
+);
+
+TRACE_EVENT(armada_ovl_plane_update,
+ TP_PROTO(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h),
+ TP_ARGS(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ __field(struct drm_framebuffer *, fb)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ __entry->fb = fb;
+ ),
+ TP_printk("plane %p crtc %p fb %p",
+ __entry->plane, __entry->crtc, __entry->fb)
+);
+
+TRACE_EVENT(armada_ovl_plane_work,
+ TP_PROTO(struct drm_crtc *crtc, struct drm_plane *plane),
+ TP_ARGS(crtc, plane),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("plane %p crtc %p",
+ __entry->plane, __entry->crtc)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
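A quick sketch of how these events are meant to be used (the real call sites land elsewhere in this series; the armada_crtc field names below are assumptions for illustration). Each TRACE_EVENT() above generates a trace_<name>() helper that compiles away unless the event is enabled:

#include "armada_trace.h"

/* Hypothetical call site: log the raw interrupt status before handling. */
static void armada_drm_irq_debug(struct armada_crtc *dcrtc, u32 stat)
{
	trace_armada_drm_irq(&dcrtc->crtc, stat);
}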
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index cc6c2530764b..f74b7d06ec01 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -303,9 +303,10 @@ void drm_minor_release(struct drm_minor *minor)
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
* handling, modesetting support and initial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * initialize all the corresponding hardware bits. An important part of this is
+ * also calling drm_dev_set_unique() to set the userspace-visible unique name of
+ * this device instance. Finally when everything is up and running and ready for
+ * userspace the device instance can be published using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
* bus-specific helpers and the ->load() callback. But due to
@@ -327,17 +328,6 @@ void drm_minor_release(struct drm_minor *minor)
* dev_priv field of &drm_device.
*/
-static int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- if (!name)
- return -EINVAL;
-
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
@@ -754,6 +744,26 @@ void drm_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unregister);
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @name: unique name
+ *
+ * Sets the unique name of a DRM device using the specified string. Drivers
+ * can use this at driver probe time if the unique name of the devices they
+ * drive is static.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+ kfree(dev->unique);
+ dev->unique = kstrdup(name, GFP_KERNEL);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
+
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
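A minimal usage sketch for the now-exported helper, assuming a hypothetical platform driver whose unique name is static at probe time (the foo_* names are illustrative, not from this patch):

#include <drm/drmP.h>

static int foo_drm_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	/* Set the userspace-visible unique name before publishing. */
	ret = drm_dev_set_unique(drm, dev_name(&pdev->dev));
	if (ret)
		goto err_unref;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(drm);
	return ret;
}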
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 728990fee4ef..336be31ff3de 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3653,32 +3653,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
EXPORT_SYMBOL(drm_av_sync_delay);
/**
- * drm_select_eld - select one ELD from multiple HDMI/DP sinks
- * @encoder: the encoder just changed display mode
- *
- * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
- * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
- *
- * Return: The connector associated with the first HDMI/DP sink that has ELD
- * attached to it.
- */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
-{
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- drm_for_each_connector(connector, dev)
- if (connector->encoder == encoder && connector->eld[0])
- return connector;
-
- return NULL;
-}
-EXPORT_SYMBOL(drm_select_eld);
-
-/**
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
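drm_select_eld() goes away because its remaining user (tda998x, changed below) has a fixed encoder/connector pairing and can read the ELD from its own connector; a sketch of that replacement pattern, using the names from the tda998x hunks that follow:

/* No connector-list walk needed when the pairing is static: */
mutex_lock(&priv->audio_mutex);
memcpy(buf, priv->connector.eld, min(sizeof(priv->connector.eld), len));
mutex_unlock(&priv->audio_mutex);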
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 027521f30b6e..86f47e190309 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -41,12 +41,15 @@ struct tda998x_priv {
struct i2c_client *hdmi;
struct mutex mutex;
u16 rev;
+ u8 cec_addr;
u8 current_page;
- int dpms;
- bool is_hdmi_sink;
+ bool is_on;
+ bool supports_infoframes;
+ bool sink_has_audio;
u8 vip_cntrl_0;
u8 vip_cntrl_1;
u8 vip_cntrl_2;
+ unsigned long tmds_clock;
struct tda998x_audio_params audio_params;
struct platform_device *audio_pdev;
@@ -105,6 +108,8 @@ struct tda998x_priv {
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_PREFILT BIT(0)
+# define FEAT_POWERDOWN_CSC BIT(1)
# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
@@ -370,35 +375,46 @@ struct tda998x_priv {
static void
cec_write(struct tda998x_priv *priv, u16 addr, u8 val)
{
- struct i2c_client *client = priv->cec;
u8 buf[] = {addr, val};
+ struct i2c_msg msg = {
+ .addr = priv->cec_addr,
+ .len = 2,
+ .buf = buf,
+ };
int ret;
- ret = i2c_master_send(client, buf, sizeof(buf));
+ ret = i2c_transfer(priv->hdmi->adapter, &msg, 1);
if (ret < 0)
- dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+ dev_err(&priv->hdmi->dev, "Error %d writing to cec:0x%x\n",
+ ret, addr);
}
static u8
cec_read(struct tda998x_priv *priv, u8 addr)
{
- struct i2c_client *client = priv->cec;
u8 val;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cec_addr,
+ .len = 1,
+ .buf = &addr,
+ }, {
+ .addr = priv->cec_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &val,
+ },
+ };
int ret;
- ret = i2c_master_send(client, &addr, sizeof(addr));
- if (ret < 0)
- goto fail;
-
- ret = i2c_master_recv(client, &val, sizeof(val));
- if (ret < 0)
- goto fail;
+ ret = i2c_transfer(priv->hdmi->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0) {
+ dev_err(&priv->hdmi->dev, "Error %d reading from cec:0x%x\n",
+ ret, addr);
+ val = 0;
+ }
return val;
-
-fail:
- dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
- return 0;
}
static int
@@ -579,9 +595,9 @@ tda998x_reset(struct tda998x_priv *priv)
* HPD assertion: it needs a delay of 100ms to avoid timing out while
* trying to read EDID data.
*
- * However, tda998x_encoder_get_modes() may be called at any moment
+ * However, tda998x_connector_get_modes() may be called at any moment
* after tda998x_connector_detect() indicates that we are connected, so
- * we need to delay probing modes in tda998x_encoder_get_modes() after
+ * we need to delay probing modes in tda998x_connector_get_modes() after
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
@@ -630,28 +646,30 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
bool handled = false;
sta = cec_read(priv, REG_CEC_INTSTATUS);
- cec = cec_read(priv, REG_CEC_RXSHPDINT);
- lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
- flag0 = reg_read(priv, REG_INT_FLAGS_0);
- flag1 = reg_read(priv, REG_INT_FLAGS_1);
- flag2 = reg_read(priv, REG_INT_FLAGS_2);
- DRM_DEBUG_DRIVER(
- "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
- sta, cec, lvl, flag0, flag1, flag2);
-
- if (cec & CEC_RXSHPDINT_HPD) {
- if (lvl & CEC_RXSHPDLEV_HPD)
- tda998x_edid_delay_start(priv);
- else
- schedule_work(&priv->detect_work);
-
- handled = true;
- }
+ if (sta & CEC_INTSTATUS_HDMI) {
+ cec = cec_read(priv, REG_CEC_RXSHPDINT);
+ lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
+ flag0 = reg_read(priv, REG_INT_FLAGS_0);
+ flag1 = reg_read(priv, REG_INT_FLAGS_1);
+ flag2 = reg_read(priv, REG_INT_FLAGS_2);
+ DRM_DEBUG_DRIVER(
+ "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
+ sta, cec, lvl, flag0, flag1, flag2);
+
+ if (cec & CEC_RXSHPDINT_HPD) {
+ if (lvl & CEC_RXSHPDLEV_HPD)
+ tda998x_edid_delay_start(priv);
+ else
+ schedule_work(&priv->detect_work);
+
+ handled = true;
+ }
- if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
- priv->wq_edid_wait = 0;
- wake_up(&priv->wq_edid);
- handled = true;
+ if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
+ priv->wq_edid_wait = 0;
+ wake_up(&priv->wq_edid);
+ handled = true;
+ }
}
return IRQ_RETVAL(handled);
@@ -700,6 +718,8 @@ tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
}
+/* Audio support */
+
static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
{
if (on) {
@@ -713,8 +733,7 @@ static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
static int
tda998x_configure_audio(struct tda998x_priv *priv,
- struct tda998x_audio_params *params,
- unsigned mode_clock)
+ struct tda998x_audio_params *params)
{
u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
u32 n;
@@ -771,7 +790,7 @@ tda998x_configure_audio(struct tda998x_priv *priv,
* assume 100MHz requires larger divider.
*/
adiv = AUDIO_DIV_SERCLK_8;
- if (mode_clock > 100000)
+ if (priv->tmds_clock > 100000)
adiv++; /* AUDIO_DIV_SERCLK_16 */
/* S/PDIF asks for a larger divider */
@@ -819,58 +838,281 @@ tda998x_configure_audio(struct tda998x_priv *priv,
return tda998x_write_aif(priv, &params->cea);
}
-/* DRM encoder functions */
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+ int i, ret;
+ struct tda998x_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ memcpy(audio.status, params->iec.status,
+ min(sizeof(audio.status), sizeof(params->iec.status)));
-static void tda998x_encoder_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+ daifmt->bit_clk_master || daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_I2S)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_I2S;
+ break;
+ case HDMI_SPDIF:
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_SPDIF)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_SPDIF;
+ break;
+ default:
+ dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+ return -EINVAL;
+ }
+
+ if (audio.config == 0) {
+ dev_err(dev, "%s: No audio configuration found\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->audio_mutex);
+ if (priv->supports_infoframes && priv->sink_has_audio)
+ ret = tda998x_configure_audio(priv, &audio);
+ else
+ ret = 0;
+
+ if (ret == 0)
+ priv->audio_params = audio;
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- priv->audio_params = p->audio_params;
+ mutex_lock(&priv->audio_mutex);
+
+ reg_write(priv, REG_ENA_AP, 0);
+
+ priv->audio_params.format = AFMT_UNUSED;
+
+ mutex_unlock(&priv->audio_mutex);
}
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- /* we only care about on or off: */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ mutex_lock(&priv->audio_mutex);
- if (mode == priv->dpms)
- return;
+ tda998x_audio_mute(priv, enable);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /* enable video ports, audio will be enabled later */
- reg_write(priv, REG_ENA_VP_0, 0xff);
- reg_write(priv, REG_ENA_VP_1, 0xff);
- reg_write(priv, REG_ENA_VP_2, 0xff);
- /* set muxing after enabling ports: */
- reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
- reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
- reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
- break;
- case DRM_MODE_DPMS_OFF:
- /* disable video ports */
- reg_write(priv, REG_ENA_VP_0, 0x00);
- reg_write(priv, REG_ENA_VP_1, 0x00);
- reg_write(priv, REG_ENA_VP_2, 0x00);
- break;
+ mutex_unlock(&priv->audio_mutex);
+ return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->audio_mutex);
+ memcpy(buf, priv->connector.eld,
+ min(sizeof(priv->connector.eld), len));
+ mutex_unlock(&priv->audio_mutex);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = tda998x_audio_hw_params,
+ .audio_shutdown = tda998x_audio_shutdown,
+ .digital_mute = tda998x_audio_digital_mute,
+ .get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+ struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 2,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+ if (priv->audio_port[i].format == AFMT_I2S &&
+ priv->audio_port[i].config != 0)
+ codec_data.i2s = 1;
+ if (priv->audio_port[i].format == AFMT_SPDIF &&
+ priv->audio_port[i].config != 0)
+ codec_data.spdif = 1;
}
- priv->dpms = mode;
+ priv->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
+/* DRM connector functions */
+
+static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ else
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static int tda998x_connector_fill_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ int ret;
+
+ mutex_lock(&priv->audio_mutex);
+ ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+
+ if (connector->edid_blob_ptr) {
+ struct edid *edid = (void *)connector->edid_blob_ptr->data;
+
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ } else {
+ priv->sink_has_audio = false;
+ }
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static enum drm_connector_status
+tda998x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
+
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void tda998x_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tda998x_connector_funcs = {
+ .dpms = tda998x_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = tda998x_connector_fill_modes,
+ .detect = tda998x_connector_detect,
+ .destroy = tda998x_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
+{
+ struct tda998x_priv *priv = data;
+ u8 offset, segptr;
+ int ret, i;
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
+
+ reg_write(priv, REG_DDC_ADDR, 0xa0);
+ reg_write(priv, REG_DDC_OFFS, offset);
+ reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(priv, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ priv->wq_edid_wait = 1;
+ reg_write(priv, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(priv, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ if (priv->hdmi->irq) {
+ i = wait_event_timeout(priv->wq_edid,
+ !priv->wq_edid_wait,
+ msecs_to_jiffies(100));
+ if (i < 0) {
+ dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
+ return i;
+ }
+ } else {
+ for (i = 100; i > 0; i--) {
+ msleep(1);
+ ret = reg_read(priv, REG_INT_FLAGS_2);
+ if (ret < 0)
+ return ret;
+ if (ret & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ }
+ }
+
+ if (i == 0) {
+ dev_err(&priv->hdmi->dev, "read edid timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
+ if (ret != length) {
+ dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tda998x_connector_get_modes(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ struct edid *edid;
+ int n;
+
+ /*
+ * If we get killed while waiting for the HPD timeout, return
+ * no modes found: we are not in a restartable path, so we
+ * can't handle signals gracefully.
+ */
+ if (tda998x_edid_delay_wait(priv))
+ return 0;
+
+ if (priv->rev == TDA19988)
+ reg_clear(priv, REG_TX4, TX4_PD_RAM);
+
+ edid = drm_do_get_edid(connector, read_edid_block, priv);
+
+ if (priv->rev == TDA19988)
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
+
+ if (!edid) {
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
+ return 0;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ kfree(edid);
+
+ return n;
}
static int tda998x_connector_mode_valid(struct drm_connector *connector,
@@ -888,6 +1130,80 @@ static int tda998x_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *
+tda998x_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+
+ return &priv->encoder;
+}
+
+static
+const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
+ .get_modes = tda998x_connector_get_modes,
+ .mode_valid = tda998x_connector_mode_valid,
+ .best_encoder = tda998x_connector_best_encoder,
+};
+
+static int tda998x_connector_init(struct tda998x_priv *priv,
+ struct drm_device *drm)
+{
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ connector->interlace_allowed = 1;
+
+ if (priv->hdmi->irq)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ drm_connector_helper_add(connector, &tda998x_connector_helper_funcs);
+ ret = drm_connector_init(drm, connector, &tda998x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ return ret;
+
+ drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+
+ return 0;
+}
+
+/* DRM encoder functions */
+
+static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ bool on;
+
+ /* we only care about on or off: */
+ on = mode == DRM_MODE_DPMS_ON;
+
+ if (on == priv->is_on)
+ return;
+
+ if (on) {
+ /* enable video ports, audio will be enabled later */
+ reg_write(priv, REG_ENA_VP_0, 0xff);
+ reg_write(priv, REG_ENA_VP_1, 0xff);
+ reg_write(priv, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
+
+ priv->is_on = true;
+ } else {
+ /* disable video ports */
+ reg_write(priv, REG_ENA_VP_0, 0x00);
+ reg_write(priv, REG_ENA_VP_1, 0x00);
+ reg_write(priv, REG_ENA_VP_2, 0x00);
+
+ priv->is_on = false;
+ }
+}
+
static void
tda998x_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -971,6 +1287,8 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
div = 3;
}
+ mutex_lock(&priv->audio_mutex);
+
/* mute the audio FIFO: */
reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -982,6 +1300,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* no pre-filter or interpolator: */
reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
HVF_CNTRL_0_INTPOL(0));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_PREFILT);
reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
VIP_CNTRL_4_BLC(0));
@@ -1004,6 +1323,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* set color matrix bypass flag: */
reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
MAT_CONTRL_MAT_SC(1));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_CSC);
/* set BIAS tmds value: */
reg_write(priv, REG_ANA_GENERAL, 0x09);
@@ -1064,8 +1384,22 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* must be last register set: */
reg_write(priv, REG_TBG_CNTRL_0, 0);
- /* Only setup the info frames if the sink is HDMI */
- if (priv->is_hdmi_sink) {
+ priv->tmds_clock = adjusted_mode->clock;
+
+ /* CEA-861B section 6 says that:
+ * CEA version 1 (CEA-861) has no support for infoframes.
+ * CEA version 2 (CEA-861A) supports version 1 AVI infoframes,
+ * and optional basic audio.
+ * CEA version 3 (CEA-861B) supports version 1 and 2 AVI infoframes,
+ * and optional digital audio, with audio infoframes.
+ *
+ * Since we only support generation of version 2 AVI infoframes,
+ * ignore CEA version 2 and below (in other words, behave as if
+ * we're a CEA-861 source).
+ */
+ priv->supports_infoframes = priv->connector.display_info.cea_rev >= 3;
+
+ if (priv->supports_infoframes) {
/* We need to turn HDMI HDCP stuff on to get audio through */
reg &= ~TBG_CNTRL_1_DWIN_DIS;
reg_write(priv, REG_TBG_CNTRL_1, reg);
@@ -1074,127 +1408,12 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
tda998x_write_avi(priv, adjusted_mode);
- if (priv->audio_params.format != AFMT_UNUSED) {
- mutex_lock(&priv->audio_mutex);
- tda998x_configure_audio(priv,
- &priv->audio_params,
- adjusted_mode->clock);
- mutex_unlock(&priv->audio_mutex);
- }
+ if (priv->audio_params.format != AFMT_UNUSED &&
+ priv->sink_has_audio)
+ tda998x_configure_audio(priv, &priv->audio_params);
}
-}
-static enum drm_connector_status
-tda998x_connector_detect(struct drm_connector *connector, bool force)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
-
- return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
-{
- struct tda998x_priv *priv = data;
- u8 offset, segptr;
- int ret, i;
-
- offset = (blk & 1) ? 128 : 0;
- segptr = blk / 2;
-
- reg_write(priv, REG_DDC_ADDR, 0xa0);
- reg_write(priv, REG_DDC_OFFS, offset);
- reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
- reg_write(priv, REG_DDC_SEGM, segptr);
-
- /* enable reading EDID: */
- priv->wq_edid_wait = 1;
- reg_write(priv, REG_EDID_CTRL, 0x1);
-
- /* flag must be cleared by sw: */
- reg_write(priv, REG_EDID_CTRL, 0x0);
-
- /* wait for block read to complete: */
- if (priv->hdmi->irq) {
- i = wait_event_timeout(priv->wq_edid,
- !priv->wq_edid_wait,
- msecs_to_jiffies(100));
- if (i < 0) {
- dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
- return i;
- }
- } else {
- for (i = 100; i > 0; i--) {
- msleep(1);
- ret = reg_read(priv, REG_INT_FLAGS_2);
- if (ret < 0)
- return ret;
- if (ret & INT_FLAGS_2_EDID_BLK_RD)
- break;
- }
- }
-
- if (i == 0) {
- dev_err(&priv->hdmi->dev, "read edid timeout\n");
- return -ETIMEDOUT;
- }
-
- ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
- if (ret != length) {
- dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
- blk, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int tda998x_connector_get_modes(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- struct edid *edid;
- int n;
-
- /*
- * If we get killed while waiting for the HPD timeout, return
- * no modes found: we are not in a restartable path, so we
- * can't handle signals gracefully.
- */
- if (tda998x_edid_delay_wait(priv))
- return 0;
-
- if (priv->rev == TDA19988)
- reg_clear(priv, REG_TX4, TX4_PD_RAM);
-
- edid = drm_do_get_edid(connector, read_edid_block, priv);
-
- if (priv->rev == TDA19988)
- reg_set(priv, REG_TX4, TX4_PD_RAM);
-
- if (!edid) {
- dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
- return 0;
- }
-
- drm_mode_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
- priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
- drm_edid_to_eld(connector, edid);
-
- kfree(edid);
-
- return n;
-}
-
-static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
- struct drm_connector *connector)
-{
- if (priv->hdmi->irq)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ mutex_unlock(&priv->audio_mutex);
}
static void tda998x_destroy(struct tda998x_priv *priv)
@@ -1215,146 +1434,6 @@ static void tda998x_destroy(struct tda998x_priv *priv)
i2c_unregister_device(priv->cec);
}
-static int tda998x_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- int i, ret;
- struct tda998x_audio_params audio = {
- .sample_width = params->sample_width,
- .sample_rate = params->sample_rate,
- .cea = params->cea,
- };
-
- if (!priv->encoder.crtc)
- return -ENODEV;
-
- memcpy(audio.status, params->iec.status,
- min(sizeof(audio.status), sizeof(params->iec.status)));
-
- switch (daifmt->fmt) {
- case HDMI_I2S:
- if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
- daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
- return -EINVAL;
- }
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_I2S)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_I2S;
- break;
- case HDMI_SPDIF:
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_SPDIF)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_SPDIF;
- break;
- default:
- dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
- return -EINVAL;
- }
-
- if (audio.config == 0) {
- dev_err(dev, "%s: No audio configutation found\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&priv->audio_mutex);
- ret = tda998x_configure_audio(priv,
- &audio,
- priv->encoder.crtc->hwmode.clock);
-
- if (ret == 0)
- priv->audio_params = audio;
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
-static void tda998x_audio_shutdown(struct device *dev, void *data)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- reg_write(priv, REG_ENA_AP, 0);
-
- priv->audio_params.format = AFMT_UNUSED;
-
- mutex_unlock(&priv->audio_mutex);
-}
-
-static int
-tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- tda998x_audio_mute(priv, enable);
-
- mutex_unlock(&priv->audio_mutex);
- return 0;
-}
-
-static int tda998x_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- struct drm_mode_config *config = &priv->encoder.dev->mode_config;
- struct drm_connector *connector;
- int ret = -ENODEV;
-
- mutex_lock(&config->mutex);
- list_for_each_entry(connector, &config->connector_list, head) {
- if (&priv->encoder == connector->encoder) {
- memcpy(buf, connector->eld,
- min(sizeof(connector->eld), len));
- ret = 0;
- }
- }
- mutex_unlock(&config->mutex);
-
- return ret;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
- .hw_params = tda998x_audio_hw_params,
- .audio_shutdown = tda998x_audio_shutdown,
- .digital_mute = tda998x_audio_digital_mute,
- .get_eld = tda998x_audio_get_eld,
-};
-
-static int tda998x_audio_codec_init(struct tda998x_priv *priv,
- struct device *dev)
-{
- struct hdmi_codec_pdata codec_data = {
- .ops = &audio_codec_ops,
- .max_i2s_channels = 2,
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
- if (priv->audio_port[i].format == AFMT_I2S &&
- priv->audio_port[i].config != 0)
- codec_data.i2s = 1;
- if (priv->audio_port[i].format == AFMT_SPDIF &&
- priv->audio_port[i].config != 0)
- codec_data.spdif = 1;
- }
-
- priv->audio_pdev = platform_device_register_data(
- dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(priv->audio_pdev);
-}
-
/* I2C driver functions */
static int tda998x_get_audio_ports(struct tda998x_priv *priv,
@@ -1404,22 +1483,21 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
- unsigned short cec_addr;
+
+ mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ priv->cec_addr = 0x34 + (client->addr & 0x03);
priv->current_page = 0xff;
priv->hdmi = client;
- /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
- cec_addr = 0x34 + (client->addr & 0x03);
- priv->cec = i2c_new_dummy(client->adapter, cec_addr);
+ priv->cec = i2c_new_dummy(client->adapter, priv->cec_addr);
if (!priv->cec)
return -ENODEV;
- priv->dpms = DRM_MODE_DPMS_OFF;
-
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
@@ -1479,7 +1557,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* initialize the optional IRQ */
if (client->irq) {
- int irqf_trigger;
+ unsigned long irq_flags;
/* init read EDID waitqueue and HPD work */
init_waitqueue_head(&priv->wq_edid);
@@ -1489,11 +1567,11 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
reg_read(priv, REG_INT_FLAGS_1);
reg_read(priv, REG_INT_FLAGS_2);
- irqf_trigger =
+ irq_flags =
irqd_get_trigger_type(irq_get_irq_data(client->irq));
+ irq_flags |= IRQF_SHARED | IRQF_ONESHOT;
ret = request_threaded_irq(client->irq, NULL,
- tda998x_irq_thread,
- irqf_trigger | IRQF_ONESHOT,
+ tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
dev_err(&client->dev,
@@ -1520,8 +1598,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->vip_cntrl_2 = video;
}
- mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
-
ret = tda998x_get_audio_ports(priv, np);
if (ret)
goto fail;
@@ -1568,44 +1644,25 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static struct drm_encoder *
-tda998x_connector_best_encoder(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- return &priv->encoder;
-}
-
-static
-const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
- .get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
- .best_encoder = tda998x_connector_best_encoder,
-};
-
-static void tda998x_connector_destroy(struct drm_connector *connector)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
- drm_connector_cleanup(connector);
-}
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
- return drm_atomic_helper_connector_dpms(connector, mode);
- else
- return drm_helper_connector_dpms(connector, mode);
+ priv->audio_params = p->audio_params;
}
-static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = tda998x_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = tda998x_connector_detect,
- .destroy = tda998x_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int tda998x_bind(struct device *dev, struct device *master, void *data)
{
struct tda998x_encoder_params *params = dev->platform_data;
@@ -1630,7 +1687,6 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
crtcs = 1 << 0;
}
- priv->connector.interlace_allowed = 1;
priv->encoder.possible_crtcs = crtcs;
ret = tda998x_create(client, priv);
@@ -1638,9 +1694,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
return ret;
if (!dev->of_node && params)
- tda998x_encoder_set_config(priv, params);
-
- tda998x_encoder_set_polling(priv, &priv->connector);
+ tda998x_set_config(priv, params);
drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
@@ -1648,16 +1702,10 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_encoder;
- drm_connector_helper_add(&priv->connector,
- &tda998x_connector_helper_funcs);
- ret = drm_connector_init(drm, &priv->connector,
- &tda998x_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = tda998x_connector_init(priv, drm);
if (ret)
goto err_connector;
- drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
-
return 0;
err_connector:
@@ -1685,6 +1733,10 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_warn(&client->dev, "adapter does not support I2C\n");
+ return -EIO;
+ }
return component_add(&client->dev, &tda998x_ops);
}
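A worked example of the cec_addr derivation added in tda998x_create() above; the helper name is hypothetical, and the 0x70..0x73 main-address range is an assumption about the usual TDA998x strapping:

#include <linux/types.h>

static inline u8 tda998x_cec_addr_for(u8 hdmi_addr)
{
	/* 0x70 -> 0x34, 0x71 -> 0x35, 0x72 -> 0x36, 0x73 -> 0x37 */
	return 0x34 + (hdmi_addr & 0x03);
}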
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index df96aed6975a..5ddde7349fbd 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,15 +36,20 @@ config DRM_I915
If "M" is selected, the module will be called i915.
-config DRM_I915_PRELIMINARY_HW_SUPPORT
- bool "Enable preliminary support for prerelease Intel hardware by default"
+config DRM_I915_ALPHA_SUPPORT
+ bool "Enable alpha quality support for new Intel hardware by default"
depends on DRM_I915
default n
help
- Choose this option if you have prerelease Intel hardware and want the
- i915 driver to support it by default. You can enable such support at
- runtime with the module option i915.preliminary_hw_support=1; this
- option changes the default for that module option.
+ Choose this option if you have new Intel hardware and want to enable
+ the alpha quality i915 driver support for the hardware in this kernel
+ version. You can also enable the support at runtime using the module
+ parameter i915.alpha_support=1; this option changes the default for
+ that module parameter.
+
+ It is recommended to upgrade to a kernel version with proper support
+ as soon as it is available. Generally fixes for platforms with alpha
+ support are not backported to older kernels.
If in doubt, say "N".
@@ -107,6 +112,15 @@ config DRM_I915_GVT
If in doubt, say "N".
+config DRM_I915_GVT_KVMGT
+ tristate "Enable KVM/VFIO support for Intel GVT-g"
+ depends on DRM_I915_GVT
+ depends on KVM
+ default n
+ help
+ Choose this option if you want to enable KVMGT support for
+ Intel GVT-g.
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0857e5035f4d..3dea46af9fe6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,7 +33,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
- i915_gem_fence.o \
+ i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem_internal.o \
i915_gem.o \
@@ -45,6 +45,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
i915_trace_points.o \
+ i915_vma.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
intel_hangcheck.o \
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 34ea4776af70..8a46a7f31d53 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -3,5 +3,8 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
-i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+
+CFLAGS_kvmgt.o := -Wno-unused-function
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4c687740f5f1..db516382a4d4 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -47,11 +47,9 @@ enum {
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
-
if (WARN_ON(bytes > 4))
return -EINVAL;
@@ -82,9 +80,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
- vgpu_aperture_sz(vgpu)
- >> PAGE_SHIFT, map,
- GVT_MAP_APERTURE);
+ vgpu_aperture_sz(vgpu) >>
+ PAGE_SHIFT, map);
if (ret)
return ret;
@@ -235,10 +232,9 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
int ret;
if (WARN_ON(bytes > 4))
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0084ece8d8ff..d26a092c70e8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1418,8 +1418,8 @@ static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
- int op_size = ((1 << (cmd_val(s, 0) & GENMASK(20, 19) >> 19)) *
- sizeof(u32));
+ int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+ sizeof(u32);
unsigned long gma, gma_high;
int ret = 0;
@@ -2537,7 +2537,8 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.rb_va = workload->shadow_ring_buffer_va;
s.workload = workload;
- if (bypass_scan_mask & (1 << workload->ring_id))
+ if ((bypass_scan_mask & (1 << workload->ring_id)) ||
+ gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
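The cmd_handler_mi_op_2f() hunk above is an operator-precedence fix: >> binds tighter than &, so the old expression masked the command word with GENMASK(20, 19) >> 19 == 0x3 instead of extracting bits 20:19. A worked comparison, with GENMASK(20, 19) == 0x180000:

/* old: 1 << (val & (0x180000 >> 19)) == 1 << (val & 0x3)  -- low bits, wrong
 * new: 1 << ((val & 0x180000) >> 19) == 1 << val[20:19]   -- intended field
 */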
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 7e1da1c563ca..bda85dff7b2a 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -502,8 +502,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* ACK of I2C_WRITE
* returned byte if it is READ
*/
-
- aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
+ aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24;
vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index de366b1d5196..f6dfc8b795ec 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -44,7 +44,7 @@
#define GVT_AUX_I2C_READ 0x1
#define GVT_AUX_I2C_STATUS 0x2
#define GVT_AUX_I2C_MOT 0x4
-#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
+#define GVT_AUX_I2C_REPLY_ACK 0x0
struct intel_vgpu_edid_data {
bool data_valid;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index c1f6019d8895..f32bb6f6495c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -838,23 +838,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long ring_bitmap)
+ unsigned long engine_mask)
{
- int bit;
- struct list_head *pos, *n;
- struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
- for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
- if (bit >= I915_NUM_ENGINES)
- break;
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
/* free the unsubmitted workloads in the queue */
- list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
- workload = container_of(pos,
- struct intel_vgpu_workload, list);
- list_del_init(&workload->list);
- free_workload(workload);
+ list_for_each_entry_safe(pos, n,
+ &vgpu->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ free_workload(pos);
}
- init_vgpu_execlist(vgpu, bit);
+ init_vgpu_execlist(vgpu, engine->id);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 635f31c6dcc1..7eced40a1e30 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -183,6 +183,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long ring_bitmap);
+ unsigned long engine_mask);
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6554da9f9f5b..7eaaf1c9ed2b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -138,36 +138,6 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)
-enum {
- GTT_TYPE_INVALID = -1,
-
- GTT_TYPE_GGTT_PTE,
-
- GTT_TYPE_PPGTT_PTE_4K_ENTRY,
- GTT_TYPE_PPGTT_PTE_2M_ENTRY,
- GTT_TYPE_PPGTT_PTE_1G_ENTRY,
-
- GTT_TYPE_PPGTT_PTE_ENTRY,
-
- GTT_TYPE_PPGTT_PDE_ENTRY,
- GTT_TYPE_PPGTT_PDP_ENTRY,
- GTT_TYPE_PPGTT_PML4_ENTRY,
-
- GTT_TYPE_PPGTT_ROOT_ENTRY,
-
- GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
- GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
-
- GTT_TYPE_PPGTT_ENTRY,
-
- GTT_TYPE_PPGTT_PTE_PT,
- GTT_TYPE_PPGTT_PDE_PT,
- GTT_TYPE_PPGTT_PDP_PT,
- GTT_TYPE_PPGTT_PML4_PT,
-
- GTT_TYPE_MAX,
-};
-
/*
* Mappings between GTT_TYPE* enumerations.
* Following information can be found according to the given type:
@@ -842,13 +812,18 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
+ intel_gvt_gtt_type_t cur_pt_type;
if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
return -EINVAL;
- if (ops->get_pfn(e) == vgpu->gtt.scratch_page_mfn)
- return 0;
-
+ if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ cur_pt_type = get_next_pt_type(e->type) + 1;
+ if (ops->get_pfn(e) ==
+ vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+ return 0;
+ }
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s) {
gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
@@ -999,7 +974,7 @@ fail:
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
- struct intel_gvt_gtt_entry *we, unsigned long index)
+ unsigned long index)
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
@@ -1008,34 +983,35 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
struct intel_gvt_gtt_entry e;
int ret;
- trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type,
- we->val64, index);
-
ppgtt_get_shadow_entry(spt, &e, index);
+
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+ index);
+
if (!ops->test_present(&e))
return 0;
- if (ops->get_pfn(&e) == vgpu->gtt.scratch_page_mfn)
+ if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
return 0;
- if (gtt_type_is_pt(get_next_pt_type(we->type))) {
- struct intel_vgpu_guest_page *g =
- intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
- if (!g) {
+ if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+ struct intel_vgpu_ppgtt_spt *s =
+ ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+ if (!s) {
gvt_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
- ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g));
+ ret = ppgtt_invalidate_shadow_page(s);
if (ret)
goto fail;
}
- ops->set_pfn(&e, vgpu->gtt.scratch_page_mfn);
+ ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, spt, we->val64, we->type);
+ vgpu->id, spt, e.val64, e.type);
return ret;
}
@@ -1256,23 +1232,16 @@ static int ppgtt_handle_guest_write_page_table(
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_gvt_gtt_entry ge;
- int old_present, new_present;
int ret;
+ int new_present;
- ppgtt_get_guest_entry(spt, &ge, index);
-
- old_present = ops->test_present(&ge);
new_present = ops->test_present(we);
- ppgtt_set_guest_entry(spt, we, index);
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ goto fail;
- if (old_present) {
- ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index);
- if (ret)
- goto fail;
- }
if (new_present) {
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
if (ret)
@@ -1318,7 +1287,7 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_ppgtt_spt *spt;
- struct intel_gvt_gtt_entry ge, e;
+ struct intel_gvt_gtt_entry ge;
unsigned long index;
int ret;
@@ -1329,9 +1298,6 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
for_each_set_bit(index, spt->post_shadow_bitmap,
GTT_ENTRY_NUM_IN_ONE_PAGE) {
ppgtt_get_guest_entry(spt, &ge, index);
- e = ge;
- e.val64 = 0;
- ppgtt_set_guest_entry(spt, &e, index);
ret = ppgtt_handle_guest_write_page_table(
&spt->guest_page, &ge, index);
@@ -1359,8 +1325,6 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
ppgtt_get_guest_entry(spt, &we, index);
- memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)),
- p_data, bytes);
ops->test_pse(&we);
@@ -1369,19 +1333,13 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
if (ret)
return ret;
} else {
- struct intel_gvt_gtt_entry ge;
-
- ppgtt_get_guest_entry(spt, &ge, index);
-
if (!test_bit(index, spt->post_shadow_bitmap)) {
- ret = ppgtt_handle_guest_entry_removal(gpt,
- &ge, index);
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
if (ret)
return ret;
}
ppgtt_set_post_shadow(spt, index);
- ppgtt_set_guest_entry(spt, &we, index);
}
if (!enable_out_of_sync)
@@ -1921,47 +1879,101 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
}
-static int create_scratch_page(struct intel_vgpu *vgpu)
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
- void *p;
- void *vaddr;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ int page_entry_num = GTT_PAGE_SIZE >>
+ vgpu->gvt->device_info.gtt_entry_size_shift;
+ struct page *scratch_pt;
unsigned long mfn;
+ int i;
+ void *p;
+
+ if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ return -EINVAL;
- gtt->scratch_page = alloc_page(GFP_KERNEL);
- if (!gtt->scratch_page) {
- gvt_err("Failed to allocate scratch page.\n");
+ scratch_pt = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!scratch_pt) {
+ gvt_err("fail to allocate scratch page\n");
return -ENOMEM;
}
- /* set to zero */
- p = kmap_atomic(gtt->scratch_page);
- memset(p, 0, PAGE_SIZE);
+ p = kmap_atomic(scratch_pt);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+ kunmap_atomic(p);
+ __free_page(scratch_pt);
+ return -EFAULT;
+ }
+ gtt->scratch_pt[type].page_mfn = mfn;
+ gtt->scratch_pt[type].page = scratch_pt;
+ gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+ vgpu->id, type, mfn);
+
+ /* Build the tree by filling the scratch pt with entries which
+ * point to the next level scratch pt or scratch page. The
+ * scratch_pt[type] indicates the scratch pt/scratch page used by
+ * the 'type' pt.
+ * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
+ * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt
+ * itself is of type GTT_TYPE_PPGTT_PTE_PT and is filled with the
+ * scratch page mfn.
+ */
+ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ struct intel_gvt_gtt_entry se;
+
+ memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+ se.type = get_entry_type(type - 1);
+ ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
+
+ /* The entry parameters, like present/writeable/cache type, are
+ * set to the same values as i915's scratch page tree uses.
+ */
+ se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+ if (type == GTT_TYPE_PPGTT_PDE_PT)
+ se.val64 |= PPAT_CACHED_INDEX;
+
+ for (i = 0; i < page_entry_num; i++)
+ ops->set_entry(p, &se, i, false, 0, vgpu);
+ }
+
kunmap_atomic(p);
- /* translate page to mfn */
- vaddr = page_address(gtt->scratch_page);
- mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
+ return 0;
+}
+
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i;
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
- __free_page(gtt->scratch_page);
- gtt->scratch_page = NULL;
- return -ENXIO;
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL) {
+ __free_page(vgpu->gtt.scratch_pt[i].page);
+ vgpu->gtt.scratch_pt[i].page = NULL;
+ vgpu->gtt.scratch_pt[i].page_mfn = 0;
+ }
}
- gtt->scratch_page_mfn = mfn;
- gvt_dbg_core("vgpu%d create scratch page: mfn=0x%lx\n", vgpu->id, mfn);
return 0;
}
-static void release_scratch_page(struct intel_vgpu *vgpu)
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
- if (vgpu->gtt.scratch_page != NULL) {
- __free_page(vgpu->gtt.scratch_page);
- vgpu->gtt.scratch_page = NULL;
- vgpu->gtt.scratch_page_mfn = 0;
+ int i, ret;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ ret = alloc_scratch_pages(vgpu, i);
+ if (ret)
+ goto err;
}
+
+ return 0;
+
+err:
+ release_scratch_page_tree(vgpu);
+ return ret;
}
/**
@@ -1995,7 +2007,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
gtt->ggtt_mm = ggtt_mm;
- return create_scratch_page(vgpu);
+ return create_scratch_page_tree(vgpu);
}
/**
@@ -2014,7 +2026,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
struct intel_vgpu_mm *mm;
ppgtt_free_all_shadow_page(vgpu);
- release_scratch_page(vgpu);
+ release_scratch_page_tree(vgpu);
list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, list);
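How the scratch tree built by create_scratch_page_tree() fits together, on the assumed reading that scratch_pt[type] is the page an entry inside a 'type' table points at once its real target is gone:

/*
 *   PML4 entry -> scratch_pt[PML4_PT] (acts as a PDP table)
 *   PDP  entry -> scratch_pt[PDP_PT]  (acts as a PDE table)
 *   PDE  entry -> scratch_pt[PDE_PT]  (acts as a PTE table)
 *   PTE  entry -> scratch_pt[PTE_PT]  (zero-filled data page)
 *
 * Each scratch table's own entries point one level down, marked
 * present and writeable (with the cache attribute on the PTE-level
 * entries), so a guest walk through unmapped PPGTT space always
 * terminates at the zeroed page.
 */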
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index e4dcde78f3f9..d250013bc37b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,36 @@ enum {
INTEL_GVT_MM_PPGTT,
};
+typedef enum {
+ GTT_TYPE_INVALID = -1,
+
+ GTT_TYPE_GGTT_PTE,
+
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_ENTRY,
+
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+ GTT_TYPE_PPGTT_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PML4_PT,
+
+ GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
struct intel_vgpu_mm {
int type;
bool initialized;
@@ -151,6 +181,12 @@ extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
struct intel_vgpu_guest_page;
+struct intel_vgpu_scratch_pt {
+ struct page *page;
+ unsigned long page_mfn;
+};
+
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
@@ -160,8 +196,8 @@ struct intel_vgpu_gtt {
atomic_t n_write_protected_guest_page;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
- struct page *scratch_page;
- unsigned long scratch_page_mfn;
+ struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 385969a89216..398877c3d2fd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -44,11 +44,14 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
-struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
+static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
+ .vgpu_create = intel_gvt_create_vgpu,
+ .vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_reset = intel_gvt_reset_vgpu,
};
/**
@@ -81,10 +84,12 @@ int intel_gvt_init_host(void)
symbol_get(xengt_mpt), "xengt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
} else {
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
/* not in Xen. Try KVMGT */
intel_gvt_host.mpt = try_then_request_module(
- symbol_get(kvmgt_mpt), "kvm");
+ symbol_get(kvmgt_mpt), "kvmgt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+#endif
}
/* Fail to load MPT modules - bail out */
@@ -193,6 +198,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_clean_vgpu_types(gvt);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -270,10 +278,25 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_cmd_parser;
- gvt_dbg_core("gvt device creation is done\n");
+ ret = intel_gvt_init_vgpu_types(gvt);
+ if (ret)
+ goto out_clean_thread;
+
+ ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
+ &intel_gvt_ops);
+ if (ret) {
+ gvt_err("failed to register gvt-g host device: %d\n", ret);
+ goto out_clean_types;
+ }
+
+ gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
+out_clean_types:
+ intel_gvt_clean_vgpu_types(gvt);
+out_clean_thread:
+ clean_service_thread(gvt);
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 62fc9e3ac5c6..3d4223e8ebe3 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -161,6 +161,20 @@ struct intel_vgpu {
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+ struct {
+ struct device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+ struct rb_root cache;
+ struct mutex cache_lock;
+ void *vfio_group;
+ struct notifier_block iommu_notifier;
+ } vdev;
+#endif
};
struct intel_gvt_gm {
@@ -190,6 +204,16 @@ struct intel_gvt_opregion {
u32 opregion_pa;
};
+#define NR_MAX_INTEL_VGPU_TYPES 20
+struct intel_vgpu_type {
+ char name[16];
+ unsigned int max_instance;
+ unsigned int avail_instance;
+ unsigned int low_gm_size;
+ unsigned int high_gm_size;
+ unsigned int fence;
+};
+
struct intel_gvt {
struct mutex lock;
struct drm_i915_private *dev_priv;
@@ -205,6 +229,8 @@ struct intel_gvt {
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct intel_vgpu_type *types;
+ unsigned int num_types;
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
@@ -231,6 +257,14 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
@@ -330,11 +364,14 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
}
}
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_creation_params *
- param);
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
+
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -369,10 +406,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
-int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
-int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
@@ -385,6 +422,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
+struct intel_gvt_ops {
+ int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
+ struct intel_vgpu_type *);
+ void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_reset)(struct intel_vgpu *);
+};
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 9ab1f95dddc5..522809710312 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1158,7 +1158,10 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- u32 mode = *(u32 *)p_data;
+ u32 mode;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
@@ -1275,19 +1278,18 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (offset) {
case 0x4ddc:
vgpu_vreg(vgpu, offset) = 0x8000003c;
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
break;
case 0x42080:
vgpu_vreg(vgpu, offset) = 0x8000;
+ /* WaCompressedResourceDisplayNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
break;
default:
return -EINVAL;
}
- /**
- * TODO: need detect stepping info after gvt contain such information
- * 0x4ddc enabled after C0, 0x42080 enabled after E0.
- */
- I915_WRITE(reg, vgpu_vreg(vgpu, offset));
return 0;
}
@@ -1367,6 +1369,9 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
int rc = 0;
unsigned int id = 0;
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) = 0;
+
switch (offset) {
case 0x4260:
id = RCS;
@@ -1392,6 +1397,23 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
return rc;
}
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ data |= RESET_CTL_READY_TO_RESET;
+ else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ data &= ~RESET_CTL_READY_TO_RESET;
+
+ vgpu_vreg(vgpu, offset) = data;
+ return 0;
+}
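The handler above leans on the i915 masked-register convention: in a 32-bit write, the upper 16 bits select which of the lower 16 bits actually change, so software can update individual bits without a read-modify-write. A small standalone sketch of that semantic (the two macros mirror the in-tree _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE definitions; masked_write() is a hypothetical helper showing how such a write is applied):

#include <stdint.h>

/* Upper half = write-enable mask, lower half = new bit values. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

/* Apply a masked write to the 16 live bits of a register. */
static uint16_t masked_write(uint16_t old, uint32_t val)
{
	uint16_t mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

So a guest writing MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET) flips only that one bit, which is why the handler tests the masked forms rather than the raw bit.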
+
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
f, s, am, rm, d, r, w); \
@@ -1485,7 +1507,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
@@ -1494,7 +1516,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_D(GAM_ECOCHK, D_ALL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x9030, D_ALL);
MMIO_D(0x20a0, D_ALL);
MMIO_D(0x2420, D_ALL);
@@ -1503,7 +1525,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x2438, D_ALL);
MMIO_D(0x243c, D_ALL);
MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0xe184, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
/* display */
@@ -2116,6 +2138,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_MBCTL, D_ALL);
MMIO_D(0x911c, D_ALL);
MMIO_D(0x9120, D_ALL);
+ MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GAB_CTL, D_ALL);
MMIO_D(0x48800, D_ALL);
@@ -2298,6 +2321,15 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+#define RING_REG(base) (base + 0xd0)
+ MMIO_RING_F(RING_REG, 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+#undef RING_REG
+
#define RING_REG(base) (base + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
@@ -2345,7 +2377,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
- MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
@@ -2364,7 +2396,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
MMIO_D(0xfdc, D_BDW);
- MMIO_D(GEN8_ROW_CHICKEN, D_BDW_PLUS);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
@@ -2375,10 +2407,10 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0xb10c, D_BDW);
MMIO_D(0xb110, D_BDW);
- MMIO_DH(0x24d0, D_BDW_PLUS, NULL, NULL);
- MMIO_DH(0x24d4, D_BDW_PLUS, NULL, NULL);
- MMIO_DH(0x24d8, D_BDW_PLUS, NULL, NULL);
- MMIO_DH(0x24dc, D_BDW_PLUS, NULL, NULL);
+ MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x83a4, D_BDW);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
@@ -2392,9 +2424,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0xe180, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_D(0x2248, D_BDW);
@@ -2425,6 +2457,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
MMIO_D(0x45504, D_SKL);
@@ -2574,8 +2607,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x51000, D_SKL);
MMIO_D(0x6c00c, D_SKL);
- MMIO_F(0xc800, 0x7f8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(0xb020, 0x80, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
MMIO_D(0xd08, D_SKL);
MMIO_D(0x20e0, D_SKL);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 027ef558d91c..30e543f5a703 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,21 +33,14 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
-struct intel_gvt_io_emulation_ops {
- int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
- int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
- int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
- int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
-};
-
-extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
-
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
+ int (*host_init)(struct device *dev, void *gvt, const void *ops);
+ void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@@ -60,8 +53,7 @@ struct intel_gvt_mpt {
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
- unsigned long mfn, unsigned int nr, bool map,
- int type);
+ unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
new file mode 100644
index 000000000000..dc0365033157
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -0,0 +1,597 @@
+/*
+ * KVMGT - the implementation of Intel mediated pass-through framework for KVM
+ *
+ * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Jike Song <jike.song@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/eventfd.h>
+#include <linux/uuid.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn,
+ long npage, int prot, unsigned long *phys_pfn)
+{
+ return 0;
+}
+static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn,
+ long npage)
+{
+ return 0;
+}
+
+static const struct intel_gvt_ops *intel_gvt_ops;
+
+/* helper macros copied from vfio-pci */
+#define VFIO_PCI_OFFSET_SHIFT 40
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+
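These helper macros pack a region index and an in-region offset into one 64-bit file offset, with the index in the top bits and the byte offset in the low 40. A tiny standalone usage sketch (uint64_t stand-ins for the kernel types):

#include <stdint.h>
#include <stdio.h>

#define OFFSET_SHIFT		40
#define INDEX_TO_OFFSET(i)	((uint64_t)(i) << OFFSET_SHIFT)
#define OFFSET_TO_INDEX(off)	((off) >> OFFSET_SHIFT)
#define OFFSET_MASK		(((uint64_t)1 << OFFSET_SHIFT) - 1)

int main(void)
{
	/* region 2, offset 0x100 within that region */
	uint64_t off = INDEX_TO_OFFSET(2) | 0x100;

	printf("region %u, offset 0x%x\n",
	       (unsigned int)OFFSET_TO_INDEX(off),
	       (unsigned int)(off & OFFSET_MASK));
	return 0;
}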
+struct vfio_region {
+ u32 type;
+ u32 subtype;
+ size_t size;
+ u32 flags;
+};
+
+struct kvmgt_pgfn {
+ gfn_t gfn;
+ struct hlist_node hnode;
+};
+
+struct kvmgt_guest_info {
+ struct kvm *kvm;
+ struct intel_vgpu *vgpu;
+ struct kvm_page_track_notifier_node track_node;
+#define NR_BKT (1 << 18)
+ struct hlist_head ptable[NR_BKT];
+#undef NR_BKT
+};
+
+struct gvt_dma {
+ struct rb_node node;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+};
+
+static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct rb_node *node = vgpu->vdev.cache.rb_node;
+ struct gvt_dma *ret = NULL;
+
+ while (node) {
+ struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+
+ if (gfn < itr->gfn)
+ node = node->rb_left;
+ else if (gfn > itr->gfn)
+ node = node->rb_right;
+ else {
+ ret = itr;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct gvt_dma *entry;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find(vgpu, gfn);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+
+ return entry == NULL ? 0 : entry->pfn;
+}
+
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+{
+ struct gvt_dma *new, *itr;
+ struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+
+ new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
+ if (!new)
+ return;
+
+ new->gfn = gfn;
+ new->pfn = pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while (*link) {
+ parent = *link;
+ itr = rb_entry(parent, struct gvt_dma, node);
+
+ if (gfn == itr->gfn)
+ goto out;
+ else if (gfn < itr->gfn)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &vgpu->vdev.cache);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+
+out:
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ kfree(new);
+}
+
+static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
+ struct gvt_dma *entry)
+{
+ rb_erase(&entry->node, &vgpu->vdev.cache);
+ kfree(entry);
+}
+
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct device *dev = vgpu->vdev.mdev;
+ struct gvt_dma *this;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ this = __gvt_cache_find(vgpu, gfn);
+ if (!this) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+ }
+
+ pfn = this->pfn;
+ WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1));
+ __gvt_cache_remove_entry(vgpu, this);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_init(struct intel_vgpu *vgpu)
+{
+ vgpu->vdev.cache = RB_ROOT;
+ mutex_init(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_destroy(struct intel_vgpu *vgpu)
+{
+ struct gvt_dma *dma;
+ struct rb_node *node = NULL;
+ struct device *dev = vgpu->vdev.mdev;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while ((node = rb_first(&vgpu->vdev.cache))) {
+ dma = rb_entry(node, struct gvt_dma, node);
+ pfn = dma->pfn;
+
+ kvmgt_unpin_pages(dev, &pfn, 1);
+ __gvt_cache_remove_entry(vgpu, dma);
+ }
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+ const char *name)
+{
+ int i;
+ struct intel_vgpu_type *t;
+ const char *driver_name = dev_driver_string(
+ &gvt->dev_priv->drm.pdev->dev);
+
+ for (i = 0; i < gvt->num_types; i++) {
+ t = &gvt->types[i];
+ if (!strncmp(t->name, name + strlen(driver_name) + 1,
+ sizeof(t->name)))
+ return t;
+ }
+
+ return NULL;
+}
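Note the pointer arithmetic in the comparison: the incoming name is assumed to carry a "<driver>-" style prefix, so skipping strlen(driver_name) + 1 bytes drops the driver string plus one separator character. With DRIVER_NAME "i915" (see the i915_drv.h portion later in this diff), a request such as a hypothetical "i915-GVTg_V4_2" is matched against the stored type name "GVTg_V4_2".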
+
+static struct attribute *type_attrs[] = {
+ NULL,
+};
+
+static struct attribute_group *intel_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (WARN_ON(!group))
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = type_attrs;
+ intel_vgpu_type_groups[i] = group;
+ }
+
+ return true;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = intel_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = intel_vgpu_type_groups[i];
+ kfree(group);
+ }
+}
+
+static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
+{
+ hash_init(info->ptable);
+}
+
+static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
+{
+ struct kvmgt_pgfn *p;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static struct kvmgt_pgfn *
+__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p, *res = NULL;
+
+ hash_for_each_possible(info->ptable, p, hnode, gfn) {
+ if (gfn == p->gfn) {
+ res = p;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ return !!p;
+}
+
+static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ return;
+
+ p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ if (WARN(!p, "gfn: 0x%llx\n", gfn))
+ return;
+
+ p->gfn = gfn;
+ hash_add(info->ptable, &p->hnode, gfn);
+}
+
+static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ if (p) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+{
+ if (!intel_gvt_init_vgpu_type_groups(gvt))
+ return -EFAULT;
+
+ intel_gvt_ops = ops;
+
+ /* MDEV is not yet available */
+ return -ENODEV;
+}
+
+static void kvmgt_host_exit(struct device *dev, void *gvt)
+{
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+}
+
+static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_add(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (!kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
+{
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
+ (void *)val, len);
+}
+
+static void kvmgt_page_track_flush_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ int i;
+ gfn_t gfn;
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < slot->npages; i++) {
+ gfn = slot->base_gfn + i;
+ if (kvmgt_gfn_is_write_protected(info, gfn)) {
+ kvm_slot_page_track_remove_page(kvm, slot, gfn,
+ KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
+static bool kvmgt_check_guest(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ char s[12];
+ unsigned int *i;
+
+ eax = KVM_CPUID_SIGNATURE;
+ ebx = ecx = edx = 0;
+
+ asm volatile ("cpuid"
+ : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ :
+ : "cc", "memory");
+ i = (unsigned int *)s;
+ i[0] = ebx;
+ i[1] = ecx;
+ i[2] = edx;
+
+ return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
+}
+
+/**
+ * NOTE:
+ * It's actually impossible to check whether we are running in a KVM host,
+ * since the "KVM host" is simply native. So we only detect the guest here.
+ */
+static int kvmgt_detect_host(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
+ return -ENODEV;
+ }
+#endif
+ return kvmgt_check_guest() ? -ENODEV : 0;
+}
+
+static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+static void kvmgt_detach_vgpu(unsigned long handle)
+{
+ /* nothing to do here */
+}
+
+static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct intel_vgpu *vgpu = info->vgpu;
+
+ if (vgpu->vdev.msi_trigger)
+ return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1;
+
+ return false;
+}
+
+static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
+{
+ unsigned long pfn;
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ int rc;
+
+ pfn = gvt_cache_find(info->vgpu, gfn);
+ if (pfn != 0)
+ return pfn;
+
+ rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (rc != 1) {
+ gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn);
+ return 0;
+ }
+
+ gvt_cache_add(info->vgpu, gfn, pfn);
+ return pfn;
+}
+
+static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa)
+{
+ unsigned long pfn;
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ pfn = kvmgt_gfn_to_pfn(handle, gfn);
+ if (!pfn)
+ return NULL;
+
+ return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa);
+}
+
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
+{
+ void *hva = NULL;
+
+ hva = kvmgt_gpa_to_hva(handle, gpa);
+ if (!hva)
+ return -EFAULT;
+
+ if (write)
+ memcpy(hva, buf, len);
+ else
+ memcpy(buf, hva, len);
+
+ return 0;
+}
+
+static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+}
+
+static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+}
+
+static unsigned long kvmgt_virt_to_pfn(void *addr)
+{
+ return PFN_DOWN(__pa(addr));
+}
+
+struct intel_gvt_mpt kvmgt_mpt = {
+ .detect_host = kvmgt_detect_host,
+ .host_init = kvmgt_host_init,
+ .host_exit = kvmgt_host_exit,
+ .attach_vgpu = kvmgt_attach_vgpu,
+ .detach_vgpu = kvmgt_detach_vgpu,
+ .inject_msi = kvmgt_inject_msi,
+ .from_virt_to_mfn = kvmgt_virt_to_pfn,
+ .set_wp_page = kvmgt_write_protect_add,
+ .unset_wp_page = kvmgt_write_protect_remove,
+ .read_gpa = kvmgt_read_gpa,
+ .write_gpa = kvmgt_write_gpa,
+ .gfn_to_mfn = kvmgt_gfn_to_pfn,
+};
+EXPORT_SYMBOL_GPL(kvmgt_mpt);
+
+static int __init kvmgt_init(void)
+{
+ return 0;
+}
+
+static void __exit kvmgt_exit(void)
+{
+}
+
+module_init(kvmgt_init);
+module_exit(kvmgt_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 585b01f63254..09c9450a1946 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -67,10 +67,9 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
@@ -179,10 +178,9 @@ err:
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 9dc739a01892..87d5b5e366a3 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -87,10 +87,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
})
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
-int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
- unsigned int bytes);
-int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
- unsigned int bytes);
+
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 67858782d327..1af5830c0a56 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -56,6 +56,35 @@ static inline int intel_gvt_hypervisor_detect_host(void)
}
/**
+ * intel_gvt_hypervisor_host_init - init GVT-g host side
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+static inline int intel_gvt_hypervisor_host_init(struct device *dev,
+ void *gvt, const void *ops)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_init)
+ return 0;
+
+ return intel_gvt_host.mpt->host_init(dev, gvt, ops);
+}
+
+/**
+ * intel_gvt_hypervisor_host_exit - exit GVT-g host side
+ */
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
+ void *gvt)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_exit)
+ return;
+
+ intel_gvt_host.mpt->host_exit(dev, gvt);
+}
+
+/**
* intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
* related state inside the hypervisor.
*
@@ -64,6 +93,10 @@ static inline int intel_gvt_hypervisor_detect_host(void)
*/
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->attach_vgpu)
+ return 0;
+
return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}
@@ -76,6 +109,10 @@ static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
*/
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->detach_vgpu)
+ return;
+
intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}
@@ -224,11 +261,6 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
-enum {
- GVT_MAP_APERTURE = 0,
- GVT_MAP_OPREGION,
-};
-
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
@@ -236,7 +268,6 @@ enum {
* @mfn: host PFN
* @nr: amount of PFNs
* @map: map or unmap
- * @type: map type
*
* Returns:
* Zero on success, negative error code if failed.
@@ -244,10 +275,14 @@ enum {
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long mfn, unsigned int nr,
- bool map, int type)
+ bool map)
{
+ /* an MPT implementation could have MMIO mapped elsewhere */
+ if (!intel_gvt_host.mpt->map_gfn_to_mfn)
+ return 0;
+
return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
- map, type);
+ map);
}
/**
@@ -263,6 +298,10 @@ static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
static inline int intel_gvt_hypervisor_set_trap_area(
struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
+ /* an MPT implementation could have MMIO trapped elsewhere */
+ if (!intel_gvt_host.mpt->set_trap_area)
+ return 0;
+
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 95218913b0bc..d2a0fbc896c3 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -73,7 +73,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
- mfn, 1, map, GVT_MAP_OPREGION);
+ mfn, 1, map);
if (ret) {
gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
return ret;
@@ -89,28 +89,18 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
*/
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
- int i;
-
gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
if (!vgpu_opregion(vgpu)->va)
return;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- vunmap(vgpu_opregion(vgpu)->va);
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- if (vgpu_opregion(vgpu)->pages[i]) {
- put_page(vgpu_opregion(vgpu)->pages[i]);
- vgpu_opregion(vgpu)->pages[i] = NULL;
- }
- }
- } else {
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
INTEL_GVT_OPREGION_PORDER);
- }
- vgpu_opregion(vgpu)->va = NULL;
+ vgpu_opregion(vgpu)->va = NULL;
+ }
}
/**
@@ -137,22 +127,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
ret = map_vgpu_opregion(vgpu, true);
if (ret)
return ret;
- } else {
- gvt_dbg_core("emulate opregion from userspace\n");
-
- /*
- * If opregion pages are not allocated from host kenrel,
- * most of the params are meaningless
- */
- ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
- 0, /* not used */
- 0, /* not used */
- 2, /* not used */
- 1,
- GVT_MAP_OPREGION);
- if (ret)
- return ret;
}
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 3af894b3d257..44136b1f3aab 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -152,6 +152,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg(vgpu, regs[ring_id]) = 0;
intel_uncore_forcewake_put(dev_priv, fw);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 1df6a5460f3e..678b0be85376 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -36,12 +36,10 @@
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
- struct intel_vgpu_execlist *execlist;
enum intel_engine_id i;
struct intel_engine_cs *engine;
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- execlist = &vgpu->execlist[i];
if (!list_empty(workload_q_head(vgpu, i)))
return true;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 18acb45dd14d..f898df38dd9a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -89,15 +89,15 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
- dst = kmap_atomic(page);
+ dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
- kunmap_atomic(dst);
+ kunmap(page);
i++;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ shadow_ring_context = kmap(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
@@ -123,7 +123,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
- kunmap_atomic(shadow_ring_context);
+ kunmap(page);
return 0;
}
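One likely reason for the kmap_atomic to kmap switch throughout this file: code between kmap_atomic() and kunmap_atomic() must not sleep, while the intel_gvt_hypervisor_read_gpa()/write_gpa() calls made while the page is mapped may block, so the sleepable kmap()/kunmap() pair is the safe variant here.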
@@ -160,8 +160,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
@@ -174,6 +172,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_err("fail to allocate gem request\n");
@@ -185,40 +185,35 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
workload->req = i915_gem_request_get(rq);
- mutex_lock(&gvt->lock);
-
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
- goto err;
+ goto out;
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto err;
+ goto out;
ret = populate_shadow_context(workload);
if (ret)
- goto err;
+ goto out;
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
- goto err;
+ goto out;
}
- mutex_unlock(&gvt->lock);
-
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
- i915_add_request_no_flush(rq);
+ ret = 0;
workload->dispatched = true;
- return 0;
-err:
- workload->status = ret;
-
- mutex_unlock(&gvt->lock);
+out:
+ if (ret)
+ workload->status = ret;
i915_add_request_no_flush(rq);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
@@ -318,10 +313,10 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
- src = kmap_atomic(page);
+ src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
- kunmap_atomic(src);
+ kunmap(page);
i++;
}
@@ -329,7 +324,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ shadow_ring_context = kmap(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -347,7 +342,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
- kunmap_atomic(shadow_ring_context);
+ kunmap(page);
}
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
@@ -438,9 +433,9 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_lock(&gvt->dev_priv->drm.struct_mutex);
+ mutex_lock(&gvt->lock);
ret = dispatch_workload(workload);
- mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+ mutex_unlock(&gvt->lock);
if (ret) {
gvt_err("fail to dispatch workload, skip\n");
@@ -455,15 +450,15 @@ static int workload_thread(void *priv)
if (lret < 0) {
workload->status = lret;
gvt_err("fail to wait workload, skip\n");
+ } else {
+ workload->status = 0;
}
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
- mutex_lock(&gvt->dev_priv->drm.struct_mutex);
complete_current_workload(gvt, ring_id);
- mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
i915_gem_request_put(fetch_and_zero(&workload->req));
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 4f54005b976d..4f64845d8a4c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -46,9 +46,13 @@ int setup_vgpu_mmio(struct intel_vgpu *vgpu)
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
- vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
- if (!vgpu->mmio.vreg)
- return -ENOMEM;
+ if (vgpu->mmio.vreg)
+ memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
+ else {
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+ }
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
@@ -95,6 +99,7 @@ static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
@@ -133,6 +138,106 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
}
/**
+ * intel_gvt_init_vgpu_types - initialize vGPU type list
+ * @gvt: GVT device
+ *
+ * Initialize the vGPU type list based on available resources.
+ *
+ */
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
+{
+ unsigned int num_types;
+ unsigned int i, low_avail;
+ unsigned int min_low;
+
+ /* A vGPU type name has the form GVTg_Vx_y, where 'x' encodes
+ * the physical GPU generation and 'y' is the maximum number of
+ * vGPU instances of this type that can be created on one
+ * physical GPU.
+ *
+ * Depending on the physical SKU's resources, types such as
+ * GVTg_V4_8, GVTg_V4_4 and GVTg_V4_2 may be exposed; different
+ * vGPU types can coexist on the same physical GPU as resources
+ * allow. Each type carries an "avail_instance" count indicating
+ * how many more instances of that type can still be created.
+ *
+ * A static size is used here for now, since types are
+ * initialized early.
+ */
+ low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+ num_types = 4;
+
+ gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+ GFP_KERNEL);
+ if (!gvt->types)
+ return -ENOMEM;
+
+ min_low = MB_TO_BYTES(32);
+ for (i = 0; i < num_types; ++i) {
+ if (low_avail / min_low == 0)
+ break;
+ gvt->types[i].low_gm_size = min_low;
+ gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].fence = 4;
+ gvt->types[i].max_instance = low_avail / min_low;
+ gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ if (IS_GEN8(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V4_%u",
+ gvt->types[i].max_instance);
+ else if (IS_GEN9(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V5_%u",
+ gvt->types[i].max_instance);
+
+ min_low <<= 1;
+ gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance,
+ gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+
+ gvt->num_types = i;
+ return 0;
+}
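To make the sizing loop concrete: with the static constants above, low_avail = 256MB - HOST_LOW_GM_SIZE (128MB) = 128MB, and min_low doubles from 32MB, so exactly three types come out before low_avail / min_low reaches zero. A standalone sketch of the arithmetic (gen8 naming assumed):

#include <stdio.h>

int main(void)
{
	unsigned int low_avail = 256 - 128;	/* in MB */
	unsigned int min_low = 32;
	int i;

	for (i = 0; i < 4 && low_avail / min_low; i++, min_low <<= 1)
		printf("GVTg_V4_%u: low %uMB high %uMB fence 4\n",
		       low_avail / min_low, min_low, 3 * min_low);
	/* GVTg_V4_4 (32/96MB), GVTg_V4_2 (64/192MB), GVTg_V4_1 (128/384MB) */
	return 0;
}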
+
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
+{
+ kfree(gvt->types);
+}
+
+static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+{
+ int i;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+ unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+
+ /* This should be derived from the maximum hw resource size,
+ * but keep the static config for now.
+ */
+ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
+ high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
+ fence_min = fence_avail / gvt->types[i].fence;
+ total_min = min(min(low_gm_min, high_gm_min), fence_min);
+ gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
+ total_min);
+
+ gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+}
+
+/**
* intel_gvt_destroy_vgpu - destroy a virtual GPU
* @vgpu: virtual GPU
*
@@ -166,20 +271,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
clean_vgpu_mmio(vgpu);
vfree(vgpu);
+ intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
}
-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @param: vGPU creation parameters
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *param)
{
struct intel_vgpu *vgpu;
@@ -224,15 +320,9 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- ret = intel_vgpu_init_opregion(vgpu, 0);
- if (ret)
- goto out_clean_gtt;
- }
-
ret = intel_vgpu_init_display(vgpu);
if (ret)
- goto out_clean_opregion;
+ goto out_clean_gtt;
ret = intel_vgpu_init_execlist(vgpu);
if (ret)
@@ -257,8 +347,6 @@ out_clean_execlist:
intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
-out_clean_opregion:
- intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
@@ -272,3 +360,49 @@ out_free_vgpu:
mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
+
+/**
+ * intel_gvt_create_vgpu - create a virtual GPU
+ * @gvt: GVT device
+ * @type: type of the vGPU to create
+ *
+ * This function is called when user wants to create a virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type)
+{
+ struct intel_vgpu_creation_params param;
+ struct intel_vgpu *vgpu;
+
+ param.handle = 0;
+ param.low_gm_sz = type->low_gm_size;
+ param.high_gm_sz = type->high_gm_size;
+ param.fence_sz = type->fence;
+
+ /* XXX current param based on MB */
+ param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
+ param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+
+ vgpu = __intel_gvt_create_vgpu(gvt, &param);
+ if (IS_ERR(vgpu))
+ return vgpu;
+
+ /* recalculate the remaining available instances for each type */
+ intel_gvt_update_vgpu_types(gvt);
+
+ return vgpu;
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to reset a virtual GPU.
+ *
+ */
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
+{
+}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6b159bab42b0..b7f42c448a44 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -547,11 +547,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+ struct intel_engine_cs *engine = work->flip_queued_req->engine;
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
- i915_gem_request_get_seqno(work->flip_queued_req),
+ work->flip_queued_req->global_seqno,
atomic_read(&dev_priv->gt.global_timeline.next_seqno),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
@@ -631,8 +631,9 @@ static void print_request(struct seq_file *m,
struct drm_i915_gem_request *rq,
const char *prefix)
{
- seq_printf(m, "%s%x [%x:%x] @ %d: %s\n", prefix,
+ seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+ rq->priotree.priority,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rq->timeline->common->name);
}
@@ -1761,8 +1762,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
- seq_printf(m, "self-refresh: %s\n",
- sr_enabled ? "enabled" : "disabled");
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
return 0;
}
@@ -3216,6 +3216,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
if (i915.enable_execlists) {
u32 ptr, read, write;
+ struct rb_node *rb;
seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
I915_READ(RING_EXECLIST_STATUS_LO(engine)),
@@ -3254,11 +3255,12 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "\t\tELSP[1] idle\n");
rcu_read_unlock();
- spin_lock_irq(&engine->execlist_lock);
- list_for_each_entry(rq, &engine->execlist_queue, execlist_link) {
+ spin_lock_irq(&engine->timeline->lock);
+ for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+ rq = rb_entry(rb, typeof(*rq), priotree.node);
print_request(m, rq, "\t\tQ ");
}
- spin_unlock_irq(&engine->execlist_lock);
+ spin_unlock_irq(&engine->timeline->lock);
} else if (INTEL_GEN(dev_priv) > 6) {
seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine)));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0213a3090ab3..445fec9c2841 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,7 +150,7 @@ static void intel_detect_pch(struct drm_device *dev)
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
*/
- if (INTEL_INFO(dev)->num_pipes == 0) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
return;
}
@@ -323,6 +323,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = i915_gem_mmap_gtt_version();
break;
+ case I915_PARAM_HAS_SCHEDULER:
+ value = dev_priv->engine[RCS] &&
+ dev_priv->engine[RCS]->schedule;
+ break;
case I915_PARAM_MMAP_VERSION:
/* Remember to bump this if the version changes! */
case I915_PARAM_HAS_GEM:
@@ -374,12 +378,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -406,7 +410,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -420,7 +424,7 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -460,7 +464,7 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
@@ -491,7 +495,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
struct drm_device *dev = cookie;
- intel_modeset_vga_set_state(dev, state);
+ intel_modeset_vga_set_state(to_i915(dev), state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -607,7 +611,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
ret = intel_fbdev_init(dev);
@@ -879,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
@@ -1168,8 +1172,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
/**
* i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
@@ -1438,7 +1442,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev);
+ i915_gem_suspend_gtt_mappings(dev_priv);
i915_save_state(dev);
@@ -1512,7 +1516,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
- if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+ if (!(hibernation && INTEL_GEN(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
@@ -2422,7 +2426,7 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6f4a6bcf6ed4..56002a52936d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -60,11 +60,15 @@
#include "intel_ringbuffer.h"
#include "i915_gem.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_vma.h"
+
#include "intel_gvt.h"
/* General customization:
@@ -72,8 +76,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20161108"
-#define DRIVER_TIMESTAMP 1478587895
+#define DRIVER_DATE "20161121"
+#define DRIVER_TIMESTAMP 1479717903
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -125,6 +129,11 @@ static inline const char *onoff(bool v)
return v ? "on" : "off";
}
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
@@ -459,23 +468,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_i915_fence_reg {
- struct list_head link;
- struct drm_i915_private *i915;
- struct i915_vma *vma;
- int pin_count;
- int id;
- /**
- * Whether the tiling parameters for the currently
- * associated fence register have changed. Note that
- * for the purposes of tracking tiling changes we also
- * treat the unfenced register, the register slot that
- * the object occupies whilst it executes a fenced
- * command (such as BLT on gen2/3), as a "fence".
- */
- bool dirty;
-};
-
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -487,6 +479,7 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
+struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
@@ -500,8 +493,12 @@ struct drm_i915_display_funcs {
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate);
- void (*initial_watermarks)(struct intel_crtc_state *cstate);
- void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
@@ -562,6 +559,18 @@ enum forcewake_domains {
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
+enum decoupled_power_domain {
+ GEN9_DECOUPLED_PD_BLITTER = 0,
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+enum decoupled_ops {
+ GEN9_DECOUPLED_OP_WRITE = 0,
+ GEN9_DECOUPLED_OP_READ
+};
+
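The two enums above only name the Gen9 decoupled-MMIO power wells and access directions; the actual register programming is not part of this hunk. A small illustrative helper — not from this series — showing how such enums typically get consumed for debug output:

    /* Hypothetical helper (an assumption, not driver code): name the
     * decoupled power wells for logging. */
    static const char *decoupled_pd_name(enum decoupled_power_domain pd)
    {
            switch (pd) {
            case GEN9_DECOUPLED_PD_BLITTER: return "blitter";
            case GEN9_DECOUPLED_PD_RENDER:  return "render";
            case GEN9_DECOUPLED_PD_MEDIA:   return "media";
            case GEN9_DECOUPLED_PD_ALL:     return "all";
            }
            return "unknown";
    }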
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op);
@@ -668,7 +677,7 @@ struct intel_csr {
func(is_skylake); \
func(is_broxton); \
func(is_kabylake); \
- func(is_preliminary); \
+ func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(has_csr); \
@@ -696,7 +705,8 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
- func(supports_tv)
+ func(supports_tv); \
+ func(has_decoupled_mmio)
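The flag list above is an x-macro: one list expands into the device-info bitfields in one place and into debug printing in another, so adding has_decoupled_mmio here is the single point of change. A self-contained miniature of the pattern, reusing the driver's DEFINE_FLAG/PRINT_FLAG naming:

    #include <stdio.h>

    #define FOR_EACH_FLAG(func) \
            func(has_llc); \
            func(has_snoop); \
            func(has_decoupled_mmio)

    struct device_info {
    #define DEFINE_FLAG(name) unsigned int name:1
            FOR_EACH_FLAG(DEFINE_FLAG);
    #undef DEFINE_FLAG
    };

    static void dump(const struct device_info *info)
    {
    #define PRINT_FLAG(name) printf(#name ": %s\n", info->name ? "yes" : "no")
            FOR_EACH_FLAG(PRINT_FLAG);
    #undef PRINT_FLAG
    }

    int main(void)
    {
            struct device_info info = { .has_llc = 1 };

            dump(&info);
            return 0;
    }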
struct sseu_dev_info {
u8 slice_mask;
@@ -949,6 +959,7 @@ struct i915_gem_context {
/* Unique identifier for this context, used by the hw for tracking */
unsigned int hw_id;
u32 user_handle;
+ int priority; /* greater priorities are serviced first */
u32 ggtt_alignment;
@@ -1791,6 +1802,7 @@ struct drm_i915_private {
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
+ struct kmem_cache *dependencies;
const struct intel_device_info info;
@@ -2053,13 +2065,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /*
- * The skl_wm_values structure is a bit too big for stack
- * allocation, so we keep the staging struct where we store
- * intermediate results here instead.
- */
- struct skl_wm_values skl_results;
-
/* current hardware state */
union {
struct ilk_wm_values hw;
@@ -2179,31 +2184,6 @@ enum hdmi_force_audio {
#define I915_GTT_OFFSET_NONE ((u32)-1)
-struct drm_i915_gem_object_ops {
- unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
-#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
-
- /* Interface between the GEM object and its backing storage.
- * get_pages() is called once prior to the use of the associated set
- * of pages before to binding them into the GTT, and put_pages() is
- * called after we no longer need them. As we expect there to be
- * associated cost with migrating pages between the backing storage
- * and making them available for the GPU (e.g. clflush), we may hold
- * onto the pages after they are no longer referenced by the GPU
- * in case they may be used again shortly (for example migrating the
- * pages to a different memory domain within the GTT). put_pages()
- * will therefore most likely be called when the object itself is
- * being released or under memory pressure (where we attempt to
- * reap pages for the shrinker).
- */
- struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
-
- int (*dmabuf_export)(struct drm_i915_gem_object *);
- void (*release)(struct drm_i915_gem_object *);
-};
-
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-wise. This
@@ -2225,292 +2205,6 @@ struct drm_i915_gem_object_ops {
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-struct drm_i915_gem_object {
- struct drm_gem_object base;
-
- const struct drm_i915_gem_object_ops *ops;
-
- /** List of VMAs backed by this object */
- struct list_head vma_list;
- struct rb_root vma_tree;
-
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
- struct list_head global_link;
- union {
- struct rcu_head rcu;
- struct llist_node freed;
- };
-
- /**
- * Whether the object is currently in the GGTT mmap.
- */
- struct list_head userfault_link;
-
- /** Used in execbuf to temporarily hold a ref */
- struct list_head obj_exec_link;
-
- struct list_head batch_pool_link;
-
- unsigned long flags;
-
- /**
- * Have we taken a reference for the object for incomplete GPU
- * activity?
- */
-#define I915_BO_ACTIVE_REF 0
-
- /*
- * Is the object to be mapped as read-only to the GPU
- * Only honoured if hardware has relevant pte bit
- */
- unsigned long gt_ro:1;
- unsigned int cache_level:3;
- unsigned int cache_dirty:1;
-
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
-
- /** Current tiling stride for the object, if it's tiled. */
- unsigned int tiling_and_stride;
-#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
-#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
-#define STRIDE_MASK (~TILING_MASK)
-
- /** Count of VMA actually bound by this object */
- unsigned int bind_count;
- unsigned int active_count;
- unsigned int pin_display;
-
- struct {
- struct mutex lock; /* protects the pages and their use */
- atomic_t pages_pin_count;
-
- struct sg_table *pages;
- void *mapping;
-
- struct i915_gem_object_page_iter {
- struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
-
- struct radix_tree_root radix;
- struct mutex lock; /* protects this cache */
- } get_page;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * This is set if the object has been written to since the
- * pages were last acquired.
- */
- bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
- } mm;
-
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct reservation_object *resv;
-
- /** References from framebuffers, locks out tiling changes. */
- unsigned long framebuffer_references;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
-
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
-
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
-
- struct reservation_object __builtin_resv;
-};
-
-static inline struct drm_i915_gem_object *
-to_intel_bo(struct drm_gem_object *gem)
-{
- /* Assert that to_intel_bo(NULL) == NULL */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
-
- return container_of(gem, struct drm_i915_gem_object, base);
-}
-
-/**
- * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
- * @filp: DRM file private data
- * @handle: userspace handle
- *
- * Returns:
- *
- * A pointer to the object named by the handle if such exists on @filp, NULL
- * otherwise. This object is only valid whilst under the RCU read lock, and
- * note carefully that the object may be in the process of being destroyed.
- */
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
-{
-#ifdef CONFIG_LOCKDEP
- WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
-#endif
- return idr_find(&file->object_idr, handle);
-}
-
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup(struct drm_file *file, u32 handle)
-{
- struct drm_i915_gem_object *obj;
-
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
- rcu_read_unlock();
-
- return obj;
-}
-
-__deprecated
-extern struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *file, u32 handle);
-
-__attribute__((nonnull))
-static inline struct drm_i915_gem_object *
-i915_gem_object_get(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_reference(&obj->base);
- return obj;
-}
-
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put(struct drm_i915_gem_object *obj)
-{
- __drm_gem_object_unreference(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
-static inline bool
-i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
-{
- return atomic_read(&obj->base.refcount.refcount) == 0;
-}
-
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
-}
-
-static inline bool
-i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
-}
-
-static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return obj->active_count;
-}
-
-static inline bool
-i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
-{
- return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
-
-static inline unsigned int
-i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & TILING_MASK;
-}
-
-static inline bool
-i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
-}
-
-static inline unsigned int
-i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & STRIDE_MASK;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
-{
- struct intel_engine_cs *engine = NULL;
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->resv);
- rcu_read_unlock();
-
- if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
- engine = to_request(fence)->engine;
- dma_fence_put(fence);
-
- return engine;
-}
-
-static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
-{
- i915_gem_object_get(vma->obj);
- return vma;
-}
-
-static inline void i915_vma_put(struct i915_vma *vma)
-{
- i915_gem_object_put(vma->obj);
-}
-
/*
* Optimised SGL iterator for GEM objects
*/
@@ -2683,24 +2377,19 @@ struct drm_i915_cmd_table {
int count;
};
-/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
-#define __I915__(p) ({ \
- struct drm_i915_private *__p; \
- if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
- __p = (struct drm_i915_private *)p; \
- else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
- __p = to_i915((struct drm_device *)p); \
- else \
- BUILD_BUG(); \
- __p; \
-})
-#define INTEL_INFO(p) (&__I915__(p)->info)
+static inline const struct intel_device_info *
+intel_info(const struct drm_i915_private *dev_priv)
+{
+ return &dev_priv->info;
+}
+
+#define INTEL_INFO(dev_priv) intel_info((dev_priv))
#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
#define REVID_FOREVER 0xff
-#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
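Swapping the __I915__() dispatch macro for a typed inline trades convenience for compile-time checking: the macro silently accepted a struct drm_device * and converted it via to_i915(), whereas the inline makes every such conversion explicit at the call site. The shape in miniature (struct layouts abbreviated):

    struct intel_device_info { int gen; };
    struct drm_i915_private {
            /* ... */
            struct intel_device_info info;
    };

    static inline const struct intel_device_info *
    intel_info(const struct drm_i915_private *dev_priv)
    {
            return &dev_priv->info;
    }

    /* intel_info(dev) with a struct drm_device *dev is now a compile
     * error; callers must write intel_info(to_i915(dev)) themselves. */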
/*
@@ -2797,7 +2486,7 @@ struct drm_i915_cmd_table {
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
-#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
@@ -2851,28 +2540,31 @@ struct drm_i915_cmd_table {
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+ (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
+#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
+#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
-#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
-#define USES_PPGTT(dev) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
+#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+#define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_contexts)
+#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
+
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
+ ((dev_priv)->info.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
@@ -2889,8 +2581,8 @@ struct drm_i915_cmd_table {
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
*/
-#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
+#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
+#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -2898,24 +2590,24 @@ struct drm_i915_cmd_table {
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
-#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
+#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
-#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
+#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
-#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
@@ -2925,13 +2617,13 @@ struct drm_i915_cmd_table {
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc)
-#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
-#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
+#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
+#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
+#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
-#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2971,6 +2663,8 @@ struct drm_i915_cmd_table {
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
+
#include "i915_trace.h"
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -3222,13 +2916,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
-int __must_check i915_vma_unbind(struct i915_vma *vma);
-void i915_vma_close(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
-
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
@@ -3405,10 +3092,10 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
@@ -3419,6 +3106,11 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
struct intel_rps_client *rps);
+int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int priority);
+#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
@@ -3478,57 +3170,13 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
-/* i915_gem_fence.c */
+/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
-/**
- * i915_vma_pin_fence - pin fencing state
- * @vma: vma to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * vma (and its object) is ready to be used as a scanout target. Fencing
- * status must be synchronized first by calling i915_vma_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_vma_unpin_fence().
- *
- * Returns:
- *
- * True if the vma has a fence, false otherwise.
- */
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
-}
-
-/**
- * i915_vma_unpin_fence - unpin fencing state
- * @vma: vma to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_vma_pin_fence. It will handle both objects with and without an
- * attached fence correctly, callers do not need to distinguish this.
- */
-static inline void
-i915_vma_unpin_fence(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
-}
-
-void i915_gem_restore_fences(struct drm_device *dev);
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
@@ -3631,7 +3279,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3833,10 +3481,11 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
-extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
+ bool state);
extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_device *dev);
-extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
+extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
@@ -3855,7 +3504,7 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 41e697e5dbcd..902fa427c196 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,6 +34,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -48,7 +49,7 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
- return HAS_LLC(dev) || level != I915_CACHE_NONE;
+ return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -220,15 +221,17 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
}
static void
-__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- i915_gem_clflush_object(obj, false);
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ drm_clflush_sg(pages);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -238,7 +241,7 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- __i915_gem_object_release_shmem(obj);
+ __i915_gem_object_release_shmem(obj, pages);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -433,6 +436,70 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
return timeout;
}
+static void __fence_set_priority(struct dma_fence *fence, int prio)
+{
+ struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ rq = to_request(fence);
+ engine = rq->engine;
+ if (!engine->schedule)
+ return;
+
+ engine->schedule(rq, prio);
+}
+
+static void fence_set_priority(struct dma_fence *fence, int prio)
+{
+ /* Recurse once into a fence-array */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ for (i = 0; i < array->num_fences; i++)
+ __fence_set_priority(array->fences[i], prio);
+ } else {
+ __fence_set_priority(fence, prio);
+ }
+}
+
+int
+i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int prio)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
+ int ret;
+
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ fence_set_priority(shared[i], prio);
+ dma_fence_put(shared[i]);
+ }
+
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(obj->resv);
+ }
+
+ if (excl) {
+ fence_set_priority(excl, prio);
+ dma_fence_put(excl);
+ }
+ return 0;
+}
+
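i915_gem_object_wait_priority() walks the object's reservation object and asks each engine's scheduler, where one exists, to bump the matching requests, recursing one level into fence-arrays. A hedged sketch of the expected call shape — the display-flip caller here is an assumption suggested by the I915_PRIORITY_DISPLAY define added to i915_drv.h above:

    /* Assumed caller: before scanning out from obj, raise every request
     * still rendering to it (writers and readers alike). */
    ret = i915_gem_object_wait_priority(obj, I915_WAIT_ALL,
                                        I915_PRIORITY_DISPLAY);
    if (ret)
            return ret;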
/**
* Waits for rendering to the object to be completed
* @obj: i915 gem object
@@ -1757,7 +1824,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
goto err_rpm;
/* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
ret = -EFAULT;
goto err_unlock;
}
@@ -2150,7 +2217,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- __i915_gem_object_release_shmem(obj);
+ __i915_gem_object_release_shmem(obj, pages);
i915_gem_gtt_finish_pages(obj, pages);
@@ -2232,6 +2299,30 @@ static unsigned int swiotlb_max_size(void)
#endif
}
+static void i915_sg_trim(struct sg_table *orig_st)
+{
+ struct sg_table new_st;
+ struct scatterlist *sg, *new_sg;
+ unsigned int i;
+
+ if (orig_st->nents == orig_st->orig_nents)
+ return;
+
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ return;
+
+ new_sg = new_st.sgl;
+ for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, 0);
+ /* called before being DMA mapped, no need to copy sg->dma_* */
+ new_sg = sg_next(new_sg);
+ }
+
+ sg_free_table(orig_st);
+
+ *orig_st = new_st;
+}
+
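i915_sg_trim() copies the used entries into a right-sized table and swaps it into place, discarding the slack left over from the worst-case one-entry-per-page allocation; on allocation failure it simply keeps the oversized but correct original. The same shrink-and-swap idiom in self-contained C:

    #include <stdlib.h>
    #include <string.h>

    struct table { int *ents; unsigned int nents, orig_nents; };

    /* Userspace analogue of i915_sg_trim(): reallocate to the used size;
     * if that fails, keep the original, which is merely oversized. */
    static void table_trim(struct table *t)
    {
            int *tight;

            if (t->nents == t->orig_nents)
                    return;

            tight = malloc(t->nents * sizeof(*tight));
            if (!tight)
                    return;

            memcpy(tight, t->ents, t->nents * sizeof(*tight));
            free(t->ents);
            t->ents = tight;
            t->orig_nents = t->nents;
    }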
static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@@ -2296,7 +2387,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto err_pages;
+ goto err_sg;
}
}
if (!i ||
@@ -2317,6 +2408,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
+ /* Trim unused sg entries to avoid wasting memory. */
+ i915_sg_trim(st);
+
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret)
goto err_pages;
@@ -2326,8 +2420,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
return st;
-err_pages:
+err_sg:
sg_mark_end(sg);
+err_pages:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
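The relabelled exit path is the usual kernel unwind: each failure jumps to the label that releases exactly what had been set up so far, and sg_mark_end() now runs only on the early-exit path, where the table really was cut short. The shape in miniature, runnable C:

    #include <stdlib.h>

    static int do_work(char *buf) { return buf[0] ? -1 : 0; }

    static int build(void)
    {
            char *pages, *sg;
            int ret;

            pages = calloc(1, 64);
            if (!pages)
                    return -1;

            sg = calloc(1, 64);
            if (!sg) {
                    ret = -1;
                    goto err_pages;          /* only 'pages' exists yet */
            }

            ret = do_work(sg);
            if (ret)
                    goto err_sg;             /* undo both, newest first */

            free(sg);
            free(pages);
            return 0;

    err_sg:
            free(sg);
    err_pages:
            free(pages);
            return ret;
    }

    int main(void) { return build(); }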
@@ -2657,7 +2752,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
- i915_gem_restore_fences(&dev_priv->drm);
+ i915_gem_restore_fences(dev_priv);
if (dev_priv->gt.awake) {
intel_sanitize_gt_powersave(dev_priv);
@@ -2689,12 +2784,17 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
*/
if (i915.enable_execlists) {
- spin_lock(&engine->execlist_lock);
- INIT_LIST_HEAD(&engine->execlist_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
i915_gem_request_put(engine->execlist_port[0].request);
i915_gem_request_put(engine->execlist_port[1].request);
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- spin_unlock(&engine->execlist_lock);
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
}
@@ -2892,117 +2992,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static void __i915_vma_iounmap(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_pinned(vma));
-
- if (vma->iomap == NULL)
- return;
-
- io_mapping_unmap(vma->iomap);
- vma->iomap = NULL;
-}
-
-int i915_vma_unbind(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- /* First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- */
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
-
- /* When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- *
- * Even more scary is that the retire callback may free
- * the object (last active vma). To prevent the explosion
- * we defer the actual object free to a worker that can
- * only proceed once it acquires the struct_mutex (which
- * we currently hold, therefore it cannot free this object
- * before we are finished).
- */
- __i915_vma_pin(vma);
-
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
- if (ret)
- break;
- }
-
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915_vma_is_active(vma));
- }
-
- if (i915_vma_is_pinned(vma))
- return -EBUSY;
-
- if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
-
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- if (i915_vma_is_map_and_fenceable(vma)) {
- /* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
-
- __i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
- }
-
- if (likely(!vma->vm->closed)) {
- trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
- }
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
-
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
- if (vma->pages != obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- /* Since the unbound list is global, only move to that list if
- * no more VMAs exist. */
- if (--obj->bind_count == 0)
- list_move_tail(&obj->global_link,
- &to_i915(obj->base.dev)->mm.unbound_list);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
-
-destroy:
- if (unlikely(i915_vma_is_closed(vma)))
- i915_vma_destroy(vma);
-
- return 0;
-}
-
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
int ret, i;
@@ -3018,201 +3007,43 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
- struct i915_gem_timeline *tl;
int ret;
- list_for_each_entry(tl, &i915->gt.timelines, link) {
- ret = wait_for_timeline(tl, flags);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
-{
- struct drm_mm_node *gtt_space = &vma->node;
- struct drm_mm_node *other;
-
- /*
- * On some machines we have to be careful when putting differing types
- * of snoopable memory together to avoid the prefetcher crossing memory
- * domains and dying. During vm initialisation, we decide whether or not
- * these constraints apply and set the drm_mm.color_adjust
- * appropriately.
- */
- if (vma->vm->mm.color_adjust == NULL)
- return true;
-
- if (!drm_mm_node_allocated(gtt_space))
- return true;
-
- if (list_empty(&gtt_space->node_list))
- return true;
-
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
- return false;
+ if (flags & I915_WAIT_LOCKED) {
+ struct i915_gem_timeline *tl;
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
- return false;
+ lockdep_assert_held(&i915->drm.struct_mutex);
- return true;
-}
-
-/**
- * i915_vma_insert - finds a slot for the vma in its address space
- * @vma: the vma
- * @size: requested size in bytes (can be larger than the VMA)
- * @alignment: required alignment
- * @flags: mask of PIN_* flags to use
- *
- * First we try to allocate some free space that meets the requirements for
- * the VMA. Failing that, if the flags permit, it will evict an old VMA,
- * preferably the oldest idle entry to make room for the new VMA.
- *
- * Returns:
- * 0 on success, negative error code otherwise.
- */
-static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
- struct drm_i915_gem_object *obj = vma->obj;
- u64 start, end;
- int ret;
-
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
-
- size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
-
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
-
- start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-
- end = vma->vm->total;
- if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
- if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
-
- /* If binding the object/GGTT view requires more space than the entire
- * aperture has, reject it early before evicting everything in a vain
- * attempt to find space.
- */
- if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
- end);
- return -E2BIG;
- }
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- if (flags & PIN_OFFSET_FIXED) {
- u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
- ret = -EINVAL;
- goto err_unpin;
- }
-
- vma->node.start = offset;
- vma->node.size = size;
- vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret) {
- ret = i915_gem_evict_for_vma(vma);
- if (ret == 0)
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ ret = wait_for_timeline(tl, flags);
if (ret)
- goto err_unpin;
+ return ret;
}
} else {
- u32 search_flag, alloc_flag;
-
- if (flags & PIN_HIGH) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
- } else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
- }
-
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- if (alignment <= 4096)
- alignment = 0;
-
-search_free:
- ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
- &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(vma->vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
-
- goto err_unpin;
- }
-
- GEM_BUG_ON(vma->node.start < start);
- GEM_BUG_ON(vma->node.start + vma->node.size > end);
+ ret = wait_for_timeline(&i915->gt.global_timeline, flags);
+ if (ret)
+ return ret;
}
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
-
- list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- obj->bind_count++;
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
}
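i915_gem_wait_for_idle() now keys off I915_WAIT_LOCKED: walking the full gt.timelines list is only safe under struct_mutex, so unlocked callers wait on the global execution timeline alone. The two call shapes, sketched:

    /* Holding struct_mutex: every timeline is drained. */
    ret = i915_gem_wait_for_idle(i915,
                                 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED);

    /* Without the mutex: only the global timeline is waited on. */
    ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE);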
-bool
-i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- bool force)
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
if (!obj->mm.pages)
- return false;
+ return;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen || obj->phys_handle)
- return false;
+ return;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@@ -3224,14 +3055,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
- return false;
+ return;
}
trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
-
- return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3277,9 +3106,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
-
+ i915_gem_clflush_object(obj, obj->pin_display);
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
obj->base.write_domain = 0;
@@ -3378,12 +3205,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct i915_vma *vma;
- int ret = 0;
+ int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->cache_level == cache_level)
- goto out;
+ return 0;
/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
@@ -3435,7 +3262,8 @@ restart:
if (ret)
return ret;
- if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
+ if (!HAS_LLC(to_i915(obj->base.dev)) &&
+ cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
@@ -3477,20 +3305,14 @@ restart:
}
}
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
+ cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ obj->cache_dirty = true;
+
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
-out:
- /* Flush the dirty CPU caches to the backing storage so that the
- * object is now coherent at its new cache level (with respect
- * to the access domain).
- */
- if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
- if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
- }
-
return 0;
}
@@ -3646,7 +3468,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- i915_gem_object_flush_cpu_write_domain(obj);
+ /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
+ if (obj->cache_dirty) {
+ i915_gem_clflush_object(obj, true);
+ intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ }
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3798,100 +3624,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return ret < 0 ? ret : 0;
}
-static bool
-i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- if (!drm_mm_node_allocated(&vma->node))
- return false;
-
- if (vma->node.size < size)
- return true;
-
- if (alignment && vma->node.start & (alignment - 1))
- return true;
-
- if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
- return true;
-
- if (flags & PIN_OFFSET_BIAS &&
- vma->node.start < (flags & PIN_OFFSET_MASK))
- return true;
-
- if (flags & PIN_OFFSET_FIXED &&
- vma->node.start != (flags & PIN_OFFSET_MASK))
- return true;
-
- return false;
-}
-
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
-
- /*
- * Explicitly disable for rotated VMA since the display does not
- * need the fence and the VMA is not accessible to other users.
- */
- if (mappable && fenceable &&
- vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
- vma->flags |= I915_VMA_CAN_FENCE;
- else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
-{
- unsigned int bound = vma->flags;
- int ret;
-
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
-
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err;
- }
-
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err;
- }
-
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
- if (ret)
- goto err;
-
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
-
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
-
-err:
- __i915_vma_unpin(vma);
- return ret;
-}
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -4156,6 +3888,16 @@ out:
return err;
}
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(active, typeof(*obj), frontbuffer_write);
+
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
+}
+
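frontbuffer_retire() recovers the object from the embedded request-tracking member with container_of(), the standard intrusive-structure trick. A standalone, runnable illustration (the macro below is a simplified form of the kernel's, which adds type checking):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct active { int dummy; };
    struct object { int id; struct active frontbuffer_write; };

    /* Mirrors frontbuffer_retire(): from the embedded tracker back to
     * the enclosing object. */
    static void retire(struct active *active)
    {
            struct object *obj =
                    container_of(active, struct object, frontbuffer_write);

            printf("retired frontbuffer write on object %d\n", obj->id);
    }

    int main(void)
    {
            struct object o = { .id = 7 };

            retire(&o.frontbuffer_write);
            return 0;
    }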
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
@@ -4173,6 +3915,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->resv = &obj->__builtin_resv;
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
+ init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
@@ -4235,7 +3978,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(dev)) {
+ if (HAS_LLC(dev_priv)) {
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -4481,7 +4224,7 @@ int i915_gem_suspend(struct drm_device *dev)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- if (HAS_HW_CONTEXTS(dev)) {
+ if (HAS_HW_CONTEXTS(dev_priv)) {
int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
@@ -4500,7 +4243,7 @@ void i915_gem_resume(struct drm_device *dev)
WARN_ON(dev_priv->gt.awake);
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
+ i915_gem_restore_gtt_mappings(dev_priv);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
@@ -4511,11 +4254,9 @@ void i915_gem_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void i915_gem_init_swizzling(struct drm_device *dev)
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen < 5 ||
+ if (INTEL_GEN(dev_priv) < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
@@ -4574,7 +4315,7 @@ i915_gem_init_hw(struct drm_device *dev)
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
+ if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HASWELL(dev_priv))
@@ -4586,14 +4327,14 @@ i915_gem_init_hw(struct drm_device *dev)
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
}
}
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
/*
* At least 830 can leave some of the unused rings
@@ -4605,7 +4346,7 @@ i915_gem_init_hw(struct drm_device *dev)
BUG_ON(!dev_priv->kernel_context);
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
DRM_ERROR("PPGTT enable HW failed %d\n", ret);
goto out;
@@ -4720,7 +4461,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
@@ -4744,9 +4484,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
fence->id = i;
list_add_tail(&fence->link, &dev_priv->mm.fence_list);
}
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- i915_gem_detect_bit_6_swizzle(dev);
+ i915_gem_detect_bit_6_swizzle(dev_priv);
}
int
@@ -4770,14 +4510,18 @@ i915_gem_load_init(struct drm_device *dev)
if (!dev_priv->requests)
goto err_vmas;
+ dev_priv->dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!dev_priv->dependencies)
+ goto err_requests;
+
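The new slab is created with the KMEM_CACHE() convenience wrapper, which derives the cache's name, object size and alignment from the struct type. Per include/linux/slab.h it expands to roughly:

    kmem_cache_create("i915_dependency",
                      sizeof(struct i915_dependency),
                      __alignof__(struct i915_dependency),
                      SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT,
                      NULL);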
mutex_lock(&dev_priv->drm.struct_mutex);
INIT_LIST_HEAD(&dev_priv->gt.timelines);
- err = i915_gem_timeline_init(dev_priv,
- &dev_priv->gt.global_timeline,
- "[execution]");
+ err = i915_gem_timeline_init__global(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (err)
- goto err_requests;
+ goto err_dependencies;
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
@@ -4805,6 +4549,8 @@ i915_gem_load_init(struct drm_device *dev)
return 0;
+err_dependencies:
+ kmem_cache_destroy(dev_priv->dependencies);
err_requests:
kmem_cache_destroy(dev_priv->requests);
err_vmas:
@@ -4821,6 +4567,12 @@ void i915_gem_load_cleanup(struct drm_device *dev)
WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
+ WARN_ON(!list_empty(&dev_priv->gt.timelines));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
@@ -4905,7 +4657,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 735580d72eb1..51ec793f2e20 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,7 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
-#define GEM_BUG_ON(expr)
+#define GEM_BUG_ON(expr) do { } while (0)
#endif
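With the old empty expansion, "if (cond) GEM_BUG_ON(x);" compiled down to an if with a bare ';' body, which trips gcc's -Wempty-body and lets a following else read ambiguously; do { } while (0) keeps the macro a single real statement. Demonstrable in plain C:

    #include <stdio.h>

    #define EMPTY_BUG_ON(expr)                   /* old: expands to nothing */
    #define STMT_BUG_ON(expr) do { } while (0)   /* new: a real statement */

    int main(void)
    {
            int fault = 0;

            if (fault)
                    EMPTY_BUG_ON(fault);  /* body is a bare ';': -Wempty-body warns */

            if (fault)
                    STMT_BUG_ON(fault);   /* single statement, pairs cleanly */
            else
                    printf("no fault\n");

            return 0;
    }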
#define I915_NUM_ENGINES 5
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6dd475735f0a..1f94b8d6d83d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -476,6 +476,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
+ ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index fb5b44339f71..097d9d8c2315 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -287,7 +287,7 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
if (DBG_USE_CPU_RELOC)
return DBG_USE_CPU_RELOC > 0;
- return (HAS_LLC(obj->base.dev) ||
+ return (HAS_LLC(to_i915(obj->base.dev)) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -833,7 +833,7 @@ need_reloc_mappable(struct i915_vma *vma)
return false;
/* See also use_cpu_reloc() */
- if (HAS_LLC(vma->obj->base.dev))
+ if (HAS_LLC(to_i915(vma->obj->base.dev)))
return false;
if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
@@ -1276,9 +1276,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
- i915_gem_active_set(&vma->last_write, req);
-
- intel_fb_obj_invalidate(obj, ORIGIN_CS);
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, req);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
- if (!HAS_RESOURCE_STREAMER(dev)) {
+ if (!HAS_RESOURCE_STREAMER(dev_priv)) {
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
@@ -1878,7 +1877,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(to_i915(dev)) < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index cd59dbc6588c..0efa3571afc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -368,15 +368,14 @@ i915_vma_get_fence(struct i915_vma *vma)
/**
* i915_gem_restore_fences - restore fence state
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -451,15 +450,14 @@ void i915_gem_restore_fences(struct drm_device *dev)
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -473,7 +471,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
new file mode 100644
index 000000000000..22c4a2d01adf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_FENCE_REG_H__
+#define __I915_FENCE_REG_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct i915_vma;
+
+struct drm_i915_fence_reg {
+ struct list_head link;
+ struct drm_i915_private *i915;
+ struct i915_vma *vma;
+ int pin_count;
+ int id;
+ /**
+ * Whether the tiling parameters for the currently
+ * associated fence register have changed. Note that
+ * for the purposes of tracking tiling changes we also
+ * treat the unfenced register, the register slot that
+ * the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ bool dirty;
+};
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a5fafa3d4fc8..b4bde1452f2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,13 +96,6 @@
*
*/
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
-}
-
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -327,10 +320,10 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-static int __setup_page_dma(struct drm_device *dev,
+static int __setup_page_dma(struct drm_i915_private *dev_priv,
struct i915_page_dma *p, gfp_t flags)
{
- struct device *kdev = &dev->pdev->dev;
+ struct device *kdev = &dev_priv->drm.pdev->dev;
p->page = alloc_page(flags);
if (!p->page)
@@ -347,14 +340,16 @@ static int __setup_page_dma(struct drm_device *dev,
return 0;
}
-static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static int setup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- return __setup_page_dma(dev, p, I915_GFP_DMA);
+ return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}
-static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static void cleanup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
if (WARN_ON(!p->page))
return;
@@ -387,8 +382,8 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
#define kunmap_px(ppgtt, vaddr) \
kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
-#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
-#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
+#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
fill_page_dma_32((dev_priv), px_base(px), (v))
@@ -416,24 +411,23 @@ static void fill_page_dma_32(struct drm_i915_private *dev_priv,
}
static int
-setup_scratch_page(struct drm_device *dev,
+setup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch,
gfp_t gfp)
{
- return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
+ return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}
-static void cleanup_scratch_page(struct drm_device *dev,
+static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch)
{
- cleanup_page_dma(dev, scratch);
+ cleanup_page_dma(dev_priv, scratch);
}
-static struct i915_page_table *alloc_pt(struct drm_device *dev)
+static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
struct i915_page_table *pt;
- const size_t count = INTEL_INFO(dev)->gen >= 8 ?
- GEN8_PTES : GEN6_PTES;
+ const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
@@ -446,7 +440,7 @@ static struct i915_page_table *alloc_pt(struct drm_device *dev)
if (!pt->used_ptes)
goto fail_bitmap;
- ret = setup_px(dev, pt);
+ ret = setup_px(dev_priv, pt);
if (ret)
goto fail_page_m;
@@ -460,9 +454,10 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
+static void free_pt(struct drm_i915_private *dev_priv,
+ struct i915_page_table *pt)
{
- cleanup_px(dev, pt);
+ cleanup_px(dev_priv, pt);
kfree(pt->used_ptes);
kfree(pt);
}
@@ -491,7 +486,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
-static struct i915_page_directory *alloc_pd(struct drm_device *dev)
+static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
@@ -505,7 +500,7 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev)
if (!pd->used_pdes)
goto fail_bitmap;
- ret = setup_px(dev, pd);
+ ret = setup_px(dev_priv, pd);
if (ret)
goto fail_page_m;
@@ -519,10 +514,11 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+static void free_pd(struct drm_i915_private *dev_priv,
+ struct i915_page_directory *pd)
{
if (px_page(pd)) {
- cleanup_px(dev, pd);
+ cleanup_px(dev_priv, pd);
kfree(pd->used_pdes);
kfree(pd);
}
@@ -538,10 +534,10 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
fill_px(to_i915(vm->dev), pd, scratch_pde);
}
-static int __pdp_init(struct drm_device *dev,
+static int __pdp_init(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
- size_t pdpes = I915_PDPES_PER_PDP(dev);
+ size_t pdpes = I915_PDPES_PER_PDP(dev_priv);
pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
sizeof(unsigned long),
@@ -570,22 +566,22 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
}
static struct
-i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
return ERR_PTR(-ENOMEM);
- ret = __pdp_init(dev, pdp);
+ ret = __pdp_init(dev_priv, pdp);
if (ret)
goto fail_bitmap;
- ret = setup_px(dev, pdp);
+ ret = setup_px(dev_priv, pdp);
if (ret)
goto fail_page_m;
@@ -599,12 +595,12 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pdp(struct drm_device *dev,
+static void free_pdp(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
- if (USES_FULL_48BIT_PPGTT(dev)) {
- cleanup_px(dev, pdp);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ cleanup_px(dev_priv, pdp);
kfree(pdp);
}
}
@@ -638,7 +634,7 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pdpe_t *page_directorypo;
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
return;
page_directorypo = kmap_px(pdp);
@@ -654,7 +650,7 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
- WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_px(ppgtt, pagemap);
}
@@ -714,7 +710,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -741,7 +737,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
bitmap_clear(pt->used_ptes, pte, num_entries);
if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
- free_pt(vm->dev, pt);
+ free_pt(to_i915(vm->dev), pt);
return true;
}
@@ -783,7 +779,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
}
if (bitmap_empty(pd->used_pdes, I915_PDES)) {
- free_pd(vm->dev, pd);
+ free_pd(to_i915(vm->dev), pd);
return true;
}
@@ -799,6 +795,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint64_t pdpe;
gen8_ppgtt_pdpe_t *pdpe_vaddr;
@@ -811,7 +808,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
__clear_bit(pdpe, pdp->used_pdpes);
- if (USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
pdpe_vaddr = kmap_px(pdp);
pdpe_vaddr[pdpe] = scratch_pdpe;
kunmap_px(ppgtt, pdpe_vaddr);
@@ -821,9 +818,9 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
mark_tlbs_dirty(ppgtt);
- if (USES_FULL_48BIT_PPGTT(vm->dev) &&
- bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) {
- free_pdp(vm->dev, pdp);
+ if (USES_FULL_48BIT_PPGTT(dev_priv) &&
+ bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
+ free_pdp(dev_priv, pdp);
return true;
}
@@ -846,7 +843,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
gen8_ppgtt_pml4e_t scratch_pml4e =
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->dev));
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (WARN_ON(!pml4->pdps[pml4e]))
@@ -866,7 +863,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(vm->dev))
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
else
gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
@@ -901,7 +898,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+ if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
break;
pde = 0;
}
@@ -924,7 +921,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@@ -939,7 +936,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct drm_device *dev,
+static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd)
{
int i;
@@ -951,34 +948,34 @@ static void gen8_free_page_tables(struct drm_device *dev,
if (WARN_ON(!pd->page_table[i]))
continue;
- free_pt(dev, pd->page_table[i]);
+ free_pt(dev_priv, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
goto free_scratch_page;
}
- vm->scratch_pd = alloc_pd(dev);
+ vm->scratch_pd = alloc_pd(dev_priv);
if (IS_ERR(vm->scratch_pd)) {
ret = PTR_ERR(vm->scratch_pd);
goto free_pt;
}
- if (USES_FULL_48BIT_PPGTT(dev)) {
- vm->scratch_pdp = alloc_pdp(dev);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ vm->scratch_pdp = alloc_pdp(dev_priv);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -987,17 +984,17 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (USES_FULL_48BIT_PPGTT(dev))
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
free_pd:
- free_pd(dev, vm->scratch_pd);
+ free_pd(dev_priv, vm->scratch_pd);
free_pt:
- free_pt(dev, vm->scratch_pt);
+ free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return ret;
}
@@ -1035,54 +1032,56 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- if (USES_FULL_48BIT_PPGTT(dev))
- free_pdp(dev, vm->scratch_pdp);
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
+ free_pdp(dev_priv, vm->scratch_pdp);
+ free_pd(dev_priv, vm->scratch_pd);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
-static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
int i;
- for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+ for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
if (WARN_ON(!pdp->page_directory[i]))
continue;
- gen8_free_page_tables(dev, pdp->page_directory[i]);
- free_pd(dev, pdp->page_directory[i]);
+ gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
+ free_pd(dev_priv, pdp->page_directory[i]);
}
- free_pdp(dev, pdp);
+ free_pdp(dev_priv, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
if (WARN_ON(!ppgtt->pml4.pdps[i]))
continue;
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
}
- cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+ cleanup_px(dev_priv, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(to_i915(vm->dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+ if (!USES_FULL_48BIT_PPGTT(dev_priv))
+ gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
else
gen8_ppgtt_cleanup_4lvl(ppgtt);
@@ -1113,7 +1112,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1125,7 +1124,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
continue;
}
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt))
goto unwind_out;
@@ -1139,7 +1138,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
- free_pt(dev, pd->page_table[pde]);
+ free_pt(dev_priv, pd->page_table[pde]);
return -ENOMEM;
}
@@ -1174,10 +1173,10 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
WARN_ON(!bitmap_empty(new_pds, pdpes));
@@ -1185,7 +1184,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
if (test_bit(pdpe, pdp->used_pdpes))
continue;
- pd = alloc_pd(dev);
+ pd = alloc_pd(dev_priv);
if (IS_ERR(pd))
goto unwind_out;
@@ -1199,7 +1198,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pdpe, new_pds, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
return -ENOMEM;
}
@@ -1227,7 +1226,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
@@ -1235,7 +1234,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
- pdp = alloc_pdp(dev);
+ pdp = alloc_pdp(dev_priv);
if (IS_ERR(pdp))
goto unwind_out;
@@ -1253,7 +1252,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- free_pdp(dev, pml4->pdps[pml4e]);
+ free_pdp(dev_priv, pml4->pdps[pml4e]);
return -ENOMEM;
}
@@ -1302,12 +1301,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;
/* Wrap is never okay since we can only represent 48b, and we don't
@@ -1395,11 +1394,12 @@ err_out:
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
- free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
+ free_pt(dev_priv,
+ pdp->page_directory[pdpe]->page_table[temp]);
}
for_each_set_bit(pdpe, new_page_dirs, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
@@ -1450,7 +1450,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+ gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
return ret;
}
@@ -1460,7 +1460,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(vm->dev))
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@@ -1531,7 +1531,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@@ -1551,7 +1551,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
unsigned long *new_page_dirs, *new_page_tables;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
int ret;
/* We allocate temp bitmap for page tables for no gain
@@ -1584,6 +1584,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int ret;
ret = gen8_init_scratch(&ppgtt->base);
@@ -1599,8 +1600,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->debug_dump = gen8_dump_ppgtt;
- if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ ret = setup_px(dev_priv, &ppgtt->pml4);
if (ret)
goto free_scratch;
@@ -1609,7 +1610,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = 1ULL << 48;
ppgtt->switch_mm = gen8_48b_mm_switch;
} else {
- ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+ ret = __pdp_init(dev_priv, &ppgtt->pdp);
if (ret)
goto free_scratch;
@@ -1619,14 +1620,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
+ if (intel_vgpu_active(dev_priv)) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1801,22 +1802,21 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static void gen8_ppgtt_enable(struct drm_device *dev)
+static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
+ u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
-static void gen7_ppgtt_enable(struct drm_device *dev)
+static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
enum intel_engine_id id;
@@ -1840,9 +1840,8 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
}
-static void gen6_ppgtt_enable(struct drm_device *dev)
+static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1928,8 +1927,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
@@ -1959,7 +1957,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
@@ -2007,7 +2005,7 @@ unwind_out:
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = vm->scratch_pt;
- free_pt(vm->dev, pt);
+ free_pt(dev_priv, pt);
}
mark_tlbs_dirty(ppgtt);
@@ -2016,16 +2014,16 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
@@ -2036,17 +2034,17 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -2054,7 +2052,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(dev, pt);
+ free_pt(dev_priv, pt);
gen6_free_scratch(vm);
}
@@ -2062,8 +2060,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
@@ -2128,8 +2125,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
@@ -2200,10 +2196,15 @@ static void i915_address_space_init(struct i915_address_space *vm,
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
-static void gtt_write_workarounds(struct drm_device *dev)
+static void i915_address_space_fini(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ i915_gem_timeline_fini(&vm->timeline);
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+}
+static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+{
/* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
@@ -2236,11 +2237,9 @@ static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
return ret;
}
-int i915_ppgtt_init_hw(struct drm_device *dev)
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gtt_write_workarounds(dev);
+ gtt_write_workarounds(dev_priv);
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
@@ -2248,17 +2247,17 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
if (i915.enable_execlists)
return 0;
- if (!USES_PPGTT(dev))
+ if (!USES_PPGTT(dev_priv))
return 0;
if (IS_GEN6(dev_priv))
- gen6_ppgtt_enable(dev);
+ gen6_ppgtt_enable(dev_priv);
else if (IS_GEN7(dev_priv))
- gen7_ppgtt_enable(dev);
- else if (INTEL_INFO(dev)->gen >= 8)
- gen8_ppgtt_enable(dev);
+ gen7_ppgtt_enable(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_ppgtt_enable(dev_priv);
else
- MISSING_CASE(INTEL_INFO(dev)->gen);
+ MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
@@ -2286,7 +2285,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
-void  i915_ppgtt_release(struct kref *kref)
+void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
@@ -2298,9 +2297,7 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
- i915_gem_timeline_fini(&ppgtt->base.timeline);
- list_del(&ppgtt->base.global_link);
- drm_mm_takedown(&ppgtt->base.mm);
+ i915_address_space_fini(&ppgtt->base);
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
@@ -2362,15 +2359,14 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
*/
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
i915_check_and_clear_faults(dev_priv);
@@ -2842,8 +2838,9 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
- drm_mm_takedown(&ggtt->base.mm);
- list_del(&ggtt->base.global_link);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_address_space_fini(&ggtt->base);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
ggtt->base.cleanup(&ggtt->base);
@@ -2932,6 +2929,7 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
struct pci_dev *pdev = ggtt->base.dev->pdev;
phys_addr_t phys_addr;
int ret;
@@ -2946,7 +2944,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(to_i915(ggtt->base.dev)))
+ if (IS_BROXTON(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -2955,9 +2953,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(ggtt->base.dev,
- &ggtt->base.scratch_page,
- GFP_DMA32);
+ ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -3046,7 +3042,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(vm->dev, &vm->scratch_page);
+ cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3262,7 +3258,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- ret = i915_gem_init_stolen(&dev_priv->drm);
+ ret = i915_gem_init_stolen(dev_priv);
if (ret)
goto out_gtt_cleanup;
@@ -3281,9 +3277,8 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj, *on;
@@ -3318,7 +3313,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.closed = false;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
@@ -3327,7 +3322,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
@@ -3348,176 +3343,6 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
-static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
-{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
- struct drm_i915_gem_object *obj = vma->obj;
-
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
- return;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
- WARN_ON(i915_vma_unbind(vma));
-
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- if (--obj->active_count)
- return;
-
- /* Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (obj->bind_count)
- list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
-
- obj->mm.dirty = true; /* be paranoid */
-
- if (i915_gem_object_has_active_reference(obj)) {
- i915_gem_object_clear_active_reference(obj);
- i915_gem_object_put(obj);
- }
-}
-
-static void
-i915_ggtt_retire__write(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_write);
-
- intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
-}
-
-void i915_vma_destroy(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(i915_vma_is_active(vma));
- GEM_BUG_ON(!i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->fence);
-
- list_del(&vma->vm_link);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-void i915_vma_close(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
-
- list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
-
- if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-static inline long vma_compare(struct i915_vma *vma,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
-
- if (vma->vm != vm)
- return vma->vm - vm;
-
- if (!view)
- return vma->ggtt_view.type;
-
- if (vma->ggtt_view.type != view->type)
- return vma->ggtt_view.type - view->type;
-
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params));
-}
-
-static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
- struct rb_node *rb, **p;
- int i;
-
- GEM_BUG_ON(vm->closed);
-
- vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
- if (vma == NULL)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&vma->exec_list);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
- init_request_active(&vma->last_write,
- i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
- init_request_active(&vma->last_fence, NULL);
- list_add(&vma->vm_link, &vm->unbound_list);
- vma->vm = vm;
- vma->obj = obj;
- vma->size = obj->base.size;
-
- if (view) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
- vma->size = view->params.partial.size;
- vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
- vma->size =
- intel_rotation_info_size(&view->params.rotated);
- vma->size <<= PAGE_SHIFT;
- }
- }
-
- if (i915_is_ggtt(vm)) {
- vma->flags |= I915_VMA_GGTT;
- list_add(&vma->obj_link, &obj->vma_list);
- } else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- list_add_tail(&vma->obj_link, &obj->vma_list);
- }
-
- rb = NULL;
- p = &obj->vma_tree.rb_node;
- while (*p) {
- struct i915_vma *pos;
-
- rb = *p;
- pos = rb_entry(rb, struct i915_vma, obj_node);
- if (vma_compare(pos, vm, view) < 0)
- p = &rb->rb_right;
- else
- p = &rb->rb_left;
- }
- rb_link_node(&vma->obj_node, rb, p);
- rb_insert_color(&vma->obj_node, &obj->vma_tree);
-
- return vma;
-}
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
-
- return __i915_vma_create(obj, vm, view);
-}
-
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -3530,7 +3355,7 @@ i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
- cmp = vma_compare(vma, vm, view);
+ cmp = i915_vma_compare(vma, vm, view);
if (cmp == 0)
return vma;
@@ -3555,7 +3380,7 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
vma = i915_gem_obj_to_vma(obj, vm, view);
if (!vma) {
- vma = __i915_vma_create(obj, vm, view);
+ vma = i915_vma_create(obj, vm, view);
GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
}
@@ -3747,99 +3572,3 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
-/**
- * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
- * @vma: VMA to map
- * @cache_level: mapping cache level
- * @flags: flags like global or local mapping
- *
- * DMA addresses are taken from the scatter-gather table of this object (or of
- * this VMA in case of non-default GGTT views) and PTE entries set up.
- * Note that DMA addresses are also the only part of the SG table we care about.
- */
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
-{
- u32 bind_flags;
- u32 vma_flags;
- int ret;
-
- if (WARN_ON(flags == 0))
- return -EINVAL;
-
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
-
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- if (flags & PIN_UPDATE)
- bind_flags |= vma_flags;
- else
- bind_flags &= ~vma_flags;
- if (bind_flags == 0)
- return 0;
-
- if (vma_flags == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma);
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- return ret;
- }
-
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
-
- vma->flags |= bind_flags;
- return 0;
-}
-
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
-{
- void __iomem *ptr;
-
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
-
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
- return IO_ERR_PTR(-ENODEV);
-
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
-
- ptr = vma->iomap;
- if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
- vma->node.start,
- vma->node.size);
- if (ptr == NULL)
- return IO_ERR_PTR(-ENOMEM);
-
- vma->iomap = ptr;
- }
-
- __i915_vma_pin(vma);
- return ptr;
-}
-
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
-{
- struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
-
- vma = fetch_and_zero(p_vma);
- if (!vma)
- return;
-
- obj = vma->obj;
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- __i915_gem_object_release_unless_active(obj);
-}
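Among the code removed above is vma_compare(), which the later hunks reach as i915_vma_compare(): it gives the per-object rb-tree a total order over (address space, view type, view parameters). A standalone sketch of the same three-level comparison with simplified, assumed types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct view { int type; char params[8]; };
struct vma { void *vm; struct view view; };

/* Order by address space first, then view type, then raw view params;
 * only the sign of the result matters to the rb-tree walk. */
static long vma_cmp(const struct vma *vma, void *vm, const struct view *view)
{
	if (vma->vm != vm)
		return (uintptr_t)vma->vm < (uintptr_t)vm ? -1 : 1;

	if (vma->view.type != view->type)
		return vma->view.type - view->type;

	return memcmp(vma->view.params, view->params, sizeof(view->params));
}

int main(void)
{
	int space;
	struct view v = { .type = 1, .params = "rot" };
	struct vma a = { .vm = &space, .view = v };

	printf("%ld\n", vma_cmp(&a, &space, &v));	/* 0: keys are equal */
	return 0;
}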
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c23ef9db1f53..4f35be4c26c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,7 +35,9 @@
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
+#include <linux/mm.h>
+#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#define I915_FENCE_REG_NONE -1
@@ -118,8 +120,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
- GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+ GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -138,6 +140,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+struct sg_table;
+
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED,
@@ -168,135 +172,7 @@ extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- struct drm_i915_fence_reg *fence;
- struct sg_table *pages;
- void __iomem *iomap;
- u64 size;
- u64 display_alignment;
-
- unsigned int flags;
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits.
- */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW BIT(5)
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(6)
-#define I915_VMA_LOCAL_BIND BIT(7)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CAN_FENCE BIT(9)
-#define I915_VMA_CLOSED BIT(10)
-
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_write;
- struct i915_gem_active last_fence;
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
- struct rb_node obj_node;
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-};
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
-
-static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_GGTT;
-}
-
-static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CAN_FENCE;
-}
-
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CLOSED;
-}
-
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
-static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
-}
+struct i915_vma;
struct i915_page_dma {
struct page *page;
@@ -606,13 +482,20 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
+
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
-int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv,
@@ -629,8 +512,8 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -653,88 +536,4 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
#define PIN_OFFSET_FIXED BIT(11)
#define PIN_OFFSET_MASK (~4095)
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
- return 0;
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
-
-static inline int i915_vma_pin_count(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_PIN_MASK;
-}
-
-static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
-{
- return i915_vma_pin_count(vma);
-}
-
-static inline void __i915_vma_pin(struct i915_vma *vma)
-{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
-}
-
-static inline void __i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
- vma->flags--;
-}
-
-static inline void i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- __i915_vma_unpin(vma);
-}
-
-/**
- * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
- * @vma: VMA to iomap
- *
- * The passed in VMA has to be pinned in the global GTT mappable region.
- * An extra pinning of the VMA is acquired for the return iomapping,
- * the caller must call i915_vma_unpin_iomap to relinquish the pinning
- * after the iomapping is no longer required.
- *
- * Callers must hold the struct_mutex.
- *
- * Returns a valid iomapped pointer or ERR_PTR.
- */
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
-#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
-
-/**
- * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
- * @vma: VMA to unpin
- *
- * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
- *
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
- */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
-
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
-
#endif
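i915_vm_to_ggtt(), relocated into this header above, is an instance of the container_of idiom: given a pointer to an embedded member, subtract the member's offset to recover the enclosing structure. A freestanding userspace demonstration with a local container_of standing in for the kernel macro:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { int id; };
struct ggtt { int mappable_end; struct address_space base; };

int main(void)
{
	struct ggtt g = { .mappable_end = 256, .base = { .id = 42 } };
	struct address_space *vm = &g.base;

	/* step from the embedded base back to the enclosing ggtt */
	struct ggtt *back = container_of(vm, struct ggtt, base);

	printf("mappable_end=%d id=%d\n", back->mappable_end, back->base.id);
	return 0;
}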
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
new file mode 100644
index 000000000000..6a368de9d81e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+#include <linux/reservation.h>
+
+#include <drm/drm_vma_manager.h>
+#include <drm/drm_gem.h>
+#include <drm/drmP.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before to binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
+
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
+};
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+ struct rb_root vma_tree;
+
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ struct list_head global_link;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ struct list_head userfault_link;
+
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
+
+ struct list_head batch_pool_link;
+
+ unsigned long flags;
+
+ /**
+ * Have we taken a reference for the object for incomplete GPU
+ * activity?
+ */
+#define I915_BO_ACTIVE_REF 0
+
+ /*
+ * Is the object to be mapped as read-only to the GPU
+ * Only honoured if hardware has relevant pte bit
+ */
+ unsigned long gt_ro:1;
+ unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
+
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+ struct i915_gem_active frontbuffer_write;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
+
+ /** Count of VMA actually bound by this object */
+ unsigned int bind_count;
+ unsigned int active_count;
+ unsigned int pin_display;
+
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
+
+ /** Breadcrumb of last rendering to the buffer.
+ * There can only be one writer, but we allow for multiple readers.
+ * If there is a writer that necessarily implies that all other
+ * read requests are complete - but we may only be lazily clearing
+ * the read requests. A read request is naturally the most recent
+ * request on a ring, so we may have two different write and read
+ * requests on one ring where the write request is older than the
+ * read request. This allows for the CPU to read from an active
+ * buffer by only waiting for the write to complete.
+ */
+ struct reservation_object *resv;
+
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+
+ struct reservation_object __builtin_resv;
+};
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @file: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @file, NULL
+ * otherwise. This object is only valid whilst under the RCU read lock, and
+ * note carefully that the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_reference(&obj->base);
+ return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ __drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
+
+static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return obj->active_count;
+}
+
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
+
+#endif
+
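One detail in the header above deserves a note: tiling_and_stride packs two values into a single word. A fenceable stride must be a multiple of FENCE_MINIMUM_STRIDE (128), so its low seven bits are always zero and are reused to hold the tiling mode, which the TILING_MASK/STRIDE_MASK helpers then separate. A standalone illustration with made-up values:

#include <assert.h>
#include <stdio.h>

#define FENCE_MINIMUM_STRIDE 128
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

int main(void)
{
	unsigned int tiling = 2;	/* e.g. Y tiling */
	unsigned int stride = 4096;	/* must be a multiple of 128 */
	unsigned int packed;

	assert((stride & TILING_MASK) == 0);
	packed = stride | tiling;

	printf("tiling=%u stride=%u\n",
	       packed & TILING_MASK, packed & STRIDE_MASK);
	return 0;
}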
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0b3b051a5683..27e8f257fb39 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -113,6 +113,82 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+static void
+__i915_priotree_add_dependency(struct i915_priotree *pt,
+ struct i915_priotree *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &pt->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+}
+
+static int
+i915_priotree_add_dependency(struct drm_i915_private *i915,
+ struct i915_priotree *pt,
+ struct i915_priotree *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+ return 0;
+}
+
+static void
+i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+{
+ struct i915_dependency *dep, *next;
+
+ GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));
+
+ /* Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+}
+
+static void
+i915_priotree_init(struct i915_priotree *pt)
+{
+ INIT_LIST_HEAD(&pt->signalers_list);
+ INIT_LIST_HEAD(&pt->waiters_list);
+ RB_CLEAR_NODE(&pt->node);
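+ /* INT_MIN is a sentinel: no priority assigned until engine->schedule() */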
+ pt->priority = INT_MIN;
+}
+
void i915_gem_retire_noop(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
@@ -124,7 +200,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_sw_fence_done(&request->submit));
+ GEM_BUG_ON(!i915_sw_fence_done(&request->execute));
GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!request->i915->gt.active_requests);
trace_i915_gem_request_retire(request);
@@ -142,7 +221,12 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
*/
list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
- request->i915->gt.active_requests--;
+ if (!--request->i915->gt.active_requests) {
+ GEM_BUG_ON(!request->i915->gt.awake);
+ mod_delayed_work(request->i915->wq,
+ &request->i915->gt.idle_work,
+ msecs_to_jiffies(100));
+ }
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -182,6 +266,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
i915_gem_context_put(request->ctx);
dma_fence_signal(&request->fence);
+
+ i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);
}
@@ -241,9 +327,8 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
- while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
- yield();
- yield();
+ while (intel_breadcrumbs_busy(i915))
+ cond_resched(); /* spin until threads are complete */
}
atomic_set(&timeline->next_seqno, seqno);
@@ -307,25 +392,16 @@ static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
return atomic_inc_return(&tl->next_seqno);
}
-static int __i915_sw_fence_call
-submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request =
- container_of(fence, typeof(*request), submit);
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
- unsigned long flags;
u32 seqno;
- if (state != FENCE_COMPLETE)
- return NOTIFY_DONE;
-
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
-
- /* Will be called from irq-context when using foreign DMA fences */
- spin_lock_irqsave(&timeline->lock, flags);
+ assert_spin_locked(&timeline->lock);
seqno = timeline_get_seqno(timeline->common);
GEM_BUG_ON(!seqno);
@@ -345,14 +421,43 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
GEM_BUG_ON(!request->global_seqno);
engine->emit_breadcrumb(request,
request->ring->vaddr + request->postfix);
- engine->submit_request(request);
- spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&request->timeline->lock);
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
- spin_unlock_irqrestore(&timeline->lock, flags);
+ i915_sw_fence_commit(&request->execute);
+}
+
+void i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ __i915_gem_request_submit(request);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ if (state == FENCE_COMPLETE) {
+ struct drm_i915_gem_request *request =
+ container_of(fence, typeof(*request), submit);
+
+ request->engine->submit_request(request);
+ }
+
+ return NOTIFY_DONE;
+}
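+
+/* The execute fence is completed directly by __i915_gem_request_submit()
+ * via i915_sw_fence_commit(), so there is nothing left for the callback to
+ * do here.
+ */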
+static int __i915_sw_fence_call
+execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
return NOTIFY_DONE;
}
@@ -441,6 +546,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
__timeline_get_seqno(req->timeline->common));
i915_sw_fence_init(&req->submit, submit_notify);
+ i915_sw_fence_init(&req->execute, execute_notify);
+ /* Ensure that the execute fence completes after the submit fence -
+ * as we complete the execute fence from within the submit fence
+ * callback, its completion would otherwise be visible first.
+ */
+ i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
+
+ i915_priotree_init(&req->priotree);
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
@@ -495,6 +608,14 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
GEM_BUG_ON(to == from);
+ if (to->engine->schedule) {
+ ret = i915_priotree_add_dependency(to->i915,
+ &to->priotree,
+ &from->priotree);
+ if (ret < 0)
+ return ret;
+ }
+
if (to->timeline == from->timeline)
return 0;
@@ -650,6 +771,8 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
if (dev_priv->gt.awake)
return;
+ GEM_BUG_ON(!dev_priv->gt.active_requests);
+
intel_runtime_pm_get_noresume(dev_priv);
dev_priv->gt.awake = true;
@@ -718,9 +841,15 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev)
+ if (prev) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
+ if (engine->schedule)
+ __i915_priotree_add_dependency(&request->priotree,
+ &prev->priotree,
+ &request->dep,
+ 0);
+ }
spin_lock_irq(&timeline->lock);
list_add_tail(&request->link, &timeline->requests);
@@ -737,6 +866,19 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
i915_gem_mark_busy(engine);
+ /* Let the backend know a new request has arrived that may need
+ * to adjust the existing execution schedule due to a high priority
+ * request - i.e. we may want to preempt the current request in order
+ * to run a high priority dependency chain *before* we can execute this
+ * request.
+ *
+ * This is called before the request is ready to run so that we can
+ * decide whether to preempt the entire chain so that it is ready to
+ * run at the earliest possible convenience.
+ */
+ if (engine->schedule)
+ engine->schedule(request, request->ctx->priority);
+
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -817,9 +959,9 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
}
static long
-__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
- unsigned int flags,
- long timeout)
+__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -831,9 +973,9 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request,
add_wait_queue(q, &reset);
do {
- prepare_to_wait(&request->submit.wait, &wait, state);
+ prepare_to_wait(&request->execute.wait, &wait, state);
- if (i915_sw_fence_done(&request->submit))
+ if (i915_sw_fence_done(&request->execute))
break;
if (flags & I915_WAIT_LOCKED &&
@@ -851,7 +993,7 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request,
timeout = io_schedule_timeout(timeout);
} while (timeout);
- finish_wait(&request->submit.wait, &wait);
+ finish_wait(&request->execute.wait, &wait);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(q, &reset);
@@ -903,13 +1045,14 @@ long i915_wait_request(struct drm_i915_gem_request *req,
trace_i915_gem_request_wait_begin(req);
- if (!i915_sw_fence_done(&req->submit)) {
- timeout = __i915_request_wait_for_submit(req, flags, timeout);
+ if (!i915_sw_fence_done(&req->execute)) {
+ timeout = __i915_request_wait_for_execute(req, flags, timeout);
if (timeout < 0)
goto complete;
- GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+ GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
}
+ GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */
@@ -1013,13 +1156,6 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
if (!dev_priv->gt.active_requests)
return;
- GEM_BUG_ON(!dev_priv->gt.awake);
-
for_each_engine(engine, dev_priv, id)
engine_retire_requests(engine);
-
- if (!dev_priv->gt.active_requests)
- mod_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
- msecs_to_jiffies(100));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 75f8360b3421..e2b077df2da0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -30,6 +30,9 @@
#include "i915_gem.h"
#include "i915_sw_fence.h"
+struct drm_file;
+struct drm_i915_gem_object;
+
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
@@ -41,6 +44,33 @@ struct intel_signal_node {
struct intel_wait wait;
};
+struct i915_dependency {
+ struct i915_priotree *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+/* Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ */
+struct i915_priotree {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct rb_node node;
+ int priority;
+#define I915_PRIORITY_MAX 1024
+#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
/**
* Request queue structure.
*
@@ -84,8 +114,34 @@ struct drm_i915_gem_request {
struct intel_timeline *timeline;
struct intel_signal_node signaling;
+ /* Fences for the various phases in the request's lifetime.
+ *
+ * The submit fence is used to await upon all of the request's
+ * dependencies. When it is signaled, the request is ready to run.
+ * It is used by the driver to then queue the request for execution.
+ *
+ * The execute fence is used to signal when the request has been
+ * sent to hardware.
+ *
+ * It is illegal for the submit fence of one request to wait upon the
+ * execute fence of an earlier request. It should be sufficient to
+ * wait upon the submit fence of the earlier request.
+ */
struct i915_sw_fence submit;
+ struct i915_sw_fence execute;
wait_queue_t submitq;
+ wait_queue_t execq;
+
+ /* A list of everyone we wait upon, and everyone who waits upon us.
+ * Even though we will not be submitted to the hardware before the
+ * submit fence is signaled (it waits for all external events as well
+ * as our own requests), the scheduler still needs to know the
+ * dependency tree for the lifetime of the request (from execbuf
+ * to retirement), i.e. bidirectional dependency information for the
+ * request not tied to individual fences.
+ */
+ struct i915_priotree priotree;
+ struct i915_dependency dep;
u32 global_seqno;
@@ -143,9 +199,6 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
-
- /** Link in the execlist submission queue, guarded by execlist_lock. */
- struct list_head execlist_link;
};
extern const struct dma_fence_ops i915_fence_ops;
@@ -162,18 +215,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
-static inline u32
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
- return req ? req->global_seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
- return req ? req->engine : NULL;
-}
-
static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
@@ -226,6 +267,9 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
+void __i915_gem_request_submit(struct drm_i915_gem_request *request);
+void i915_gem_request_submit(struct drm_i915_gem_request *request);
+
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b1d367dba347..ebaa941c83af 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -89,9 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
@@ -253,7 +252,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -264,7 +263,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
*/
- r = devm_request_mem_region(dev->dev, base + 1,
+ r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
ggtt->stolen_size - 1,
"Graphics Stolen Memory");
/*
@@ -408,9 +407,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-int i915_gem_init_stolen(struct drm_device *dev)
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
@@ -418,7 +416,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+ if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
@@ -427,7 +425,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (ggtt->stolen_size == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
if (dev_priv->mm.stolen_base == 0)
return 0;
@@ -515,12 +513,10 @@ i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st;
struct scatterlist *sg;
- DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > ggtt->stolen_size - size);
+ GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -529,11 +525,11 @@ i915_pages_create_for_stolen(struct drm_device *dev,
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
@@ -557,7 +553,7 @@ i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- /* Should only be called during free */
+ /* Should only be called from i915_gem_object_release_stolen() */
sg_free_table(pages);
kfree(pages);
}
@@ -566,15 +562,16 @@ static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
+
+ GEM_BUG_ON(!stolen);
__i915_gem_object_unpin_pages(obj);
- if (obj->stolen) {
- i915_gem_stolen_remove_node(dev_priv, obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
+ i915_gem_stolen_remove_node(dev_priv, stolen);
+ kfree(stolen);
}
+
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
@@ -596,7 +593,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->cache_level = HAS_LLC(to_i915(dev)) ?
+ I915_CACHE_LLC : I915_CACHE_NONE;
if (i915_gem_object_pin_pages(obj))
goto cleanup;
@@ -619,7 +617,6 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 251d51b01174..c85e7b06bdba 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -60,9 +60,9 @@
/* Check pitch constraints for all chips & tiling formats */
static bool
-i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_private *dev_priv,
+ int stride, int size, int tiling_mode)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int tile_width;
/* Linear is always fine */
@@ -81,10 +81,10 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
@@ -104,7 +104,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return false;
/* 965+ just needs multiples of tile width */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -199,7 +199,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- if (!i915_tiling_ok(dev,
+ if (!i915_tiling_ok(dev_priv,
args->stride, obj->base.size, args->tiling_mode)) {
i915_gem_object_put(obj);
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index fc8f13a79f8f..bf8a471b61e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -24,9 +24,11 @@
#include "i915_drv.h"
-int i915_gem_timeline_init(struct drm_i915_private *i915,
- struct i915_gem_timeline *timeline,
- const char *name)
+static int __i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name,
+ struct lock_class_key *lockclass,
+ const char *lockname)
{
unsigned int i;
u64 fences;
@@ -47,8 +49,11 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
tl->fence_context = fences++;
tl->common = timeline;
-
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
+#else
spin_lock_init(&tl->lock);
+#endif
init_request_active(&tl->last_request, NULL);
INIT_LIST_HEAD(&tl->requests);
}
@@ -56,6 +61,26 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
return 0;
}
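+
+/* Give each class of timeline its own lockdep class so that a request's
+ * per-context timeline lock can be taken beneath the engine's execution
+ * timeline lock (see __i915_gem_request_submit()) without false positives.
+ */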
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915, timeline, name,
+ &class, "&timeline->lock");
+}
+
+int i915_gem_timeline_init__global(struct drm_i915_private *i915)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915,
+ &i915->gt.global_timeline,
+ "[execution]",
+ &class, "&global_timeline->lock");
+}
+
void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
{
lockdep_assert_held(&tl->i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index f2bf7b1d49a1..98d99a62b4ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -67,6 +67,7 @@ struct i915_gem_timeline {
int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *tl,
const char *name);
+int i915_gem_timeline_init__global(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 64261639f547..107ddf51065e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -753,12 +753,13 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+ if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 204093f3eaa5..ae84aa4b1467 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -528,8 +528,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
- struct drm_device *dev = error_priv->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
@@ -573,7 +572,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev)) {
+ if (HAS_CSR(dev_priv)) {
struct intel_csr *csr = &dev_priv->csr;
err_printf(m, "DMC loaded: %s\n",
@@ -585,7 +584,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
@@ -600,10 +599,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
@@ -708,7 +707,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
intel_overlay_print_error_state(m, error->overlay);
if (error->display)
- intel_display_print_error_state(m, dev, error->display);
+ intel_display_print_error_state(m, dev_priv, error->display);
out:
if (m->bytes == 0 && m->err)
@@ -861,16 +860,19 @@ out:
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+ struct drm_i915_gem_request *request;
+
+ request = __i915_gem_active_peek(active);
+ return request ? request->global_seqno : 0;
}
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
- engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
- return engine ? engine->id : -1;
+ request = __i915_gem_active_peek(active);
+ return request ? request->engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
@@ -884,8 +886,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
- err->wseqno = __active_get_seqno(&vma->last_write);
- err->engine = __active_get_engine_id(&vma->last_write);
+ err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+ err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
@@ -1440,7 +1442,6 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* General organization
@@ -1461,7 +1462,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_GEN7(dev_priv))
error->err_int = I915_READ(GEN7_ERR_INT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
@@ -1473,10 +1474,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
error->forcewake = I915_READ_FW(FORCEWAKE_MT);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
error->derrmr = I915_READ(DERRMR);
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
@@ -1489,10 +1490,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
/* 4: Everything else */
- if (HAS_HW_CONTEXTS(dev))
+ if (HAS_HW_CONTEXTS(dev_priv))
error->ccid = I915_READ(CCID);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 666dab7a675a..4462112725ef 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -629,11 +629,23 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
struct drm_i915_private *dev_priv = rq->i915;
- unsigned int engine_id = rq->engine->id;
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that any the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ rq->previous_context = engine->last_context;
+ engine->last_context = rq->ctx;
+
+ i915_gem_request_submit(rq);
+
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
@@ -1520,6 +1532,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
/* Take over from manual control of ELSP (execlists) */
for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
+ engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
list_for_each_entry(request,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6d7505b5c5e7..07ca71cabb2b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2848,10 +2848,8 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ibx_irq_reset(struct drm_device *dev)
+static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_NOP(dev_priv))
return;
@@ -2881,12 +2879,10 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
GEN5_IRQ_RESET(GT);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
GEN5_IRQ_RESET(GEN6_PM);
}
@@ -2951,9 +2947,9 @@ static void ironlake_irq_reset(struct drm_device *dev)
if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2963,7 +2959,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -2999,7 +2995,7 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3222,7 +3218,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
@@ -3242,7 +3238,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
@@ -3466,7 +3462,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3678,7 +3674,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -3712,7 +3708,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_USER_INTERRUPT;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
@@ -3880,7 +3876,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4145,7 +4141,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
- if (HAS_GUC_SCHED(dev))
+ if (HAS_GUC_SCHED(dev_priv))
dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
/* Let's track the enabled rps events */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 629e4334719c..d46ffe7086bc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -39,7 +39,7 @@ struct i915_params i915 __read_mostly = {
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = -1,
- .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
.fastboot = 0,
@@ -145,9 +145,10 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
-MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support.");
+module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
+MODULE_PARM_DESC(alpha_support,
+ "Enable alpha quality driver support for latest hardware. "
+ "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 94efc899c1ef..817ad959941e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -40,7 +40,7 @@ struct i915_params {
int enable_ppgtt;
int enable_execlists;
int enable_psr;
- unsigned int preliminary_hw_support;
+ unsigned int alpha_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2a419500b81a..fce8e198bc76 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -363,6 +363,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_guc = 1,
+ .has_decoupled_mmio = 1,
.ddb_size = 512,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -439,9 +440,10 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
- DRM_INFO("This hardware requires preliminary hardware support.\n"
- "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+ DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
+ "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
+ "to enable support in this kernel version, or check for kernel updates.\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3361d7ffc63e..c70c07a7b586 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7342,6 +7342,13 @@ enum {
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+/* Decoupled MMIO register pair for kernel driver */
+#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00)
+#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04)
+#define GEN9_DECOUPLED_DW1_GO (1<<31)
+#define GEN9_DECOUPLED_PD_SHIFT 28
+#define GEN9_DECOUPLED_OP_SHIFT 24
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 344cbf39cfa9..b0e1e7ca75da 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,12 +29,10 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration control */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
@@ -42,12 +40,10 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
-static void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on platforms that support FBC */
@@ -57,7 +53,7 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev_priv);
}
int i915_save_state(struct drm_device *dev)
@@ -68,14 +64,14 @@ int i915_save_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_save_display(dev);
+ i915_save_display(dev_priv);
if (IS_GEN4(dev_priv))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
@@ -114,15 +110,15 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
- i915_restore_display(dev);
+ i915_restore_display(dev_priv);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 95f2f12e0917..147420ccf49c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -116,11 +116,14 @@ static void i915_sw_fence_await(struct i915_sw_fence *fence)
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key)
{
BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
- init_waitqueue_head(&fence->wait);
+ __init_waitqueue_head(&fence->wait, name, key);
kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 707dfc4f0da5..7508d23f823b 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -40,7 +40,22 @@ typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
enum i915_sw_fence_notify state);
#define __i915_sw_fence_call __aligned(4)
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key);
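+
+/* Mirror the init_waitqueue_head() lockdep pattern: give each fence
+ * initialisation call-site its own static lock class.
+ */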
+#ifdef CONFIG_LOCKDEP
+#define i915_sw_fence_init(fence, fn) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __i915_sw_fence_init((fence), (fn), #fence, &__key); \
+} while (0)
+#else
+#define i915_sw_fence_init(fence, fn) \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL)
+#endif
+
void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
new file mode 100644
index 000000000000..a792dcb902b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_vma.h"
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_frontbuffer.h"
+
+#include <drm/drm_gem.h>
+
+static void
+i915_vma_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *rq)
+{
+ const unsigned int idx = rq->engine->id;
+ struct i915_vma *vma =
+ container_of(active, struct i915_vma, last_read[idx]);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+ i915_vma_clear_active(vma, idx);
+ if (i915_vma_is_active(vma))
+ return;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+ WARN_ON(i915_vma_unbind(vma));
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ if (--obj->active_count)
+ return;
+
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to, of course!)
+ */
+ if (obj->bind_count)
+ list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+
+ obj->mm.dirty = true; /* be paranoid */
+
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static struct i915_vma *
+__i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_vma *vma;
+ struct rb_node *rb, **p;
+ int i;
+
+ GEM_BUG_ON(vm->closed);
+
+ vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->exec_list);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ init_request_active(&vma->last_read[i], i915_vma_retire);
+ init_request_active(&vma->last_fence, NULL);
+ list_add(&vma->vm_link, &vm->unbound_list);
+ vma->vm = vm;
+ vma->obj = obj;
+ vma->size = obj->base.size;
+
+ if (view) {
+ vma->ggtt_view = *view;
+ if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ vma->size = view->params.partial.size;
+ vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ vma->size =
+ intel_rotation_info_size(&view->params.rotated);
+ vma->size <<= PAGE_SHIFT;
+ }
+ }
+
+ if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
+ list_add(&vma->obj_link, &obj->vma_list);
+ } else {
+ i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
+ }
+
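+ /* Keep the per-object rbtree sorted by i915_vma_compare() so that a
+ * (vm, view) pair can be found in O(log N).
+ */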
+ rb = NULL;
+ p = &obj->vma_tree.rb_node;
+ while (*p) {
+ struct i915_vma *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, struct i915_vma, obj_node);
+ if (i915_vma_compare(pos, vm, view) < 0)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&vma->obj_node, rb, p);
+ rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+ return __i915_vma_create(obj, vm, view);
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 bind_flags;
+ u32 vma_flags;
+ int ret;
+
+ if (WARN_ON(flags == 0))
+ return -EINVAL;
+
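+ /* Translate the requested PIN_* flags into vma bind flags, masking out
+ * bindings already in place unless PIN_UPDATE forces a rebind.
+ */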
+ bind_flags = 0;
+ if (flags & PIN_GLOBAL)
+ bind_flags |= I915_VMA_GLOBAL_BIND;
+ if (flags & PIN_USER)
+ bind_flags |= I915_VMA_LOCAL_BIND;
+
+ vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ if (flags & PIN_UPDATE)
+ bind_flags |= vma_flags;
+ else
+ bind_flags &= ~vma_flags;
+ if (bind_flags == 0)
+ return 0;
+
+ if (vma_flags == 0 && vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma);
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+
+ vma->flags |= bind_flags;
+ return 0;
+}
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ /* Access through the GTT requires the device to be awake. */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+ return IO_ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return IO_ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ __i915_vma_pin(vma);
+ return ptr;
+}
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+
+ vma = fetch_and_zero(p_vma);
+ if (!vma)
+ return;
+
+ obj = vma->obj;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_release_unless_active(obj);
+}
+
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ if (!drm_mm_node_allocated(&vma->node))
+ return false;
+
+ if (vma->node.size < size)
+ return true;
+
+ if (alignment && vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
+ return true;
+
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
+
+ if (flags & PIN_OFFSET_FIXED &&
+ vma->node.start != (flags & PIN_OFFSET_MASK))
+ return true;
+
+ return false;
+}
+
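+/* Recompute whether the vma fits entirely within the mappable aperture and
+ * whether its size and alignment allow it to be used with a fence register.
+ */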
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj));
+ fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj),
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + fence_size <=
+ dev_priv->ggtt.mappable_end);
+
+ /*
+ * Explicitly disable for rotated VMA since the display does not
+ * need the fence and the VMA is not accessible to other users.
+ */
+ if (mappable && fenceable &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ vma->flags |= I915_VMA_CAN_FENCE;
+ else
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *other;
+
+ /*
+ * On some machines we have to be careful when putting differing types
+ * of snoopable memory together to avoid the prefetcher crossing memory
+ * domains and dying. During vm initialisation, we decide whether or not
+ * these constraints apply and set the drm_mm.color_adjust
+ * appropriately.
+ */
+ if (vma->vm->mm.color_adjust == NULL)
+ return true;
+
+ if (!drm_mm_node_allocated(gtt_space))
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+/**
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
+ * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry, to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_gem_object *obj = vma->obj;
+ u64 start, end;
+ int ret;
+
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ size = max(size, vma->size);
+ if (flags & PIN_MAPPABLE)
+ size = i915_gem_get_ggtt_size(dev_priv, size,
+ i915_gem_object_get_tiling(obj));
+
+ alignment = max(max(alignment, vma->display_alignment),
+ i915_gem_get_ggtt_alignment(dev_priv, size,
+ i915_gem_object_get_tiling(obj),
+ flags & PIN_MAPPABLE));
+
+ start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+
+ end = vma->vm->total;
+ if (flags & PIN_MAPPABLE)
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ if (flags & PIN_ZONE_4G)
+ end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
+ */
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+ size, obj->base.size,
+ flags & PIN_MAPPABLE ? "mappable" : "total",
+ end);
+ return -E2BIG;
+ }
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ if (flags & PIN_OFFSET_FIXED) {
+ u64 offset = flags & PIN_OFFSET_MASK;
+ if (offset & (alignment - 1) || offset > end - size) {
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ vma->node.start = offset;
+ vma->node.size = size;
+ vma->node.color = obj->cache_level;
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret) {
+ ret = i915_gem_evict_for_vma(vma);
+ if (ret == 0)
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret)
+ goto err_unpin;
+ }
+ } else {
+ u32 search_flag, alloc_flag;
+
+ if (flags & PIN_HIGH) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ }
+
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ if (alignment <= 4096)
+ alignment = 0;
+
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+ &vma->node,
+ size, alignment,
+ obj->cache_level,
+ start, end,
+ search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(vma->vm, size, alignment,
+ obj->cache_level,
+ start, end,
+ flags);
+ if (ret == 0)
+ goto search_free;
+
+ goto err_unpin;
+ }
+
+ GEM_BUG_ON(vma->node.start < start);
+ GEM_BUG_ON(vma->node.start + vma->node.size > end);
+ }
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ obj->bind_count++;
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+}
+
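+/* 'bound' snapshots vma->flags on entry so that a freshly added GLOBAL bind
+ * can be detected afterwards and the map-and-fenceable state refreshed.
+ */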
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags)
+{
+ unsigned int bound = vma->flags;
+ int ret;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+ GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+
+ if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ ret = i915_vma_insert(vma, size, alignment, flags);
+ if (ret)
+ goto err;
+ }
+
+ ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ if (ret)
+ goto err;
+
+ if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+ __i915_vma_set_map_and_fenceable(vma);
+
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+ return 0;
+
+err:
+ __i915_vma_unpin(vma);
+ return ret;
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->node.allocated);
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
+ GEM_BUG_ON(vma->fence);
+
+ list_del(&vma->vm_link);
+ if (!i915_vma_is_ggtt(vma))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+ list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
+ if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+ WARN_ON(i915_vma_unbind(vma));
+}
+
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ unsigned long active;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ /* First wait upon any activity as retiring the request may
+ * have side-effects such as unpinning or even unbinding this vma.
+ */
+ active = i915_vma_get_active(vma);
+ if (active) {
+ int idx;
+
+ /* When a closed VMA is retired, it is unbound - eek.
+ * In order to prevent it from being recursively closed,
+ * take a pin on the vma so that the second unbind is
+ * aborted.
+ *
+ * Even more scary is that the retire callback may free
+ * the object (last active vma). To prevent the explosion
+ * we defer the actual object free to a worker that can
+ * only proceed once it acquires the struct_mutex (which
+ * we currently hold, therefore it cannot free this object
+ * before we are finished).
+ */
+ __i915_vma_pin(vma);
+
+ for_each_active(active, idx) {
+ ret = i915_gem_active_retire(&vma->last_read[idx],
+ &vma->vm->dev->struct_mutex);
+ if (ret)
+ break;
+ }
+
+ __i915_vma_unpin(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ }
+
+ if (i915_vma_is_pinned(vma))
+ return -EBUSY;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
+ GEM_BUG_ON(obj->bind_count == 0);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_vma_is_map_and_fenceable(vma)) {
+ /* release the fence reg _after_ flushing */
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ __i915_vma_iounmap(vma);
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+ }
+
+ if (likely(!vma->vm->closed)) {
+ trace_i915_vma_unbind(vma);
+ vma->vm->unbind_vma(vma);
+ }
+ vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+
+ drm_mm_remove_node(&vma->node);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ if (vma->pages != obj->mm.pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist. */
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_link,
+ &to_i915(obj->base.dev)->mm.unbound_list);
+
+ /* And finally now the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+
+destroy:
+ if (unlikely(i915_vma_is_closed(vma)))
+ i915_vma_destroy(vma);
+
+ return 0;
+}
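+
+/* Editor's illustrative sketch, not part of this patch: a typical caller
+ * already holds struct_mutex and treats -EBUSY as "still pinned", e.g.
+ * when scanning for something to evict:
+ *
+ *     ret = i915_vma_unbind(vma);
+ *     if (ret == -EBUSY)
+ *             continue;
+ *     if (ret)
+ *             return ret;
+ */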
+
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
new file mode 100644
index 000000000000..85446f0b0b3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_H__
+#define __I915_VMA_H__
+
+#include <linux/io-mapping.h>
+
+#include <drm/drm_mm.h>
+
+#include "i915_gem_gtt.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
+#include "i915_gem_request.h"
+
+enum i915_cache_level;
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding the object into the
+ * address space, nor after unbinding it from one.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct drm_i915_fence_reg *fence;
+ struct sg_table *pages;
+ void __iomem *iomap;
+ u64 size;
+ u64 display_alignment;
+
+ unsigned int flags;
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits.
+ */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW BIT(5)
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND BIT(6)
+#define I915_VMA_LOCAL_BIND BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT BIT(8)
+#define I915_VMA_CAN_FENCE BIT(9)
+#define I915_VMA_CLOSED BIT(10)
+
+ unsigned int active;
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_fence;
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also the view
+ * assumed by GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+};
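+
+/* Editor's sketch, derived from the #defines above and not part of this
+ * patch: vma->flags packs the pin count and all state bits into a
+ * single unsigned int:
+ *
+ *     bits 0-3: pin count (I915_VMA_PIN_MASK)
+ *     bit 4:    unused headroom below the overflow bit
+ *     bit 5:    I915_VMA_PIN_OVERFLOW (matches PIN_MBZ, see i915_vma_pin())
+ *     bit 6:    I915_VMA_GLOBAL_BIND
+ *     bit 7:    I915_VMA_LOCAL_BIND
+ *     bit 8:    I915_VMA_GGTT
+ *     bit 9:    I915_VMA_CAN_FENCE
+ *     bit 10:   I915_VMA_CLOSED
+ */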
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+ return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+ return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+ unsigned int engine)
+{
+ return vma->active & BIT(engine);
+}
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(upper_32_bits(vma->node.start));
+ GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+ return lower_32_bits(vma->node.start);
+}
+
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+ i915_gem_object_get(vma->obj);
+ return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+ i915_gem_object_put(vma->obj);
+}
+
+static inline long
+i915_vma_compare(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+ if (vma->vm != vm)
+ return vma->vm - vm;
+
+ if (!view)
+ return vma->ggtt_view.type;
+
+ if (vma->ggtt_view.type != view->type)
+ return vma->ggtt_view.type - view->type;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params));
+}
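+
+/* Editor's illustrative sketch, not part of this patch: the comparator
+ * above yields a stable ordering, so a lookup in obj->vma_tree is the
+ * usual rbtree walk:
+ *
+ *     struct rb_node *rb = obj->vma_tree.rb_node;
+ *     while (rb) {
+ *             struct i915_vma *vma =
+ *                     rb_entry(rb, struct i915_vma, obj_node);
+ *             long cmp = i915_vma_compare(vma, vm, view);
+ *
+ *             if (cmp == 0)
+ *                     return vma;
+ *             rb = cmp < 0 ? rb->rb_right : rb->rb_left;
+ *     }
+ */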
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ /* Pin early to prevent the shrinker/eviction logic from destroying
+ * our vma as we insert and bind.
+ */
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ return 0;
+
+ return __i915_vma_do_pin(vma, size, alignment, flags);
+}
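+
+/* Editor's worked example, not part of this patch: pinning an already
+ * GGTT-bound vma with i915_vma_pin(vma, 0, 0, PIN_GLOBAL) increments
+ * the pin count in the low bits of vma->flags; xor-ing with PIN_GLOBAL
+ * then clears I915_VMA_GLOBAL_BIND, leaving no bits set under
+ * I915_VMA_BIND_MASK, so the fast path returns 0. A missing binding, or
+ * a count that has overflowed into I915_VMA_PIN_OVERFLOW, leaves a bit
+ * set in the mask and falls through to __i915_vma_do_pin().
+ */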
+
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+ return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+ vma->flags++;
+ GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ __i915_vma_unpin(vma);
+}
+
+/**
+ * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the returned iomapping;
+ * the caller must call i915_vma_unpin_iomap() to relinquish the pinning
+ * once the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
+ * @vma: VMA to unpin
+ *
+ * Releases the pinning acquired by i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_unpin(vma);
+}
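+
+/* Editor's illustrative usage, not part of this patch, assuming the vma
+ * can be pinned into the mappable GGTT; value and offset are
+ * placeholders:
+ *
+ *     void __iomem *ptr;
+ *
+ *     ptr = i915_vma_pin_iomap(vma);
+ *     if (IS_ERR(ptr))
+ *             return PTR_ERR(ptr);
+ *     writel(value, ptr + offset);
+ *     i915_vma_unpin_iomap(vma);
+ */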
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ return sg_page(vma->pages->sgl);
+}
+
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. Fencing
+ * status must be synchronized first by calling i915_vma_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ vma->fence->pin_count++;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence(). It handles objects both with and without an
+ * attached fence correctly; callers do not need to distinguish the two.
+ */
+static inline void
+i915_vma_unpin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
+ }
+}
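+
+/* Editor's illustrative pairing, not part of this patch: fence state is
+ * synchronized with i915_vma_get_fence() and then pinned across the span
+ * where the hardware fence must stay valid:
+ *
+ *     ret = i915_vma_get_fence(vma);
+ *     if (ret)
+ *             return ret;
+ *     if (i915_vma_pin_fence(vma)) {
+ *             ... fenced GTT access here ...
+ *             i915_vma_unpin_fence(vma);
+ *     }
+ */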
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 71ab735cb82b..dbe9fb41ae53 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -106,6 +106,7 @@ intel_plane_destroy_state(struct drm_plane *plane,
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
@@ -167,6 +168,14 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
+ /* CHV ignores the mirror bit when the rotate bit is set :( */
+ if (IS_CHERRYVIEW(dev_priv) &&
+ state->rotation & DRM_ROTATE_180 &&
+ state->rotation & DRM_REFLECT_X) {
+ DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ return -EINVAL;
+ }
+
intel_state->base.visible = false;
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 813fd74d9c8d..1c509f7410f5 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -574,23 +574,26 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @intel_encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after link training has completed.
*/
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
enum port port = intel_encoder->port;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
- connector = drm_select_eld(encoder);
- if (!connector)
+ connector = conn_state->connector;
+ if (!connector || !connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -601,7 +604,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 5ab646ef8c9f..7ffab1abc518 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1147,7 +1147,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->raw[25];
+ aux_channel = child->common.aux_channel;
ddc_pin = child->common.ddc_pin;
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -1677,7 +1677,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+ enum port port)
{
static const struct {
u16 dp, hdmi;
@@ -1691,22 +1692,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
- int i;
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
+ if (p_child->common.dvo_port == port_mapping[port].dp)
+ return true;
+
+ /* Only accept an HDMI dvo_port as DP++ if it has an AUX channel */
+ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
+ p_child->common.aux_channel != 0)
+ return true;
+
+ return false;
+}
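+
+/* Editor's note, not part of this patch: VBTs may describe a DP++
+ * (dual-mode) port either via the DP dvo_port or via the paired HDMI
+ * dvo_port; since a genuine DP++ port always needs an AUX channel, a
+ * zero aux_channel is what disqualifies the HDMI alias above.
+ */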
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ if (child_dev_is_dp_dual_mode(p_child, port))
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index c410d3d6465f..c9c46a538edb 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -629,35 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
- /* To avoid the task_struct disappearing beneath us as we wake up
- * the process, we must first inspect the task_struct->state under the
- * RCU lock, i.e. as we call wake_up_process() we must be holding the
- * rcu_read_lock().
- */
- for_each_engine(engine, i915, id)
- if (unlikely(intel_engine_wakeup(engine)))
- mask |= intel_engine_flag(engine);
+ for_each_engine(engine, i915, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
- return mask;
-}
+ spin_lock_irq(&b->lock);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int mask = 0;
+ if (b->first_wait) {
+ wake_up_process(b->first_wait->tsk);
+ mask |= intel_engine_flag(engine);
+ }
- for_each_engine(engine, i915, id) {
- if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
- wake_up_process(engine->breadcrumbs.signaler);
+ if (b->first_signal) {
+ wake_up_process(b->signaler);
mask |= intel_engine_flag(engine);
}
+
+ spin_unlock_irq(&b->lock);
}
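+ /* Editor's note, not part of this patch: holding b->lock across the
+ * wakeups is what replaces the old RCU scheme; the waiter and signaler
+ * entries cannot disappear while the lock is held, so dereferencing
+ * ->tsk and calling wake_up_process() directly is safe.
+ */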
return mask;
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 445108855275..d81232b79f00 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -95,8 +95,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
@@ -180,7 +179,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
- if (INTEL_INFO(dev)->gen > 6) {
+ if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
if (intel_crtc_state->limited_color_range)
@@ -345,11 +344,10 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
@@ -428,8 +426,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
@@ -446,7 +443,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->degamma_lut) {
lut = (struct drm_color_lut *) state->degamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
word0 =
@@ -461,7 +458,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->gamma_lut) {
lut = (struct drm_color_lut *) state->gamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
word0 =
@@ -497,12 +494,12 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state)
int intel_color_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
size_t gamma_length, degamma_length;
- degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+ degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size *
sizeof(struct drm_color_lut);
- gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+ gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size *
sizeof(struct drm_color_lut);
/*
@@ -529,8 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
void intel_color_init(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
@@ -549,10 +545,10 @@ void intel_color_init(struct drm_crtc *crtc)
}
/* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev)->color.gamma_lut_size != 0)
+ if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
+ INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
drm_crtc_enable_color_mgmt(crtc,
- INTEL_INFO(dev)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev)->color.gamma_lut_size);
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ true,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 30eb95b54dcf..86ecec5601d4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -147,14 +147,13 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int mode)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 adpa;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -673,8 +672,7 @@ static const struct dmi_system_id intel_spurious_crt_detect[] = {
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -693,7 +691,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
/* We cannot rely on the HPD pin always being correctly wired
* up; for example, many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -715,7 +713,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -731,7 +729,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else if (INTEL_INFO(dev)->gen < 4)
+ else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else if (i915.load_detect_test)
@@ -793,11 +791,10 @@ static int intel_crt_set_property(struct drm_connector *connector,
void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
@@ -915,7 +912,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
- if (I915_HAS_HOTPLUG(dev) &&
+ if (I915_HAS_HOTPLUG(dev_priv) &&
!dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev_priv)) {
@@ -932,7 +929,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 938ac4dbcb45..10ec9d4b7d45 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1753,8 +1753,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1787,7 +1786,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
- else if (INTEL_INFO(dev)->gen < 9)
+ else if (INTEL_GEN(dev_priv) < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
if (type == INTEL_OUTPUT_HDMI) {
@@ -1837,8 +1836,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1856,7 +1854,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
@@ -1866,7 +1864,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- intel_audio_codec_enable(intel_encoder);
+ intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
}
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c63ba7f435bb..b7a7ed82c325 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1035,9 +1035,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return crtc->config->cpu_transcoder;
}
-static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
@@ -1072,12 +1071,11 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
*/
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
@@ -1087,7 +1085,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
- if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
+ if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1293,11 +1291,10 @@ static void assert_plane(struct drm_i915_private *dev_priv,
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 val = I915_READ(DSPCNTR(pipe));
I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
@@ -1319,10 +1316,9 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int sprite;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
@@ -1336,12 +1332,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 val = I915_READ(SPRCTL(pipe));
I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
u32 val = I915_READ(DVSCNTR(pipe));
I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
@@ -1595,12 +1591,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_device *dev)
+static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
int count = 0;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
count += crtc->base.state->active &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
}
@@ -1610,8 +1606,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
@@ -1622,7 +1617,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
assert_panel_unlocked(dev_priv, crtc->pipe);
/* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev) > 0) {
+ if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
@@ -1647,7 +1642,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config->dpll_hw_state.dpll_md);
} else {
@@ -1682,14 +1677,13 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
*/
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev)) {
+ !intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
@@ -3004,11 +2998,9 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
u32 linear_offset;
u32 dspcntr;
@@ -3021,7 +3013,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (intel_crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
@@ -3070,25 +3062,31 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
if (IS_G4X(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
+ if (rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += crtc_state->pipe_src_w - 1;
}
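+ /* Editor's worked example, not part of this patch: with a 1920x1080
+ * source, ROTATE_180 makes the hardware scan the buffer backwards, so
+ * the programmed start moves from (0,0) to the far corner (1919,1079);
+ * REFLECT_X alone mirrors only horizontally, so just x becomes 1919.
+ */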
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
@@ -3097,14 +3095,17 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
- } else
- I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
+ } else {
+ I915_WRITE(DSPADDR(plane),
+ intel_fb_gtt_offset(fb, rotation) +
+ intel_crtc->dspaddr_offset);
+ }
POSTING_READ(reg);
}
@@ -3172,6 +3173,9 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -3180,13 +3184,11 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -3376,9 +3378,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- const struct skl_plane_wm *p_wm =
- &crtc_state->wm.skl.optimal.planes[0];
int pipe = intel_crtc->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
@@ -3414,9 +3413,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
- skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
@@ -3449,18 +3445,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- const struct skl_plane_wm *p_wm = &cstate->wm.skl.optimal.planes[0];
int pipe = intel_crtc->pipe;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!crtc->primary->state->visible)
- skl_write_plane_wm(intel_crtc, p_wm,
- &dev_priv->wm.skl_results.ddb, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_SURF(pipe, 0));
@@ -3510,7 +3496,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
+ i915_redisable_vga(to_i915(dev));
if (!state)
return 0;
@@ -3687,8 +3673,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
@@ -3713,7 +3698,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
(pipe_config->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
if (pipe_config->pch_pfit.enabled)
@@ -4734,13 +4719,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
- intel_crtc->pipe, SKL_CRTC_INDEX);
-
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
@@ -4761,7 +4741,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
@@ -4769,10 +4748,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->base.visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_plane->base.name,
- intel_crtc->pipe, drm_plane_index(&intel_plane->base));
-
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
@@ -5096,6 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5163,7 +5140,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
* us to.
*/
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else if (pipe_config->update_wm_pre)
intel_update_watermarks(crtc);
}
@@ -5319,6 +5297,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5377,7 +5357,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -5408,11 +5388,12 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5467,7 +5448,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(intel_crtc);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
@@ -5483,7 +5464,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else
intel_update_watermarks(intel_crtc);
@@ -5494,7 +5476,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, true);
assert_vblank_disabled(crtc);
@@ -5599,8 +5581,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5617,13 +5598,13 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);
@@ -7051,7 +7032,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev)->num_pipes == 2)
+ if (INTEL_INFO(dev_priv)->num_pipes == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7192,7 +7173,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
@@ -7786,12 +7767,11 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config->cpu_transcoder;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
@@ -8245,8 +8225,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -8272,7 +8251,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
vsyncshift += adjusted_mode->crtc_htotal;
}
- if (INTEL_INFO(dev)->gen > 3)
+ if (INTEL_GEN(dev_priv) > 3)
I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
I915_WRITE(HTOTAL(cpu_transcoder),
@@ -8395,8 +8374,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
@@ -8432,7 +8410,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (HAS_PIPE_CXSR(dev)) {
+ if (HAS_PIPE_CXSR(dev_priv)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
@@ -8442,7 +8420,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (INTEL_INFO(dev)->gen < 4 ||
+ if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -8650,8 +8628,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t tmp;
if (INTEL_GEN(dev_priv) <= 3 &&
@@ -8663,7 +8640,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
/* Check whether the pfit is attached to our pipe. */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
@@ -8727,7 +8704,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
@@ -8739,7 +8716,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
@@ -8808,8 +8785,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -8848,7 +8824,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
@@ -8856,7 +8832,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
@@ -9653,11 +9629,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
@@ -9669,7 +9644,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily read).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
@@ -9871,7 +9846,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
@@ -10661,8 +10636,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
@@ -10689,7 +10663,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
+ if (INTEL_GEN(dev_priv) < 9 &&
(port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
@@ -10704,8 +10678,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
bool active;
@@ -10738,7 +10711,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
skl_init_scalers(dev_priv, crtc, pipe_config);
pipe_config->scaler_state.scaler_id = -1;
@@ -10748,7 +10721,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT(power_domain);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
@@ -10842,16 +10815,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- const struct skl_plane_wm *p_wm =
- &cstate->wm.skl.optimal.planes[PLANE_CURSOR];
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_cursor_wm(intel_crtc, p_wm, &wm->ddb);
-
if (plane_state && plane_state->base.visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (plane_state->base.crtc_w) {
@@ -10873,7 +10839,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- if (plane_state->base.rotation == DRM_ROTATE_180)
+ if (plane_state->base.rotation & DRM_ROTATE_180)
cntl |= CURSOR_ROTATE_180;
}
@@ -10919,7 +10885,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev_priv) &&
- plane_state->base.rotation == DRM_ROTATE_180) {
+ plane_state->base.rotation & DRM_ROTATE_180) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
}
@@ -12062,6 +12028,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
@@ -12186,7 +12153,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* TILEOFF/LINOFF registers can't be changed via MI display flips.
* Note that pitch changes could also affect these register.
*/
- if (INTEL_INFO(dev)->gen > 3 &&
+ if (INTEL_GEN(dev_priv) > 3 &&
(fb->offsets[0] != crtc->primary->fb->offsets[0] ||
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
@@ -12261,7 +12228,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
engine = NULL;
} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
engine = dev_priv->engine[BCS];
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
engine = dev_priv->engine[BCS];
@@ -12518,7 +12485,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
/* Pre-gen9 platforms need two-step watermark updates */
if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
- INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+ INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
if (visible || was_visible)
@@ -12623,7 +12590,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+ ret = dev_priv->display.compute_intermediate_wm(dev,
intel_crtc,
pipe_config);
if (ret) {
@@ -12635,7 +12602,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
@@ -12748,6 +12715,16 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
+static inline void
+intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
+ unsigned int lane_count, struct intel_link_m_n *m_n)
+{
+ DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
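
The new helper collapses several near-identical M/N debug prints into one call per link configuration. A standalone model of its behavior, with a simplified struct and made-up register values (the real intel_link_m_n lives in the driver headers):

#include <stdio.h>

struct link_m_n { unsigned int gmch_m, gmch_n, link_m, link_n, tu; };

static void dump_m_n_config(const char *id, int lane_count,
			    const struct link_m_n *m_n)
{
	printf("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
	       id, lane_count, m_n->gmch_m, m_n->gmch_n,
	       m_n->link_m, m_n->link_n, m_n->tu);
}

int main(void)
{
	struct link_m_n dp_m_n = { 0x6b3a, 0x8000, 0x1c9f, 0x4000, 64 };

	/* one call per configuration replaces the repeated format strings */
	dump_m_n_config("dp m_n", 4, &dp_m_n);
	return 0;
}
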
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
@@ -12759,61 +12736,58 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
- crtc->base.base.id, crtc->base.name,
- context, pipe_config, pipe_name(crtc->pipe));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
+ crtc->base.base.id, crtc->base.name, context);
- DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
- DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+ DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
- DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- pipe_config->has_pch_encoder,
- pipe_config->fdi_lanes,
- pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
- pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
- pipe_config->fdi_m_n.tu);
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
- pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
- pipe_config->dp_m_n.tu);
-
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m2_n2.gmch_m,
- pipe_config->dp_m2_n2.gmch_n,
- pipe_config->dp_m2_n2.link_m,
- pipe_config->dp_m2_n2.link_n,
- pipe_config->dp_m2_n2.tu);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count, &pipe_config->dp_m_n);
+ if (pipe_config->has_drrs)
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
- pipe_config->has_audio,
- pipe_config->has_infoframe);
+ pipe_config->has_audio, pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
- DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
+ pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
- DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
- DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
+
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled));
+
+ DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
@@ -12864,20 +12838,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
continue;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
- plane->base.id, plane->name);
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
+ plane->base.id, plane->name,
fb->base.id, fb->width, fb->height,
drm_get_format_name(fb->pixel_format, &format_name));
- DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
- state->scaler_id,
- state->base.src.x1 >> 16,
- state->base.src.y1 >> 16,
- drm_rect_width(&state->base.src) >> 16,
- drm_rect_height(&state->base.src) >> 16,
- state->base.dst.x1, state->base.dst.y1,
- drm_rect_width(&state->base.dst),
- drm_rect_height(&state->base.dst));
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->base.src.x1 >> 16,
+ state->base.src.y1 >> 16,
+ drm_rect_width(&state->base.src) >> 16,
+ drm_rect_height(&state->base.src) >> 16,
+ state->base.dst.x1, state->base.dst.y1,
+ drm_rect_width(&state->base.dst),
+ drm_rect_height(&state->base.dst));
}
}
@@ -13192,12 +13166,11 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
}
static bool
-intel_pipe_config_compare(struct drm_device *dev,
+intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
struct intel_crtc_state *pipe_config,
bool adjust)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool ret = true;
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
@@ -13317,7 +13290,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_GEN(dev_priv) < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
if (current_config->has_drrs)
@@ -13366,7 +13339,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -13441,8 +13414,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
@@ -13451,7 +13423,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
@@ -13557,11 +13529,15 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
static void
-verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
+verify_connector_state(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
- drm_for_each_connector(connector, dev) {
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
struct drm_connector_state *state = connector->state;
@@ -13676,7 +13652,7 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
sw_config = to_intel_crtc_state(crtc->state);
- if (!intel_pipe_config_compare(dev, sw_config,
+ if (!intel_pipe_config_compare(dev_priv, sw_config,
pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(intel_crtc, pipe_config,
@@ -13769,15 +13745,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
{
if (!needs_modeset(new_state) &&
!to_intel_crtc_state(new_state)->update_pipe)
return;
verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, crtc);
+ verify_connector_state(crtc->dev, state, crtc);
verify_crtc_state(crtc, old_state, new_state);
verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}
@@ -13793,10 +13770,11 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev)
+intel_modeset_verify_disabled(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
verify_encoder_state(dev);
- verify_connector_state(dev, NULL);
+ verify_connector_state(dev, state, NULL);
verify_disabled_dpll_state(dev);
}
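
With the drm_atomic_state argument threaded through, verify_connector_state() above now walks only the connectors carried in the commit rather than every connector on the device. A toy model of that scope change (names illustrative):

#include <stdio.h>

struct connector { const char *name; int in_atomic_state; };

int main(void)
{
	struct connector all[] = {
		{ "eDP-1",    1 },
		{ "HDMI-A-1", 0 },	/* untouched by this commit */
		{ "DP-1",     1 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(all) / sizeof(all[0]); i++) {
		if (!all[i].in_atomic_state)
			continue;	/* previously verified unconditionally */
		printf("verifying %s\n", all[i].name);
	}
	return 0;
}
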
@@ -14094,7 +14072,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (i915.fastboot &&
- intel_pipe_config_compare(dev,
+ intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@@ -14294,6 +14272,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int updated = 0;
bool progress;
enum pipe pipe;
+ int i;
+
+ const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i)
+ /* Ignore allocations for CRTCs that have been turned off. */
+ if (crtc->state->active)
+ entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/*
* Whenever the number of active pipes changes, we need to make sure we
@@ -14302,7 +14288,6 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* cause pipe underruns and other bad stuff.
*/
do {
- int i;
progress = false;
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -14313,12 +14298,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
cstate = to_intel_crtc_state(crtc->state);
pipe = intel_crtc->pipe;
- if (updated & cmask || !crtc->state->active)
+ if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(state, intel_crtc))
+
+ if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
continue;
updated |= cmask;
+ entries[i] = &cstate->wm.skl.ddb;
/*
* If this is an already active pipe, its DDB changed,
@@ -14327,7 +14314,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* new ddb allocation to take effect.
*/
if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
- &intel_crtc->hw_ddb) &&
+ &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
!crtc->state->active_changed &&
intel_state->wm_results.dirty_pipes != updated)
vbl_wait = true;
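
The entries[] array introduced at the top of skl_update_crtcs() replaces the old per-crtc hw_ddb tracking: a pipe is only flushed once its new DDB allocation no longer overlaps any other pipe's still-live allocation, and entries[i] is updated as each pipe completes. A standalone sketch of the overlap test behind the new skl_ddb_allocation_overlaps() signature (each entry reduced to a half-open block range, which is an assumption about its fields):

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { unsigned short start, end; };

static bool ddb_overlaps(const struct ddb_entry **entries, int n,
			 const struct ddb_entry *ddb, int ignore)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i == ignore || !entries[i])
			continue;
		/* half-open ranges overlap iff each starts before the
		 * other ends */
		if (ddb->start < entries[i]->end &&
		    entries[i]->start < ddb->end)
			return true;
	}
	return false;
}

int main(void)
{
	struct ddb_entry pipe_a = { 0, 256 }, pipe_b = { 200, 512 };
	const struct ddb_entry *entries[2] = { &pipe_a, NULL };

	/* pipe B's new allocation still overlaps pipe A's live one, so
	 * its update must wait for a later pass of the do/while loop */
	printf("overlap: %d\n", ddb_overlaps(entries, 2, &pipe_b, 1));
	return 0;
}
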
@@ -14358,14 +14345,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
- sizeof(intel_state->min_pixclk));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->atomic_cdclk_freq = intel_state->cdclk;
-
+ if (intel_state->modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- }
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -14398,8 +14379,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!crtc->state->active)
- intel_update_watermarks(intel_crtc);
+ if (!crtc->state->active) {
+ /*
+ * Make sure we don't call initial_watermarks
+ * for ILK-style watermark updates.
+ */
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(crtc->state));
+ else
+ intel_update_watermarks(intel_crtc);
+ }
}
}
@@ -14422,7 +14412,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev);
+ intel_modeset_verify_disabled(dev, state);
}
/* Complete the events for pipes that have now been disabled */
@@ -14465,7 +14455,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_cstate = to_intel_crtc_state(crtc->state);
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_cstate);
+ dev_priv->display.optimize_watermarks(intel_state,
+ intel_cstate);
}
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -14474,7 +14465,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
- intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
+ intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
}
if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -14557,10 +14548,6 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * nonblocking commits are only safe for pure plane updates. Everything else
- * should work though.
- *
* RETURNS
* Zero for success or -errno.
*/
@@ -14572,11 +14559,6 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (intel_state->modeset && nonblock) {
- DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
- return -EINVAL;
- }
-
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
@@ -14594,10 +14576,16 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
- dev_priv->wm.skl_results = intel_state->wm_results;
intel_shared_dpll_commit(state);
intel_atomic_track_fbs(state);
+ if (intel_state->modeset) {
+ memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+ dev_priv->active_crtcs = intel_state->active_crtcs;
+ dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+ }
+
drm_atomic_state_get(state);
INIT_WORK(&state->commit_work,
nonblock ? intel_atomic_commit_work : NULL);
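
Moving the min_pixclk/active_crtcs/cdclk copy out of the commit tail (removed a few hunks up) to here, right after drm_atomic_helper_swap_state(), matters now that nonblocking modesets are allowed: the tail may run later from a worker, and a subsequent atomic check must already see the swapped software state. A toy model of that ordering (names illustrative):

#include <stdio.h>

static int published_cdclk;	/* models dev_priv->atomic_cdclk_freq */

static void commit_tail(void)	/* may run later, from a worker */
{
	printf("tail: programming hardware\n");
}

static void atomic_check(void)	/* may run before the tail does */
{
	printf("check: computing against cdclk %d\n", published_cdclk);
}

int main(void)
{
	/* swap point: publish software state synchronously ... */
	published_cdclk = 540000;
	/* ... so a check racing the deferred tail sees fresh values */
	atomic_check();
	commit_tail();
	return 0;
}
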
@@ -14720,8 +14708,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
{
struct intel_atomic_state *intel_state =
to_intel_atomic_state(new_state->state);
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
@@ -14775,10 +14762,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
GFP_KERNEL);
if (ret < 0)
return ret;
+
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev)->cursor_needs_physical) {
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
if (ret) {
@@ -14811,7 +14800,7 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14822,7 +14811,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
- !INTEL_INFO(dev)->cursor_needs_physical))
+ !INTEL_INFO(dev_priv)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
}
@@ -14900,30 +14889,32 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(crtc->state);
- struct intel_crtc_state *old_intel_state =
+ struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
- enum pipe pipe = intel_crtc->pipe;
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
if (modeset)
- return;
+ goto out;
if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
intel_color_set_csc(crtc->state);
intel_color_load_luts(crtc->state);
}
- if (intel_cstate->update_pipe) {
- intel_update_pipe_config(intel_crtc, old_intel_state);
- } else if (INTEL_GEN(dev_priv) >= 9) {
+ if (intel_cstate->update_pipe)
+ intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
- I915_WRITE(PIPE_WM_LINETIME(pipe),
- intel_cstate->wm.skl.optimal.linetime);
- }
+out:
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(old_intel_state,
+ intel_cstate);
}
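
Replacing the early return with a goto ensures the new atomic_update_watermarks hook runs for modesets as well as for fast-path plane updates. A minimal sketch of the control-flow pattern:

#include <stdio.h>

static void update_watermarks(void)
{
	printf("watermarks programmed\n");
}

static void begin_crtc_commit(int modeset)
{
	if (modeset)
		goto out;	/* was: return, skipping the watermark call */

	printf("fast-path pipe updates\n");
out:
	/* common tail runs for modesets and plane updates alike */
	update_watermarks();
}

int main(void)
{
	begin_crtc_commit(1);
	return 0;
}
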
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14989,11 +14980,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
state->scaler_id = -1;
}
primary->pipe = pipe;
- primary->plane = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+ primary->plane = (enum plane) !pipe;
+ else
+ primary->plane = (enum plane) pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
- primary->plane = !pipe;
if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
@@ -15046,6 +15042,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
} else if (INTEL_GEN(dev_priv) >= 4) {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_180;
@@ -15147,13 +15147,13 @@ intel_update_cursor_plane(struct drm_plane *plane,
{
struct drm_crtc *crtc = crtc_state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
if (!obj)
addr = 0;
- else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
addr = i915_gem_object_ggtt_offset(obj, NULL);
else
addr = obj->phys_handle->busaddr;
@@ -15280,14 +15280,14 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
- if (!plane) {
+ if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto fail;
}
}
cursor = intel_cursor_plane_create(dev_priv, pipe);
- if (!cursor) {
+ if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
goto fail;
}
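
The two hunks above fix broken error handling: the plane constructors return an ERR_PTR-encoded errno on failure, never NULL, so the old !plane test could never trigger and PTR_ERR() was being applied to valid pointers. A standalone sketch of the ERR_PTR convention (macros simplified from the kernel's err.h):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *plane_create(int fail)
{
	/* on failure, return an encoded errno rather than NULL */
	return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)"plane";
}

int main(void)
{
	void *plane = plane_create(1);

	printf("NULL check catches it: %d\n", plane == NULL);	/* 0 */
	printf("IS_ERR catches it:     %d\n", !!IS_ERR(plane));	/* 1 */
	if (IS_ERR(plane))
		printf("errno: %ld\n", PTR_ERR(plane));
	return 0;
}
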
@@ -15299,16 +15299,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
if (ret)
goto fail;
- /*
- * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
intel_crtc->pipe = pipe;
- intel_crtc->plane = (enum plane) pipe;
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = !pipe;
- }
+ intel_crtc->plane = primary->plane;
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
@@ -15401,11 +15393,9 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
return true;
}
-static bool intel_crt_present(struct drm_device *dev)
+static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return false;
if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
@@ -15479,7 +15469,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
intel_lvds_init(dev);
- if (intel_crt_present(dev))
+ if (intel_crt_present(dev_priv))
intel_crt_init(dev);
if (IS_BROXTON(dev_priv)) {
@@ -15527,7 +15517,7 @@ static void intel_setup_outputs(struct drm_device *dev)
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
+ dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
intel_dp_init(dev, DP_A, PORT_A);
@@ -15570,14 +15560,14 @@ static void intel_setup_outputs(struct drm_device *dev)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
@@ -15634,7 +15624,7 @@ static void intel_setup_outputs(struct drm_device *dev)
} else if (IS_GEN2(dev_priv))
intel_dvo_init(dev);
- if (SUPPORTS_TV(dev))
+ if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev);
intel_psr_init(dev);
@@ -15689,6 +15679,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
+ if (obj->pin_display && obj->cache_dirty)
+ i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
mutex_unlock(&dev->struct_mutex);
@@ -15769,7 +15761,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_INFO(dev)->gen < 9) {
+ if (INTEL_GEN(dev_priv) < 9) {
DRM_DEBUG("Unsupported tiling 0x%llx!\n",
mode_cmd->modifier[0]);
return -EINVAL;
@@ -15832,7 +15824,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
- if (INTEL_INFO(dev)->gen > 3) {
+ if (INTEL_GEN(dev_priv) > 3) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -15840,7 +15832,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- INTEL_INFO(dev)->gen < 9) {
+ INTEL_GEN(dev_priv) < 9) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -15849,7 +15841,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -15866,7 +15858,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 5) {
+ if (INTEL_GEN(dev_priv) < 5) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -16295,9 +16287,8 @@ static void intel_init_quirks(struct drm_device *dev)
}
/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_device *dev)
+static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
u8 sr1;
i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
@@ -16339,6 +16330,7 @@ static void sanitize_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct drm_modeset_acquire_ctx ctx;
@@ -16367,12 +16359,14 @@ retry:
if (WARN_ON(IS_ERR(state)))
goto fail;
+ intel_state = to_intel_atomic_state(state);
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
+ intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
if (ret) {
@@ -16396,7 +16390,7 @@ retry:
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(cs);
+ dev_priv->display.optimize_watermarks(intel_state, cs);
}
put_state:
@@ -16429,7 +16423,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_init_pm(dev_priv);
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
/*
@@ -16475,8 +16469,8 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = ggtt->mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev)->num_pipes,
- INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+ INTEL_INFO(dev_priv)->num_pipes,
+ INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
int ret;
@@ -16497,7 +16491,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_update_max_cdclk(dev_priv);
/* Just disable it once at startup */
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
intel_setup_outputs(dev);
drm_modeset_lock_all(dev);
@@ -16564,11 +16558,10 @@ static void intel_enable_pipe_a(struct drm_device *dev)
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val;
- if (INTEL_INFO(dev)->num_pipes == 1)
+ if (INTEL_INFO(dev_priv)->num_pipes == 1)
return true;
val = I915_READ(DSPCNTR(!crtc->plane));
@@ -16642,7 +16635,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
@@ -16744,21 +16737,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-void i915_redisable_vga_power_on(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
}
}
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* This function can be called either from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -16769,7 +16759,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
- i915_redisable_vga_power_on(dev);
+ i915_redisable_vga_power_on(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
@@ -16841,7 +16831,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- crtc->active ? "enabled" : "disabled");
+ enableddisabled(crtc->active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -16874,9 +16864,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id,
- encoder->base.name,
- encoder->base.crtc ? "enabled" : "disabled",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
pipe_name(pipe));
}
@@ -16905,9 +16894,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.encoder = NULL;
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id,
- connector->base.name,
- connector->base.encoder ? "enabled" : "disabled");
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
for_each_intel_crtc(dev, crtc) {
@@ -17155,10 +17143,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
/*
* set vga decode state - true == enable VGA decode
*/
-int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
@@ -17312,16 +17299,15 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+ err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
@@ -17335,13 +17321,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
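
Several hunks in this file replace open-coded "enabled"/"disabled" ternaries with enableddisabled(). A standalone sketch of what such a helper looks like (the real one lives in the i915 headers; the body here is assumed from its usage):

#include <stdbool.h>
#include <stdio.h>

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

int main(void)
{
	bool active = false;

	printf("[CRTC:21:pipe A] hw state readout: %s\n",
	       enableddisabled(active));
	return 0;
}
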
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9df331b3305b..90283edcafba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -942,14 +942,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev);
+ bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
pps_lock(intel_dp);
@@ -1542,8 +1542,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1578,7 +1577,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
int ret;
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
@@ -1791,9 +1790,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
- if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) &&
- pipe_config->limited_color_range)
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -2515,8 +2512,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
+ if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
@@ -2735,7 +2731,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2777,7 +2774,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
if (pipe_config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -2787,7 +2784,7 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(intel_dp);
}
@@ -2924,7 +2921,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
{
vlv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
@@ -2942,7 +2939,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
{
chv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -2979,13 +2976,12 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (INTEL_INFO(dev)->gen >= 9) {
+ else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -4873,15 +4869,13 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_device *dev, enum port port)
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* eDP not supported on g4x, so bail out early just
* for a bit extra safety in case the VBT is bonkers.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return false;
if (port == PORT_A)
@@ -5483,7 +5477,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
mutex_init(&dev_priv->drrs.mutex);
- if (INTEL_INFO(dev)->gen <= 6) {
+ if (INTEL_GEN(dev_priv) <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
}
@@ -5657,7 +5651,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5666,7 +5660,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
@@ -5678,7 +5672,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev, port))
+ if (intel_dp_is_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -5742,7 +5736,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -5816,7 +5810,7 @@ bool intel_dp_init(struct drm_device *dev,
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
intel_encoder->post_disable = ilk_post_disable_dp;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3ffbd69e4551..b029d1026a28 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -43,7 +43,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
- pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
bpp = 24;
/*
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 21853a17b6d9..58a756f2f224 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -188,13 +188,12 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
/* PCH only available on ILK+ */
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return;
if (pll == NULL)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 398195bf6dd1..cd132c216a67 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -294,6 +294,9 @@ struct intel_connector {
*/
struct intel_encoder *encoder;
+ /* ACPI device id for ACPI and driver cooperation */
+ u32 acpi_device_id;
+
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
@@ -652,7 +655,6 @@ struct intel_crtc_state {
bool double_wide;
- bool dp_encoder_is_mst;
int pbn;
struct intel_crtc_scaler_state scaler_state;
@@ -728,9 +730,6 @@ struct intel_crtc {
bool cxsr_allowed;
} wm;
- /* gen9+: ddb allocation currently being used */
- struct skl_ddb_entry hw_ddb;
-
int scanline_offset;
struct {
@@ -1187,7 +1186,9 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
@@ -1392,7 +1393,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
@@ -1738,18 +1739,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- struct intel_crtc *intel_crtc);
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb);
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- int plane);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 9f279a3d0f74..0d8ff0034b88 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -774,9 +774,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
- DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
- DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
- "disabled" : "enabled");
+ DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
+ DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
@@ -795,8 +794,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
DRM_DEBUG_KMS("BTA %s\n",
- intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
- "disabled" : "enabled");
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
/* delays in VBT are in units of 100us, so they need to be
* converted to ms here
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 841f8d1e1410..3da4d466e332 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -102,6 +102,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
+ /* Nothing to do here, execute in order of dependencies */
+ engine->schedule = NULL;
+
dev_priv->engine[id] = engine;
return 0;
}
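
engine->schedule is introduced as an optional hook: a NULL pointer means there is no scheduler and requests execute strictly in submission order. A standalone sketch of the optional-callback pattern:

#include <stdio.h>

struct engine {
	/* NULL means "no scheduler": run requests in queue order */
	void (*schedule)(int priority);
};

static void submit(struct engine *e, int priority)
{
	if (e->schedule)
		e->schedule(priority);
	else
		printf("FIFO submission, priority %d ignored\n", priority);
}

int main(void)
{
	struct engine e = { .schedule = NULL };

	submit(&e, 1024);
	return 0;
}
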
@@ -236,8 +239,8 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&engine->execlist_queue);
- spin_lock_init(&engine->execlist_lock);
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index e230d480c5e6..62f215b12eb5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -48,17 +48,17 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
- return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
+ return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen < 4;
+ return INTEL_GEN(dev_priv) < 4;
}
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen <= 3;
+ return INTEL_GEN(dev_priv) <= 3;
}
/*
@@ -351,7 +351,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
return ilk_fbc_is_active(dev_priv);
else if (IS_GM45(dev_priv))
return g4x_fbc_is_active(dev_priv);
@@ -365,9 +365,9 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
fbc->active = true;
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
- else if (INTEL_INFO(dev_priv)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_activate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_activate(dev_priv);
@@ -381,7 +381,7 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
fbc->active = false;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_deactivate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_deactivate(dev_priv);
@@ -561,7 +561,7 @@ again:
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
- if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+ if (ret && INTEL_GEN(dev_priv) <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
@@ -594,7 +594,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->threshold = ret;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
@@ -708,10 +708,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
- } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
max_w = 4096;
max_h = 2048;
} else {
@@ -812,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
cache->plane.rotation != DRM_ROTATE_0) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
@@ -854,9 +854,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (intel_vgpu_active(dev_priv)) {
@@ -874,16 +873,6 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
return false;
}
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
- fbc->no_fbc_reason = "no enabled pipes can have FBC";
- return false;
- }
-
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
- fbc->no_fbc_reason = "no enabled planes can have FBC";
- return false;
- }
-
return true;
}
@@ -1066,23 +1055,19 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
- bool fbc_crtc_present = false;
- int i, j;
+ bool crtc_chosen = false;
+ int i;
mutex_lock(&fbc->lock);
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (fbc->crtc == to_intel_crtc(crtc)) {
- fbc_crtc_present = true;
- break;
- }
- }
- /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
- if (!fbc_crtc_present && fbc->crtc != NULL)
+ /* Does this atomic commit involve the CRTC currently tied to FBC? */
+ if (fbc->crtc &&
+ !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
+ goto out;
+
+ if (!intel_fbc_can_enable(dev_priv))
goto out;
/* Simply choose the first CRTC that is compatible and has a visible
@@ -1092,25 +1077,29 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
+ struct intel_crtc_state *intel_crtc_state;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);
if (!intel_plane_state->base.visible)
continue;
- for_each_crtc_in_state(state, crtc, crtc_state, j) {
- struct intel_crtc_state *intel_crtc_state =
- to_intel_crtc_state(crtc_state);
+ if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+ continue;
- if (plane_state->crtc != crtc)
- continue;
+ if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+ continue;
- if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
- break;
+ intel_crtc_state = to_intel_crtc_state(
+ drm_atomic_get_existing_crtc_state(state, &crtc->base));
- intel_crtc_state->enable_fbc = true;
- goto out;
- }
+ intel_crtc_state->enable_fbc = true;
+ crtc_chosen = true;
+ break;
}
+ if (!crtc_chosen)
+ fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
out:
mutex_unlock(&fbc->lock);
}
@@ -1386,7 +1375,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
/* This value was pulled out of someone's hat */
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 977e39722894..beb08982dc0b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -356,7 +356,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
- struct drm_device *dev = fb_helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
@@ -509,7 +509,7 @@ retry:
* fbdev helper library.
*/
if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
@@ -697,11 +697,11 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+ if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -714,7 +714,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev)->num_pipes, 4);
+ INTEL_INFO(dev_priv)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 76ceb539f9f0..7bab41218cf7 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!frontbuffer_bits)
- return;
+ return false;
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ return true;
}
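
intel_fb_obj_invalidate() now reports whether any frontbuffer bits were live, so callers can skip follow-up work for objects that are not on a frontbuffer plane. A standalone sketch of the new contract (the invalidation side effects are reduced to a comment):

#include <stdbool.h>
#include <stdio.h>

static bool fb_obj_invalidate(unsigned int frontbuffer_bits)
{
	if (!frontbuffer_bits)
		return false;	/* object not visible on any frontbuffer */
	/* ... kick PSR/FBC/DRRS invalidation for the affected planes ... */
	return true;
}

int main(void)
{
	if (!fb_obj_invalidate(0))
		printf("no frontbuffer bits: skipping follow-up work\n");
	return 0;
}
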
/**
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 1aa85236b788..34d6ad2cf7c1 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -566,7 +566,7 @@ fail:
ret = 0;
}
- if (err == 0 && !HAS_GUC_UCODE(dev))
+ if (err == 0 && !HAS_GUC_UCODE(dev_priv))
; /* Don't mention the GuC! */
else if (err == 0)
DRM_INFO("GuC firmware load skipped\n");
@@ -725,18 +725,18 @@ void intel_guc_init(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- if (!HAS_GUC(dev)) {
+ if (!HAS_GUC(dev_priv)) {
i915.enable_guc_loading = 0;
i915.enable_guc_submission = 0;
} else {
/* A negative value means "use platform default" */
if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
- if (!HAS_GUC_UCODE(dev)) {
+ if (!HAS_GUC_UCODE(dev_priv)) {
fw_path = NULL;
} else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 35ada4e1c6cf..fb88e32e25a3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -975,14 +975,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
}
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
WARN_ON(!crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
@@ -991,21 +993,20 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
@@ -1040,8 +1041,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* FIXME: BSpec says this should be done at the end of
* the modeset sequence, so not sure if this isn't too soon.
*/
- if (crtc->config->pipe_bpp > 24 &&
- crtc->config->pixel_multiplier > 1) {
+ if (pipe_config->pipe_bpp > 24 &&
+ pipe_config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -1055,8 +1056,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
@@ -1073,7 +1074,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
/*
@@ -1086,7 +1087,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
* 4. enable HDMI clock gating
*/
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
I915_WRITE(TRANS_CHICKEN1(pipe),
I915_READ(TRANS_CHICKEN1(pipe)) |
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
@@ -1098,7 +1099,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
@@ -1110,8 +1111,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
@@ -1178,9 +1179,7 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
@@ -1190,9 +1189,7 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
@@ -1645,13 +1642,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
}
@@ -1663,9 +1659,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
vlv_phy_pre_encoder_enable(encoder);
@@ -1674,7 +1668,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
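The HDMI hunks replace reads of crtc->config with the pipe_config and conn_state arguments handed down through the atomic enable hooks, so every decision uses the state this commit was computed from rather than a pointer that may be mid-update. A small sketch of the resulting style; the bit values are invented stand-ins for SDVO_ENABLE and friends:

#include <stdio.h>

struct crtc_state { int has_audio; int pipe_bpp; };

/* Every decision below reads the state passed in for this commit,
 * never a cached crtc->config pointer. */
static void enable_hdmi(const struct crtc_state *pipe_config)
{
	unsigned int ctl = 0x1;		/* stand-in for SDVO_ENABLE */

	if (pipe_config->has_audio)
		ctl |= 0x2;		/* stand-in for SDVO_AUDIO_ENABLE */
	if (pipe_config->pipe_bpp > 24)
		ctl |= 0x4;		/* stand-in for the 12bpc format bit */

	printf("ctl = %#x\n", ctl);
}

int main(void)
{
	struct crtc_state state = { .has_audio = 1, .pipe_bpp = 30 };

	enable_hdmi(&state);
	return 0;
}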
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 334d47b5811a..3d546c019de0 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -501,7 +501,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (intel_connector->mst_port)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+ if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
intel_connector->encoder->hpd_pin > HPD_NONE) {
connector->polled = enabled ?
DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index dde04b7643b1..0a09024d6ca3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -432,8 +432,10 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *cursor, *last;
+ struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
+ unsigned long flags;
+ struct rb_node *rb;
bool submit = false;
last = port->request;
@@ -469,8 +471,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock(&engine->execlist_lock);
- list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ rb = engine->execlist_first;
+ while (rb) {
+ struct drm_i915_gem_request *cursor =
+ rb_entry(rb, typeof(*cursor), priotree.node);
+
/* Can we combine this request with the current port? It has to
* be the same context/ringbuffer and not have any exceptions
* (e.g. GVT saying never to combine contexts).
@@ -493,7 +499,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* context (even though a different request) to
* the second port.
*/
- if (ctx_single_port_submission(cursor->ctx))
+ if (ctx_single_port_submission(last->ctx) ||
+ ctx_single_port_submission(cursor->ctx))
break;
GEM_BUG_ON(last->ctx == cursor->ctx);
@@ -501,17 +508,30 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_gem_request_assign(&port->request, last);
port++;
}
+
+ rb = rb_next(rb);
+ rb_erase(&cursor->priotree.node, &engine->execlist_queue);
+ RB_CLEAR_NODE(&cursor->priotree.node);
+ cursor->priotree.priority = INT_MAX;
+
+ /* We keep the previous context alive until we retire the
+	 * following request. This ensures that the context object
+ * is still pinned for any residual writes the HW makes into it
+ * on the context switch into the next object following the
+ * breadcrumb. Otherwise, we may retire the context too early.
+ */
+ cursor->previous_context = engine->last_context;
+ engine->last_context = cursor->ctx;
+
+ __i915_gem_request_submit(cursor);
last = cursor;
submit = true;
}
if (submit) {
- /* Decouple all the requests submitted from the queue */
- engine->execlist_queue.next = &cursor->execlist_link;
- cursor->execlist_link.prev = &engine->execlist_queue;
-
i915_gem_request_assign(&port->request, last);
+ engine->execlist_first = rb;
}
- spin_unlock(&engine->execlist_lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
if (submit)
execlists_submit_ports(engine);
@@ -614,27 +634,147 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
+static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
+{
+ struct rb_node **p, *rb;
+ bool first = true;
+
+	/* Most positive priority is scheduled first; equal priorities are FIFO */
+ rb = NULL;
+ p = &root->rb_node;
+ while (*p) {
+ struct i915_priotree *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), node);
+ if (pt->priority > pos->priority) {
+ p = &rb->rb_left;
+ } else {
+ p = &rb->rb_right;
+ first = false;
+ }
+ }
+ rb_link_node(&pt->node, rb, p);
+ rb_insert_color(&pt->node, root);
+
+ return first;
+}
+
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- spin_lock_irqsave(&engine->execlist_lock, flags);
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
- /* We keep the previous context alive until we retire the following
- * request. This ensures that any the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- request->previous_context = engine->last_context;
- engine->last_context = request->ctx;
-
- list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ if (insert_request(&request->priotree, &engine->execlist_queue))
+ engine->execlist_first = &request->priotree.node;
if (execlists_elsp_idle(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
- spin_unlock_irqrestore(&engine->execlist_lock, flags);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+static struct intel_engine_cs *
+pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine;
+
+ engine = container_of(pt,
+ struct drm_i915_gem_request,
+ priotree)->engine;
+ if (engine != locked) {
+ if (locked)
+ spin_unlock_irq(&locked->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
+ }
+
+ return engine;
+}
+
+static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+{
+ static DEFINE_MUTEX(lock);
+ struct intel_engine_cs *engine = NULL;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ LIST_HEAD(dfs);
+
+ if (prio <= READ_ONCE(request->priotree.priority))
+ return;
+
+ /* Need global lock to use the temporary link inside i915_dependency */
+ mutex_lock(&lock);
+
+ stack.signaler = &request->priotree;
+ list_add(&stack.dfs_link, &dfs);
+
+ /* Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_priotree *pt, prio) {
+ * list_for_each_entry(dep, &pt->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * insert_request(pt);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+	 * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we add all of its dependencies
+ * to the end of the list (this may include an already visited
+ * request) and continue to walk onwards onto the new dependencies. The
+ * end result is a topological list of requests in reverse order, the
+ * last element in the list is the request we must execute first.
+ */
+ list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ list_for_each_entry(p, &pt->signalers_list, signal_link)
+ if (prio > READ_ONCE(p->signaler->priority))
+ list_move_tail(&p->dfs_link, &dfs);
+
+ p = list_next_entry(dep, dfs_link);
+ if (!RB_EMPTY_NODE(&pt->node))
+ continue;
+
+ engine = pt_lock_engine(pt, engine);
+
+ /* If it is not already in the rbtree, we can update the
+	 * priority in place and skip over it (and its dependencies)
+ * if it is referenced *again* as we descend the dfs.
+ */
+ if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
+ pt->priority = prio;
+ list_del_init(&dep->dfs_link);
+ }
+ }
+
+	/* FIFO and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = pt_lock_engine(pt, engine);
+
+ if (prio <= pt->priority)
+ continue;
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
+
+ pt->priority = prio;
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
+
+ if (engine)
+ spin_unlock_irq(&engine->timeline->lock);
+
+ mutex_unlock(&lock);
+
+ /* XXX Do we need to preempt to make room for us and our deps? */
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
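The comment block in execlists_schedule() above explains why the recursive priority bump is flattened into a worklist. A standalone sketch of that flattening over a toy dependency graph, using fixed-size arrays; the kernel version additionally re-queues already-visited entries with list_move_tail, which this sketch glosses over:

#include <stdio.h>

#define MAX_DEPS 4
#define MAX_NODES 8

struct node {
	int priority;
	int nr_signalers;
	struct node *signalers[MAX_DEPS];	/* requests we depend on */
};

/* Iterative form of the recursive bump: append dependencies to a
 * flat worklist instead of recursing, so the walk cannot overrun
 * the stack however deep the dependency chain is. Duplicate visits
 * are harmless here because the final pass rechecks the priority. */
static void bump_priority(struct node *req, int prio)
{
	struct node *dfs[MAX_NODES];
	int head = 0, tail = 0, i;

	if (prio <= req->priority)
		return;

	dfs[tail++] = req;
	while (head < tail) {
		struct node *pt = dfs[head++];

		for (i = 0; i < pt->nr_signalers; i++)
			if (prio > pt->signalers[i]->priority &&
			    tail < MAX_NODES)
				dfs[tail++] = pt->signalers[i];
	}

	/* Reverse order: dependencies get bumped before the request. */
	while (tail--)
		if (prio > dfs[tail]->priority)
			dfs[tail]->priority = prio;
}

int main(void)
{
	struct node a = { .priority = 0 };
	struct node b = { .priority = 0, .nr_signalers = 1,
			  .signalers = { &a } };

	bump_priority(&b, 5);
	printf("a=%d b=%d\n", a.priority, b.priority);
	return 0;
}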
@@ -1673,8 +1813,10 @@ void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
+ }
}
static void
@@ -1687,6 +1829,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
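insert_request() above keeps pending requests ordered so the most positive priority is leftmost and equal priorities stay in submission order. A compilable sketch of the same policy on a plain, unbalanced binary search tree; the kernel uses a balanced rbtree, and the node and field names below are invented:

#include <stdio.h>

struct pnode {
	int priority;
	int seqno;		/* insertion order, to show the FIFO tie-break */
	struct pnode *left, *right;
};

/* Higher priority sorts left and runs first; equal priority goes
 * right, preserving FIFO order. The return value says whether the
 * new node became the leftmost, i.e. the next request to run -
 * the role played by the execlist_first cache above. */
static int insert(struct pnode **root, struct pnode *pt)
{
	struct pnode **p = root;
	int first = 1;

	while (*p) {
		if (pt->priority > (*p)->priority) {
			p = &(*p)->left;
		} else {
			p = &(*p)->right;
			first = 0;
		}
	}
	*p = pt;
	return first;
}

int main(void)
{
	struct pnode *root = NULL;
	struct pnode a = { 0, 1, NULL, NULL };
	struct pnode b = { 0, 2, NULL, NULL };
	struct pnode c = { 5, 3, NULL, NULL };
	int ra = insert(&root, &a);	/* 1: tree was empty */
	int rb = insert(&root, &b);	/* 0: equal prio, queued behind a */
	int rc = insert(&root, &c);	/* 1: higher priority, new leftmost */

	printf("%d %d %d\n", ra, rb, rc);
	return 0;
}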
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index de7b3e6ed477..d12ef0047d49 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -122,8 +122,7 @@ out:
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
@@ -139,12 +138,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
tmp = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7acbbbf97833..f4429f67a4e3 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -642,24 +642,6 @@ static struct notifier_block intel_opregion_notifier = {
* (version 3)
*/
-static u32 get_did(struct intel_opregion *opregion, int i)
-{
- u32 did;
-
- if (i < ARRAY_SIZE(opregion->acpi->didl)) {
- did = opregion->acpi->didl[i];
- } else {
- i -= ARRAY_SIZE(opregion->acpi->didl);
-
- if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
- return 0;
-
- did = opregion->acpi->did2[i];
- }
-
- return did;
-}
-
static void set_did(struct intel_opregion *opregion, int i, u32 val)
{
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
@@ -674,11 +656,11 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static u32 acpi_display_type(struct drm_connector *connector)
+static u32 acpi_display_type(struct intel_connector *connector)
{
u32 display_type;
- switch (connector->connector_type) {
+ switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
display_type = ACPI_DISPLAY_TYPE_VGA;
@@ -707,7 +689,7 @@ static u32 acpi_display_type(struct drm_connector *connector)
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
default:
- MISSING_CASE(connector->connector_type);
+ MISSING_CASE(connector->base.connector_type);
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
}
@@ -718,34 +700,9 @@ static u32 acpi_display_type(struct drm_connector *connector)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_connector *connector;
- acpi_handle handle;
- struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
- unsigned long long device_id;
- acpi_status status;
- u32 temp, max_outputs;
- int i = 0;
-
- handle = ACPI_HANDLE(&pdev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev))
- return;
-
- if (acpi_is_video_device(handle))
- acpi_video_bus = acpi_dev;
- else {
- list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
- if (acpi_is_video_device(acpi_cdev->handle)) {
- acpi_video_bus = acpi_cdev;
- break;
- }
- }
- }
-
- if (!acpi_video_bus) {
- DRM_DEBUG_KMS("No ACPI video bus found\n");
- return;
- }
+ struct intel_connector *connector;
+ int i = 0, max_outputs;
+ int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -757,64 +714,58 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
- max_outputs);
- return;
- }
- status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
- if (ACPI_SUCCESS(status)) {
- if (!device_id)
- goto blind_set;
- set_did(opregion, i++, (u32)(device_id & 0x0f0f));
- }
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ if (i < max_outputs)
+ set_did(opregion, i, device_id);
+ i++;
}
-end:
DRM_DEBUG_KMS("%d outputs detected\n", i);
+ if (i > max_outputs)
+ DRM_ERROR("More than %d outputs in connector list\n",
+ max_outputs);
+
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
set_did(opregion, i, 0);
- return;
-
-blind_set:
- i = 0;
- list_for_each_entry(connector,
- &dev_priv->drm.mode_config.connector_list, head) {
- int display_type = acpi_display_type(connector);
-
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs in connector list\n",
- max_outputs);
- return;
- }
-
- temp = get_did(opregion, i);
- set_did(opregion, i, temp | (1 << 31) | display_type | i);
- i++;
- }
- goto end;
}
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_connector *connector;
int i = 0;
- u32 disp_id;
-
- /* Initialize the CADL field by duplicating the DIDL values.
- * Technically, this is not always correct as display outputs may exist,
- * but not active. This initialization is necessary for some Clevo
- * laptops that check this field before processing the brightness and
- * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
- * there are less than eight devices. */
- do {
- disp_id = get_did(opregion, i);
- opregion->acpi->cadl[i] = disp_id;
- } while (++i < 8 && disp_id != 0);
+
+ /*
+ * Initialize the CADL field from the connector device ids. This is
+ * essentially the same as copying from the DIDL. Technically, this is
+	 * not always correct as display outputs may exist but not be active. This
+ * initialization is necessary for some Clevo laptops that check this
+ * field before processing the brightness and display switching hotkeys.
+ *
+ * Note that internal panels should be at the front of the connector
+ * list already, ensuring they're not left out.
+ */
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ if (i >= ARRAY_SIZE(opregion->acpi->cadl))
+ break;
+ opregion->acpi->cadl[i++] = connector->acpi_device_id;
+ }
+
+ /* If fewer than 8 active devices, the list must be null terminated */
+ if (i < ARRAY_SIZE(opregion->acpi->cadl))
+ opregion->acpi->cadl[i] = 0;
}
void intel_opregion_register(struct drm_i915_private *dev_priv)
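The new intel_didl_outputs() derives each ACPI device id from the connector's display type plus a per-type running index. A hedged sketch of that packing; the shift value below is a placeholder, not the real ACPI_DISPLAY_* layout:

#include <stdio.h>

/* Placeholder layout; the real ACPI_DISPLAY_* shifts live in the
 * opregion definitions. */
#define TYPE_SHIFT	8
#define INDEX_SHIFT	0

/* Per-type running index, as in intel_didl_outputs(): the second
 * VGA output gets VGA index 1 regardless of how many other output
 * types were enumerated before it. */
static unsigned int make_device_id(unsigned int type, int display_index[16])
{
	unsigned int id = type << TYPE_SHIFT;

	id |= display_index[type]++ << INDEX_SHIFT;
	return id;
}

int main(void)
{
	int index[16] = { 0 };

	printf("%#x\n", make_device_id(1, index));	/* first of type 1 */
	printf("%#x\n", make_device_id(2, index));	/* first of type 2 */
	printf("%#x\n", make_device_id(1, index));	/* second of type 1 */
	return 0;
}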
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index be4b4d546fd9..08ab6d762ca4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -304,7 +304,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -325,7 +325,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
@@ -339,7 +339,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
@@ -355,7 +355,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -366,7 +366,7 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
@@ -1722,7 +1722,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->name,
- panel->backlight.enabled ? "enabled" : "disabled",
+ enableddisabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6f516491a172..bbb1eaf1e6db 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -347,8 +347,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
return;
}
- DRM_DEBUG_KMS("memory self-refresh is %s\n",
- enable ? "enabled" : "disabled");
+ DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}
@@ -1061,7 +1060,8 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
for (level = 0; level < wm_state->num_levels; level++) {
struct drm_device *dev = crtc->base.dev;
- const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ const int sr_fifo_size =
+ INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
struct intel_plane *plane;
wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
@@ -1091,15 +1091,16 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
static void vlv_compute_wm(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
- int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
int level;
memset(wm_state, 0, sizeof(*wm_state));
wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
- wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
+ wm_state->num_levels = dev_priv->wm.max_level + 1;
wm_state->num_active_planes = 0;
@@ -1179,7 +1180,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
}
/* clear any (partially) filled invalid levels */
- for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
+ for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
}
@@ -1861,23 +1862,25 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
-static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 3072;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
return 768;
else
return 512;
}
-static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
- int level, bool is_sprite)
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -1888,18 +1891,18 @@ static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
return level == 0 ? 63 : 255;
}
-static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
- int level)
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 31;
else
return 15;
@@ -1912,7 +1915,8 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -1920,14 +1924,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev)->num_pipes;
+ fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (INTEL_INFO(dev)->gen <= 6)
+ if (INTEL_GEN(dev_priv) <= 6)
fifo_size /= 2;
}
@@ -1943,7 +1947,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
@@ -1956,7 +1960,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev, level);
+ return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1968,17 +1972,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}
-static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev, level, false);
- max->spr = ilk_plane_wm_reg_max(dev, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev, level);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
@@ -2382,7 +2386,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+ if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2401,7 +2405,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev, 1, &max);
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
@@ -2530,7 +2534,7 @@ static void ilk_wm_merge(struct drm_device *dev,
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
+ merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -2593,6 +2597,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -2619,7 +2624,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
else
@@ -2630,7 +2635,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* Always set WM1S_LP_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
@@ -2780,7 +2785,6 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct drm_device *dev = &dev_priv->drm;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
@@ -2836,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
@@ -3118,7 +3122,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
* we currently hold.
*/
if (!intel_state->active_pipe_changes) {
- *alloc = to_intel_crtc(for_crtc)->hw_ddb;
+ /*
+ * alloc may be cleared by clear_intel_crtc_state,
+		 * so copy from the old state to be sure
+ */
+ *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
return;
}
@@ -3624,6 +3632,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
y_min_scanlines = 4;
}
+ if (apply_memory_bw_wa)
+ y_min_scanlines *= 2;
+
plane_bytes_per_line = width * cpp;
if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
@@ -3644,8 +3655,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
plane_blocks_per_line);
y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
- if (apply_memory_bw_wa)
- y_tile_minimum *= 2;
if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
@@ -3906,25 +3915,16 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- struct intel_crtc *intel_crtc)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore)
{
- struct drm_crtc *other_crtc;
- struct drm_crtc_state *other_cstate;
- struct intel_crtc *other_intel_crtc;
- const struct skl_ddb_entry *ddb =
- &to_intel_crtc_state(intel_crtc->base.state)->wm.skl.ddb;
int i;
- for_each_crtc_in_state(state, other_crtc, other_cstate, i) {
- other_intel_crtc = to_intel_crtc(other_crtc);
-
- if (other_intel_crtc == intel_crtc)
- continue;
-
- if (skl_ddb_entries_overlap(ddb, &other_intel_crtc->hw_ddb))
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (i != ignore && entries[i] &&
+ skl_ddb_entries_overlap(ddb, entries[i]))
return true;
- }
return false;
}
@@ -4193,14 +4193,35 @@ skl_compute_wm(struct drm_atomic_state *state)
return 0;
}
-static void skl_update_wm(struct intel_crtc *intel_crtc)
+static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
+{
+ struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ enum pipe pipe = crtc->pipe;
+ int plane;
+
+ if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
+ return;
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
+
+ for_each_universal_plane(dev_priv, pipe, plane)
+ skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);
+
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
+}
+
+static void skl_initial_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_wm_values *results = &state->wm_results;
struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(intel_crtc->base.state);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
@@ -4208,27 +4229,11 @@ static void skl_update_wm(struct intel_crtc *intel_crtc)
mutex_lock(&dev_priv->wm.wm_mutex);
- /*
- * If this pipe isn't active already, we're going to be enabling it
- * very soon. Since it's safe to update a pipe's ddb allocation while
- * the pipe's shut off, just do so here. Already active pipes will have
- * their watermarks updated once we update their planes.
- */
- if (intel_crtc->base.state->active_changed) {
- int plane;
-
- for_each_universal_plane(dev_priv, pipe, plane)
- skl_write_plane_wm(intel_crtc, &pipe_wm->planes[plane],
- &results->ddb, plane);
-
- skl_write_cursor_wm(intel_crtc, &pipe_wm->planes[PLANE_CURSOR],
- &results->ddb);
- }
+ if (cstate->base.active_changed)
+ skl_atomic_update_crtc_wm(state, cstate);
skl_copy_wm_for_pipe(hw_vals, results, pipe);
- intel_crtc->hw_ddb = cstate->wm.skl.ddb;
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -4265,7 +4270,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_INFO(dev)->gen >= 7 &&
+ if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
@@ -4283,7 +4288,8 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4294,7 +4300,8 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4605,7 +4612,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
}
@@ -7690,7 +7697,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
- dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.initial_watermarks = skl_initial_wm;
+ dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
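The reworked skl_ddb_allocation_overlaps() above reduces to a classic interval-overlap test over a fixed array of per-pipe entries. A self-contained version of the same check, assuming half-open [start, end) ranges as skl_ddb_entry appears to use:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 3

struct ddb_entry { unsigned short start, end; /* [start, end) */ };

/* Two half-open ranges overlap iff each starts before the other
 * ends - the same test as skl_ddb_entries_overlap(). */
static bool entries_overlap(const struct ddb_entry *a,
			    const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

static bool allocation_overlaps(const struct ddb_entry **entries,
				const struct ddb_entry *ddb, int ignore)
{
	int i;

	for (i = 0; i < MAX_PIPES; i++)
		if (i != ignore && entries[i] &&
		    entries_overlap(ddb, entries[i]))
			return true;
	return false;
}

int main(void)
{
	struct ddb_entry a = { 0, 256 }, b = { 256, 512 };
	struct ddb_entry mine = { 200, 300 };
	const struct ddb_entry *entries[MAX_PIPES] = { &a, &b, NULL };

	/* Ignore pipe 1 (ourselves); pipe 0 still collides. */
	printf("%d\n", allocation_overlaps(entries, &mine, 1));
	return 0;
}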
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 271a3e29ff23..7b488e2793d9 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -427,7 +427,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- if (!HAS_PSR(dev)) {
+ if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
@@ -472,7 +472,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
@@ -498,7 +498,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
* - On HSW/BDW we get a recoverable frozen screen until next
* exit-activate sequence.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 700e93d80616..aeb637dc1fdf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1294,6 +1294,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
+ i915_gem_request_submit(request);
+
I915_WRITE_TAIL(request->engine, request->tail);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 642b54692d0d..3466b4e77e7c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -267,6 +267,15 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct drm_i915_gem_request *req);
+	/* Call when the priority of a request has changed and the request
+	 * and its dependencies may need rescheduling. Note the request itself may
+ * not be ready to run!
+ *
+ * Called under the struct_mutex.
+ */
+ void (*schedule)(struct drm_i915_gem_request *request,
+ int priority);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -335,12 +344,12 @@ struct intel_engine_cs {
/* Execlists */
struct tasklet_struct irq_tasklet;
- spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
} execlist_port[2];
- struct list_head execlist_queue;
+ struct rb_root execlist_queue;
+ struct rb_node *execlist_first;
unsigned int fw_domains;
bool disable_lite_restore_wa;
bool preempt_wa;
@@ -578,7 +587,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_kick_waiters(struct drm_i915_private *i915);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915);
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 05994083e161..356c662ad453 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1066,7 +1066,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(&dev_priv->drm, pipe) {
+ for_each_pipe(dev_priv, pipe) {
u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1097,7 +1097,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(&dev_priv->drm);
+ i915_redisable_vga_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3990c805a5b5..27808e91cb5a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1195,8 +1195,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_display_mode *mode = &crtc_state->base.mode;
@@ -1269,13 +1268,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
return;
/* Set the SDVO control regs. */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
@@ -1294,7 +1293,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv)) {
@@ -1305,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
- INTEL_INFO(dev)->gen < 5)
+ INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ca02855435d9..8f131a08d440 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,13 +203,8 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- const struct skl_plane_wm *p_wm =
- &crtc_state->wm.skl.optimal.planes[plane];
u32 plane_ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
@@ -233,9 +228,6 @@ skl_update_plane(struct drm_plane *drm_plane,
plane_ctl |= skl_plane_ctl_rotation(rotation);
- if (wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, plane);
-
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -291,19 +283,9 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!dplane->state->visible)
- skl_write_plane_wm(to_intel_crtc(crtc),
- &cstate->wm.skl.optimal.planes[plane],
- &dev_priv->wm.skl_results.ddb, plane);
-
I915_WRITE(PLANE_CTL(pipe, plane), 0);
I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -362,7 +344,7 @@ vlv_update_plane(struct drm_plane *dplane,
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
- unsigned int rotation = dplane->state->rotation;
+ unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
@@ -427,6 +409,12 @@ vlv_update_plane(struct drm_plane *dplane,
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SP_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ sprctl |= SP_MIRROR;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -436,11 +424,11 @@ vlv_update_plane(struct drm_plane *dplane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SP_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += src_w;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -546,6 +534,9 @@ ivb_update_plane(struct drm_plane *plane,
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SPRITE_ROTATE_180;
+
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
@@ -566,14 +557,11 @@ ivb_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SPRITE_ROTATE_180;
-
- /* HSW and BDW does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- x += src_w;
- y += src_h;
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += src_w;
+ y += src_h;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -684,6 +672,9 @@ ilk_update_plane(struct drm_plane *plane,
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
+ if (rotation & DRM_ROTATE_180)
+ dvscntr |= DVS_ROTATE_180;
+
if (IS_GEN6(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
@@ -700,9 +691,7 @@ ilk_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dvscntr |= DVS_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
}
@@ -1112,6 +1101,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
} else {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_180;
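Several hunks above replace "rotation == DRM_ROTATE_180" with "rotation & DRM_ROTATE_180" because the rotation property is a bitmask, and CHV pipe B can now combine 180 degree rotation with X reflection, so equality tests would miss combined values. A tiny demonstration with stand-in bit values (the real flags live in the DRM headers):

#include <stdio.h>

#define ROTATE_0	(1 << 0)	/* illustrative values only */
#define ROTATE_180	(1 << 2)
#define REFLECT_X	(1 << 4)

int main(void)
{
	unsigned int rotation = ROTATE_180 | REFLECT_X;

	/* Test each bit independently; '== ROTATE_180' would be
	 * false here even though the rotation is requested. */
	if (rotation & ROTATE_180)
		printf("program the ROTATE_180 bit\n");
	if (rotation & REFLECT_X)
		printf("program the MIRROR bit\n");
	return 0;
}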
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9212f00d5752..78cdfc6833d6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1029,8 +1029,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1116,7 +1115,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_color_conversion(dev_priv, color_conversion);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e2b188dcf908..d7be0d94ba4d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -402,6 +402,8 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
@@ -419,6 +421,10 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
+ /* Enable Decoupled MMIO only on BXT C stepping onwards */
+ if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ info->has_decoupled_mmio = false;
+
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
@@ -641,6 +647,8 @@ intel_fw_table_check(struct drm_i915_private *dev_priv)
num_ranges = dev_priv->uncore.fw_domains_table_entries;
for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ WARN_ON_ONCE(IS_GEN9(dev_priv) &&
+ (prev + 1) != (s32)ranges->start);
WARN_ON_ONCE(prev >= (s32)ranges->start);
prev = ranges->start;
WARN_ON_ONCE(prev >= (s32)ranges->end);
@@ -783,7 +791,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
@@ -831,6 +839,66 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
+static const enum decoupled_power_domain fw2dpd_domain[] = {
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_BLITTER,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+/*
+ * Decoupled MMIO access for only 1 DWORD
+ */
+static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain,
+ enum decoupled_ops operation)
+{
+ enum decoupled_power_domain dp_domain;
+ u32 ctrl_reg_data = 0;
+
+ dp_domain = fw2dpd_domain[fw_domain - 1];
+
+ ctrl_reg_data |= reg;
+ ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
+ ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
+ ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ GEN9_DECOUPLED_REG0_DW1) &
+ GEN9_DECOUPLED_DW1_GO) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Decoupled MMIO wait timed out\n");
+}
+
+static inline u32
+__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain)
+{
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_READ);
+
+ return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
+}
+
+static inline void
+__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 data,
+ enum forcewake_domains fw_domain)
+{
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
+
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_WRITE);
+}
+
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv);
@@ -935,6 +1003,28 @@ fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
GEN6_READ_FOOTER; \
}
+#define __gen9_decoupled_read(x) \
+static u##x \
+gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, bool trace) { \
+ enum forcewake_domains fw_engine; \
+ GEN6_READ_HEADER(x); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
+ unsigned i; \
+ u32 *ptr_data = (u32 *) &val; \
+ for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
+ *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
+ offset, \
+ fw_engine); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ GEN6_READ_FOOTER; \
+}
+
+__gen9_decoupled_read(32)
+__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
@@ -1064,6 +1154,25 @@ fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bo
GEN6_WRITE_FOOTER; \
}
+#define __gen9_decoupled_write(x) \
+static void \
+gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, u##x val, \
+ bool trace) { \
+ enum forcewake_domains fw_engine; \
+ GEN6_WRITE_HEADER; \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
+ __gen9_decoupled_mmio_write(dev_priv, \
+ offset, \
+ val, \
+ fw_engine); \
+ else \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
+}
+
+__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
@@ -1287,6 +1396,14 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ if (HAS_DECOUPLED_MMIO(dev_priv)) {
+ dev_priv->uncore.funcs.mmio_readl =
+ gen9_decoupled_read32;
+ dev_priv->uncore.funcs.mmio_readq =
+ gen9_decoupled_read64;
+ dev_priv->uncore.funcs.mmio_writel =
+ gen9_decoupled_write32;
+ }
break;
case 8:
if (IS_CHERRYVIEW(dev_priv)) {
@@ -1368,7 +1485,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
+ (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
break;
}
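__gen9_decoupled_mmio_access() above implements a go-bit handshake: program the offset, operation and power domain plus a GO bit into a control register, poll until the hardware clears GO, and move the payload through a companion data register. A userspace mock of that handshake, with invented variables standing in for the real MMIO pair:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the GEN9 decoupled register pair:
 * dw0 carries data, dw1 carries offset/op/domain plus a GO bit. */
static uint32_t dw0, dw1;
#define GO_BIT (1u << 31)

/* Pretend the hardware completed the access. */
static void fake_hw_complete(void)
{
	dw0 = 0x1234;
	dw1 &= ~GO_BIT;
}

/* Kick off the access with GO set, then poll until it clears. */
static uint32_t decoupled_read(uint32_t offset)
{
	dw1 = offset | GO_BIT;	/* op/power-domain fields omitted */
	fake_hw_complete();	/* real hardware clears GO asynchronously */
	while (dw1 & GO_BIT)
		;		/* real code bounds this with a timeout */
	return dw0;
}

int main(void)
{
	printf("read back %#x\n", decoupled_read(0xb100));
	return 0;
}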
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 68db9621f1f0..8886cab19f98 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -280,7 +280,8 @@ struct common_child_dev_config {
u8 dp_support:1;
u8 tmds_support:1;
u8 support_reserved:5;
- u8 not_common3[12];
+ u8 aux_channel;
+ u8 not_common3[11];
u8 iboost_level;
} __packed;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4e1ae3fc462d..6be515a9fb69 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
ipu_dc_disable_channel(ipu_crtc->dc);
ipu_di_disable(ipu_crtc->di);
+ /*
+ * Planes must be disabled before the DC clock is removed, as otherwise
+ * the attached IDMACs will be left in an undefined state, possibly
+ * hanging the IPU or even the system.
+ */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
ipu_dc_disable(ipu);
spin_lock_irq(&crtc->dev->event_lock);
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
}
spin_unlock_irq(&crtc->dev->event_lock);
- /* always disable planes on the CRTC */
- drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
-
drm_crtc_vblank_off(crtc);
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index f05ed0e1f3d6..6f240021705b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -139,6 +139,7 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
+ struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
wmb(); /* make sure dsi controller enabled again */
}
+static void dsi_hpd_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, hpd_work);
+
+ drm_helper_hpd_irq_event(msm_host->dev);
+}
+
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
+ INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->host = &msm_host->base;
msm_dsi->id = msm_host->id;
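
The hpd_work change exists because drm_helper_hpd_irq_event() takes sleeping locks, so calling it directly from dsi_host_attach()/detach() can deadlock against the DRM core; deferring it to the driver's ordered workqueue breaks the cycle. The deferral idiom in isolation (a sketch; the struct names are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <drm/drm_crtc_helper.h>

struct hpd_ctx {
	struct work_struct hpd_work;
	struct drm_device *dev;		/* assumed to outlive queued work */
};

static void hpd_worker(struct work_struct *work)
{
	struct hpd_ctx *ctx = container_of(work, struct hpd_ctx, hpd_work);

	drm_helper_hpd_irq_event(ctx->dev);	/* may sleep: safe in a worker */
}

/* init:    INIT_WORK(&ctx->hpd_work, hpd_worker);
 * hotpath: queue_work(wq, &ctx->hpd_work);  instead of calling directly
 */
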
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 598fdaff0a41..26e3a01a99c2 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 38c90e1eb002..49008451085b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "pxo" },
.num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index aa94a553794f..143eab46ba68 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_8996_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8996_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 92da69aa6187..99590758c68b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8960_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ac9e4cde1380..8b4e3004f451 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
- MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
- .nb_stages = 5,
+ .nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index fa2be7ce9468..c205c360e16d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc)
plane_cnt++;
}
- /*
- * If there is no base layer, enable border color.
- * Although it's not possbile in current blend logic,
- * put it here as a reminder.
- */
- if (!pstates[STAGE_BASE] && plane_cnt) {
+ if (!pstates[STAGE_BASE]) {
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled");
}
@@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b)
return pa->state->zpos - pb->state->zpos;
}
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+ struct drm_plane_state *pstate)
+{
+ return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+ ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+ ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
- int cnt = 0, i;
+ int cnt = 0, base = 0, i;
DBG("%s: check", mdp5_crtc->name);
- /* verify that there are not too many planes attached to crtc
- * and that we don't have conflicting mixer stages:
- */
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
- if (cnt >= (hw_cfg->lm.nb_stages)) {
- dev_err(dev->dev, "too many planes!\n");
- return -EINVAL;
- }
-
-
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
/* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ /* if the bottom-most layer is not fullscreen, we need to use
+ * it for solid-color:
+ */
+ if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
+ base++;
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ if ((cnt + base) >= hw_cfg->lm.nb_stages) {
+ dev_err(dev->dev, "too many planes!\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < cnt; i++) {
- pstates[i].state->stage = STAGE_BASE + i;
+ pstates[i].state->stage = STAGE_BASE + i + base;
DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
pipe2name(mdp5_plane_pipe(pstates[i].plane)),
pstates[i].state->stage);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 8bf55e3450c5..81c0562ab489 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -307,8 +307,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(state->fb));
if (MDP_FORMAT_IS_YUV(format) &&
!pipe_supports_yuv(mdp5_plane->caps)) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support YUV\n");
+ DBG("Pipe doesn't support YUV\n");
return -EINVAL;
}
@@ -316,8 +315,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
(((state->src_w >> 16) != state->crtc_w) ||
((state->src_h >> 16) != state->crtc_h))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+ DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
state->src_w >> 16, state->src_h >> 16,
state->crtc_w, state->crtc_h);
@@ -333,8 +331,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support flip\n");
+ DBG("Pipe doesn't support flip\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8d21fb27a401..440c00ff8409 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -234,7 +234,7 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->atomic_wq);
destroy_workqueue(priv->atomic_wq);
- if (kms)
+ if (kms && kms->funcs)
kms->funcs->destroy(kms);
if (gpu) {
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 283d2841ba58..192b2d3a79cb 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
- unregister_shrinker(&priv->shrinker);
+
+ if (priv->shrinker.nr_deferred) {
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+ }
}
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 04270f5d110c..74fc9362ecf9 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -578,7 +578,7 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
return 0;
}
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
struct qxl_rect rect;
int ret;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a61c0d460ec2..4b5eab8a47b3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -36,7 +36,7 @@ static bool qxl_head_enabled(struct qxl_head *head)
return head->width && head->height;
}
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
{
if (qdev->client_monitors_config &&
count > qdev->client_monitors_config->count) {
@@ -57,11 +57,18 @@ void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
qdev->client_monitors_config->count = count;
}
+enum {
+ MONITORS_CONFIG_MODIFIED,
+ MONITORS_CONFIG_UNCHANGED,
+ MONITORS_CONFIG_BAD_CRC,
+};
+
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
{
int i;
int num_monitors;
uint32_t crc;
+ int status = MONITORS_CONFIG_UNCHANGED;
num_monitors = qdev->rom->client_monitors_config.count;
crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
@@ -70,7 +77,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
sizeof(qdev->rom->client_monitors_config),
qdev->rom->client_monitors_config_crc);
- return 1;
+ return MONITORS_CONFIG_BAD_CRC;
}
if (num_monitors > qdev->monitors_config->max_allowed) {
DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
@@ -79,6 +86,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
} else {
num_monitors = qdev->rom->client_monitors_config.count;
}
+ if (qdev->client_monitors_config
+ && (num_monitors != qdev->client_monitors_config->count)) {
+ status = MONITORS_CONFIG_MODIFIED;
+ }
qxl_alloc_client_monitors_config(qdev, num_monitors);
/* we copy max from the client but it isn't used */
qdev->client_monitors_config->max_allowed =
@@ -88,17 +99,39 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
&qdev->rom->client_monitors_config.heads[i];
struct qxl_head *client_head =
&qdev->client_monitors_config->heads[i];
- client_head->x = c_rect->left;
- client_head->y = c_rect->top;
- client_head->width = c_rect->right - c_rect->left;
- client_head->height = c_rect->bottom - c_rect->top;
- client_head->surface_id = 0;
- client_head->id = i;
- client_head->flags = 0;
+ if (client_head->x != c_rect->left) {
+ client_head->x = c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->y != c_rect->top) {
+ client_head->y = c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->width != c_rect->right - c_rect->left) {
+ client_head->width = c_rect->right - c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->height != c_rect->bottom - c_rect->top) {
+ client_head->height = c_rect->bottom - c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->surface_id != 0) {
+ client_head->surface_id = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->id != i) {
+ client_head->id = i;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->flags != 0) {
+ client_head->flags = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
client_head->x, client_head->y);
}
- return 0;
+
+ return status;
}
static void qxl_update_offset_props(struct qxl_device *qdev)
@@ -124,9 +157,18 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
struct drm_device *dev = qdev->ddev;
- while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+ int status;
+
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ while (status == MONITORS_CONFIG_BAD_CRC) {
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ }
+ if (status == MONITORS_CONFIG_UNCHANGED) {
+ qxl_io_log(qdev, "config unchanged\n");
+ DRM_DEBUG("ignoring unchanged client monitors config");
+ return;
}
drm_modeset_lock_all(dev);
@@ -157,6 +199,9 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
false);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->hdisplay = head->width;
+ mode->vdisplay = head->height;
+ drm_mode_set_name(mode);
*pwidth = head->width;
*pheight = head->height;
drm_mode_probed_add(connector, mode);
@@ -607,7 +652,7 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-void
+static void
qxl_send_monitors_config(struct qxl_device *qdev)
{
int i;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 84995ebc6ffc..785aad42e9bb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -395,16 +395,11 @@ qxl_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj,
const struct drm_framebuffer_funcs *funcs);
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
-void qxl_send_monitors_config(struct qxl_device *qdev);
int qxl_create_monitors_object(struct qxl_device *qdev);
int qxl_destroy_monitors_object(struct qxl_device *qdev);
-/* used by qxl_debugfs only */
-void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
-
/* qxl_gem.c */
-int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_init(struct qxl_device *qdev);
void qxl_gem_fini(struct qxl_device *qdev);
int qxl_gem_object_create(struct qxl_device *qdev, int size,
int alignment, int initial_domain,
@@ -574,6 +569,5 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
struct qxl_drv_surface *
qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 7e305d8a4146..fd7e5e94be5b 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -191,7 +191,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
/*
* we are using a shadow draw buffer, at qdev->surface0_shadow
*/
- qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+ qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2,
clips->y1, clips->y2);
image->dx = clips->x1;
image->dy = clips->y1;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index d9746e904ef1..3f185c4da5b7 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -111,10 +111,9 @@ void qxl_gem_object_close(struct drm_gem_object *obj,
{
}
-int qxl_gem_init(struct qxl_device *qdev)
+void qxl_gem_init(struct qxl_device *qdev)
{
INIT_LIST_HEAD(&qdev->gem.objects);
- return 0;
}
void qxl_gem_fini(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e642242728c0..af685f1d91f8 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -131,7 +131,7 @@ static int qxl_device_init(struct qxl_device *qdev,
mutex_init(&qdev->update_area_mutex);
mutex_init(&qdev->release_mutex);
mutex_init(&qdev->surf_evict_mutex);
- INIT_LIST_HEAD(&qdev->gem.objects);
+ qxl_gem_init(qdev);
qdev->rom_base = pci_resource_start(pdev, 2);
qdev->rom_size = pci_resource_len(pdev, 2);
@@ -273,6 +273,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
+ qxl_gem_fini(qdev);
qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e18839d52e3e..27affbde058c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->ddc_bus->has_aux) {
+ if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
radeon_connector->ddc_bus->has_aux = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0be8d5cd7826..60a8920fa0b9 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
rdev->flags &= ~RADEON_IS_PX;
+
+ /* disable PX if the system doesn't support dGPU power control or hybrid gfx */
+ if (!radeon_is_atpx_hybrid() &&
+ !radeon_has_atpx_dgpu_power_cntl())
+ rdev->flags &= ~RADEON_IS_PX;
}
/**
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 29f0207fa677..873f010d9616 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -98,17 +98,23 @@ success:
static int udl_select_std_channel(struct udl_device *udl)
{
int ret;
- u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
- 0x1C, 0x88, 0x5E, 0x15,
- 0x60, 0xFE, 0xC6, 0x97,
- 0x16, 0x3D, 0x47, 0xF2};
+ static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+ 0x1C, 0x88, 0x5E, 0x15,
+ 0x60, 0xFE, 0xC6, 0x97,
+ 0x16, 0x3D, 0x47, 0xF2};
+ void *sendbuf;
+
+ sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+ if (!sendbuf)
+ return -ENOMEM;
ret = usb_control_msg(udl->udev,
usb_sndctrlpipe(udl->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
- set_def_chn, sizeof(set_def_chn),
+ sendbuf, sizeof(set_def_chn),
USB_CTRL_SET_TIMEOUT);
+ kfree(sendbuf);
return ret < 0 ? ret : 0;
}
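
The udl change enforces a long-standing USB rule: buffers handed to usb_control_msg() may be DMA-mapped by the host controller, so they must come from the heap, never from the stack or from .rodata (where the now-const table lives). The same pattern reduced to essentials, as a sketch:

#include <linux/slab.h>
#include <linux/usb.h>

static int send_vendor_table(struct usb_device *udev, u8 request,
			     const u8 *table, size_t len)
{
	void *buf;
	int ret;

	buf = kmemdup(table, len, GFP_KERNEL);	/* heap copy: DMA-able */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_DIR_OUT | USB_TYPE_VENDOR, 0, 0,
			      buf, len, USB_CTRL_SET_TIMEOUT);
	kfree(buf);
	return ret < 0 ? ret : 0;
}
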
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 49e5996cb9f2..3b97d50fd392 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -28,16 +28,6 @@
#include "virtgpu_drv.h"
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- struct pci_dev *pdev = dev->pdev;
-
- if (pdev) {
- return drm_pci_set_busid(dev, master);
- }
- return 0;
-}
-
static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
{
struct apertures_struct *ap;
@@ -71,13 +61,22 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+ char unique[20];
- DRM_INFO("pci: %s detected\n",
- vga ? "virtio-vga" : "virtio-gpu-pci");
+ DRM_INFO("pci: %s detected at %s\n",
+ vga ? "virtio-vga" : "virtio-gpu-pci",
+ pname);
dev->pdev = pdev;
if (vga)
virtio_pci_kick_out_firmware_fb(pdev);
+
+ snprintf(unique, sizeof(unique), "pci:%s", pname);
+ ret = drm_dev_set_unique(dev, unique);
+ if (ret)
+ goto err_free;
+
}
ret = drm_dev_register(dev, 0);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 04d98db75c64..d82489815096 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -115,7 +115,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ec1ebdcfe80b..08906c8ce3fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,7 +49,6 @@
#define DRIVER_PATCHLEVEL 1
/* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
struct virtio_gpu_object {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..974f9410474b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -75,7 +75,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_vbuffer *vbuf;
- int i, size, count = 0;
+ int i, size, count = 16;
void *ptr;
INIT_LIST_HEAD(&vgdev->free_vbufs);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6cfb5cacc253..575aa65436d1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -179,6 +179,7 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
#define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 5614fee82347..3a84aaf1418b 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -292,11 +292,11 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
bool input = false;
int value = 0;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
feature = true;
field_index = index + sensor_inst->input_field_count;
- } else if (sscanf(attr->attr.name, "input-%d-%x-%s", &index, &usage,
+ } else if (sscanf(attr->attr.name, "input-%x-%x-%s", &index, &usage,
name) == 3) {
input = true;
field_index = index;
@@ -398,7 +398,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
char name[HID_CUSTOM_NAME_LENGTH];
int value;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
field_index = index + sensor_inst->input_field_count;
} else
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 658a607dc6d9..c5c3d6111729 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -251,6 +251,9 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
int report_size;
int ret = 0;
+ u8 *val_ptr;
+ int buffer_index = 0;
+ int i;
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
@@ -271,7 +274,17 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
goto done_proc;
}
ret = min(report_size, buffer_size);
- memcpy(buffer, report->field[field_index]->value, ret);
+
+ val_ptr = (u8 *)report->field[field_index]->value;
+ for (i = 0; i < report->field[field_index]->report_count; ++i) {
+ if (buffer_index >= ret)
+ break;
+
+ memcpy(&((u8 *)buffer)[buffer_index], val_ptr,
+ report->field[field_index]->report_size / 8);
+ val_ptr += sizeof(__s32);
+ buffer_index += (report->field[field_index]->report_size / 8);
+ }
done_proc:
mutex_unlock(&data->mutex);
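
The sensor-hub fix matters because report->field[]->value stores one logical value per __s32 slot, while report_size gives each value's width in bits; a flat memcpy of the __s32 array therefore interleaves padding bytes into the caller's buffer whenever report_size is below 32. The per-element copy, reduced to a standalone sketch:

#include <linux/string.h>
#include <linux/types.h>

/*
 * Copy 'count' logical values out of a __s32-backed HID field into a
 * byte buffer, 'width' bytes per value (report_size / 8).  Assumes
 * little-endian storage, like the hunk above.
 */
static size_t copy_field_values(u8 *dst, size_t dst_len, const __s32 *values,
				unsigned int count, unsigned int width)
{
	size_t copied = 0;
	unsigned int i;

	for (i = 0; i < count && copied + width <= dst_len; i++) {
		memcpy(dst + copied, &values[i], width);
		copied += width;
	}
	return copied;
}
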
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index e2517c11e0ee..0c9ac4d5d850 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -638,6 +638,58 @@ eoi:
}
/**
+ * ish_disable_dma() - disable dma communication between host and ISHFW
+ * @dev: ishtp device pointer
+ *
+ * Clear the dma enable bit and wait for dma inactive.
+ *
+ * Return: 0 for success else error code.
+ */
+static int ish_disable_dma(struct ishtp_device *dev)
+{
+ unsigned int dma_delay;
+
+ /* Clear the dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
+
+ /* wait for dma inactive */
+ for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
+ _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
+ dma_delay += 5)
+ mdelay(5);
+
+ if (dma_delay >= MAX_DMA_DELAY) {
+ dev_err(dev->devc,
+ "Wait for DMA inactive timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_wakeup() - wakeup ishfw from waiting-for-host state
+ * @dev: ishtp device pointer
+ *
+ * Set the dma enable bit and send a void message to FW;
+ * it will wake up FW from the waiting-for-host state.
+ */
+static void ish_wakeup(struct ishtp_device *dev)
+{
+ /* Set dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep.
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+
+ /* Flush writes to doorbell and REMAP2 */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
* _ish_hw_reset() - HW reset
* @dev: ishtp device pointer
*
@@ -649,7 +701,6 @@ static int _ish_hw_reset(struct ishtp_device *dev)
{
struct pci_dev *pdev = dev->pdev;
int rv;
- unsigned int dma_delay;
uint16_t csr;
if (!pdev)
@@ -664,15 +715,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
return -EINVAL;
}
- /* Now trigger reset to FW */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
-
- for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
- _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
- dma_delay += 5)
- mdelay(5);
-
- if (dma_delay >= MAX_DMA_DELAY) {
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
dev_err(&pdev->dev,
"Can't reset - stuck with DMA in-progress\n");
return -EBUSY;
@@ -690,16 +734,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
csr |= PCI_D0;
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
-
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
-
- /* Flush writes to doorbell and REMAP2 */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* Now we can enable ISH DMA operation and wake up ISHFW */
+ ish_wakeup(dev);
return 0;
}
@@ -758,16 +794,9 @@ static int _ish_ipc_reset(struct ishtp_device *dev)
int ish_hw_start(struct ishtp_device *dev)
{
ish_set_host_rdy(dev);
- /* After that we can enable ISH DMA operation */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
- /* Flush write to doorbell */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* After that we can enable ISH DMA operation and wake up ISHFW */
+ ish_wakeup(dev);
set_host_ready(dev);
@@ -876,6 +905,21 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
*/
void ish_device_disable(struct ishtp_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (!pdev)
+ return;
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return;
+ }
+
+ /* Put ISH to D3hot state for power saving */
+ pci_set_power_state(pdev, PCI_D3hot);
+
dev->dev_state = ISHTP_DEV_DISABLED;
ish_clr_host_rdy(dev);
}
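
The extracted ish_disable_dma() is an instance of the bounded-poll idiom: clear a control bit, then re-read a status register in fixed steps until it reports idle or a deadline passes. Factored out as a generic sketch (the register accessor and the 5 ms step are assumptions mirroring the code above):

#include <linux/delay.h>
#include <linux/io.h>

static int wait_bit_clear(void __iomem *reg, u32 bit, unsigned int timeout_ms)
{
	unsigned int waited;

	for (waited = 0; waited < timeout_ms; waited += 5) {
		if (!(readl(reg) & bit))
			return 0;	/* idle reached */
		mdelay(5);		/* same 5 ms step as the driver */
	}
	return -EBUSY;			/* deadline expired, still busy */
}
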
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 42f0beeb09fd..20d647d2dd2c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -146,7 +146,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
/* request and enable interrupt */
- ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND,
+ ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
KBUILD_MODNAME, dev);
if (ret) {
dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
@@ -202,6 +202,7 @@ static void ish_remove(struct pci_dev *pdev)
kfree(ishtp_dev);
}
+#ifdef CONFIG_PM
static struct device *ish_resume_device;
/**
@@ -293,7 +294,6 @@ static int ish_resume(struct device *device)
return 0;
}
-#ifdef CONFIG_PM
static const struct dev_pm_ops ish_pm_ops = {
.suspend = ish_suspend,
.resume = ish_resume,
@@ -301,7 +301,7 @@ static const struct dev_pm_ops ish_pm_ops = {
#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
#else
#define ISHTP_ISH_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM */
static struct pci_driver ish_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 354d49ea36dd..e6cfd323babc 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -63,6 +63,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a259e18d22d5..0276d2ef06ee 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -961,7 +961,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
{
int ret = 0;
- dev_set_name(&child_device_obj->device, "vmbus-%pUl",
+ dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
child_device_obj->device.bus = &hv_bus;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index adae6848ffb2..a74c075a30ec 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -536,8 +536,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
GFP_KERNEL);
- if (!hwdev->groups)
- return ERR_PTR(-ENOMEM);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
attrs = __hwmon_create_attrs(dev, drvdata, chip);
if (IS_ERR(attrs)) {
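
The hwmon hunk applies the standard unwind rule: once hwdev has been allocated, later failures must branch to the shared cleanup label instead of returning directly, or the earlier allocation leaks. The shape of that idiom, as a sketch with placeholder names:

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int dummy; };			/* placeholder type */

static int setup_part(struct thing *t) { return 0; }	/* stand-in step */

static struct thing *create_thing(void)
{
	struct thing *t;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	err = setup_part(t);
	if (err)
		goto free_thing;	/* unwind, don't leak 't' */

	return t;

free_thing:
	kfree(t);
	return ERR_PTR(err);
}
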
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index da3fb069ec5c..ce69048c88e9 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = adata->current_fullscale->gain;
+ *val = adata->current_fullscale->gain / 1000000;
+ *val2 = adata->current_fullscale->gain % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = adata->odr;
@@ -763,9 +763,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev,
int err;
switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ case IIO_CHAN_INFO_SCALE: {
+ int gain;
+
+ gain = val * 1000000 + val2;
+ err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
break;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
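
After this change the gain is kept in micro-units end to end: the read path splits it as val = gain / 1000000, val2 = gain % 1000000 for IIO_VAL_INT_PLUS_MICRO, and the write path rejoins gain = val * 1000000 + val2. A quick userspace check of that round trip:

#include <stdio.h>

int main(void)
{
	int gain = 9806650;		/* 9.806650 in micro-units */
	int val = gain / 1000000;	/* integer part -> 9 */
	int val2 = gain % 1000000;	/* micro part  -> 806650 */
	int joined = val * 1000000 + val2;

	printf("%d.%06d round-trips: %s\n", val, val2,
	       joined == gain ? "yes" : "no");
	return 0;
}
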
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index dc33c1dd5191..b5beea53d6f6 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -30,26 +30,26 @@ static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
int scale_val0; /* scale, whole number */
- int scale_val1; /* scale, fraction in micros */
+ int scale_val1; /* scale, fraction in nanos */
} unit_conversion[] = {
- {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650},
+ {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000},
{HID_USAGE_SENSOR_ACCEL_3D,
HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
{HID_USAGE_SENSOR_ACCEL_3D,
- HID_USAGE_SENSOR_UNITS_G, 9, 806650},
+ HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
- {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_GYRO_3D,
HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
{HID_USAGE_SENSOR_GYRO_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293},
- {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000},
+ {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000},
{HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
- {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
@@ -57,7 +57,7 @@ static struct {
{HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
{HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
+ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000},
};
static int pow_10(unsigned power)
@@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
/*
 * This function applies the unit exponent to the scale.
* For example:
- * 9.806650 ->exp:2-> val0[980]val1[665000]
- * 9.000806 ->exp:2-> val0[900]val1[80600]
- * 0.174535 ->exp:2-> val0[17]val1[453500]
- * 1.001745 ->exp:0-> val0[1]val1[1745]
- * 1.001745 ->exp:2-> val0[100]val1[174500]
- * 1.001745 ->exp:4-> val0[10017]val1[450000]
- * 9.806650 ->exp:-2-> val0[0]val1[98066]
+ * 9.806650000 ->exp:2-> val0[980]val1[665000000]
+ * 9.000806000 ->exp:2-> val0[900]val1[80600000]
+ * 0.174535293 ->exp:2-> val0[17]val1[453529300]
+ * 1.001745329 ->exp:0-> val0[1]val1[1745329]
+ * 1.001745329 ->exp:2-> val0[100]val1[174532900]
+ * 1.001745329 ->exp:4-> val0[10017]val1[453290000]
+ * 9.806650000 ->exp:-2-> val0[0]val1[98066500]
*/
-static void adjust_exponent_micro(int *val0, int *val1, int scale0,
+static void adjust_exponent_nano(int *val0, int *val1, int scale0,
int scale1, int exp)
{
int i;
@@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0,
if (exp > 0) {
*val0 = scale0 * pow_10(exp);
res = 0;
- if (exp > 6) {
+ if (exp > 9) {
*val1 = 0;
return;
}
for (i = 0; i < exp; ++i) {
- x = scale1 / pow_10(5 - i);
+ x = scale1 / pow_10(8 - i);
res += (pow_10(exp - 1 - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ scale1 = scale1 % pow_10(8 - i);
}
*val0 += res;
*val1 = scale1 * pow_10(exp);
} else if (exp < 0) {
exp = abs(exp);
- if (exp > 6) {
+ if (exp > 9) {
*val0 = *val1 = 0;
return;
}
*val0 = scale0 / pow_10(exp);
rem = scale0 % pow_10(exp);
res = 0;
- for (i = 0; i < (6 - exp); ++i) {
- x = scale1 / pow_10(5 - i);
- res += (pow_10(5 - exp - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ for (i = 0; i < (9 - exp); ++i) {
+ x = scale1 / pow_10(8 - i);
+ res += (pow_10(8 - exp - i) * x);
+ scale1 = scale1 % pow_10(8 - i);
}
- *val1 = rem * pow_10(6 - exp) + res;
+ *val1 = rem * pow_10(9 - exp) + res;
} else {
*val0 = scale0;
*val1 = scale1;
@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id,
unit_conversion[i].unit == attr_info->units) {
exp = hid_sensor_convert_exponent(
attr_info->unit_expo);
- adjust_exponent_micro(val0, val1,
+ adjust_exponent_nano(val0, val1,
unit_conversion[i].scale_val0,
unit_conversion[i].scale_val1, exp);
break;
}
}
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
EXPORT_SYMBOL(hid_sensor_format_scale);
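
adjust_exponent_nano() peels the nine fractional digits off one at a time so everything fits in 32-bit integers. When checking the table in the comment above, the same result can be computed directly with 64-bit math, as in this standalone userspace sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t scale0 = 9, scale1 = 806650000;	/* 9.806650000 */
	int exp = 2;				/* HID unit exponent */
	int64_t total = scale0 * 1000000000LL + scale1;	/* in nanos */

	while (exp > 0) { total *= 10; exp--; }	/* scale up... */
	while (exp < 0) { total /= 10; exp++; }	/* ...or down */

	/* prints val0=980 val1=665000000, matching the table above */
	printf("val0=%lld val1=%lld\n", (long long)(total / 1000000000LL),
	       (long long)(total % 1000000000LL));
	return 0;
}
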
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 285a64a589d7..975a1f19f747 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -612,7 +612,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i, len = 0;
+ int i, len = 0, q, r;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -621,8 +621,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- sdata->sensor_settings->fs.fs_avl[i].gain);
+ q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
+ r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index b98b9d94d184..a97e802ca523 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = {
.id_table = hid_dev_rot_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_dev_rot_probe,
.remove = hid_dev_rot_remove,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 066161a4bccd..f962f31a5eb2 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -136,6 +136,8 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
*val = be32_to_cpu(buf32);
break;
+ default:
+ ret = -EINVAL;
}
if (ret)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 36bf50ebb187..89a6b0546804 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1094,47 +1094,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
}
}
-static void cma_save_ip4_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip4_info(struct sockaddr_in *src_addr,
+ struct sockaddr_in *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in *ip4;
-
if (src_addr) {
- ip4 = (struct sockaddr_in *)src_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = local_port;
+ *src_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
+ .sin_port = local_port,
+ };
}
if (dst_addr) {
- ip4 = (struct sockaddr_in *)dst_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
- ip4->sin_port = hdr->port;
+ *dst_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->src_addr.ip4.addr,
+ .sin_port = hdr->port,
+ };
}
}
-static void cma_save_ip6_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in6 *ip6;
-
if (src_addr) {
- ip6 = (struct sockaddr_in6 *)src_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = local_port;
+ *src_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->dst_addr.ip6,
+ .sin6_port = local_port,
+ };
}
if (dst_addr) {
- ip6 = (struct sockaddr_in6 *)dst_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->src_addr.ip6;
- ip6->sin6_port = hdr->port;
+ *dst_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->src_addr.ip6,
+ .sin6_port = hdr->port,
+ };
}
}
@@ -1159,10 +1159,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
switch (cma_get_ip_ver(hdr)) {
case 4:
- cma_save_ip4_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip4_info((struct sockaddr_in *)src_addr,
+ (struct sockaddr_in *)dst_addr, hdr, port);
break;
case 6:
- cma_save_ip6_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
+ (struct sockaddr_in6 *)dst_addr, hdr, port);
break;
default:
return -EAFNOSUPPORT;
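
Beyond the type safety of taking sockaddr_in/sockaddr_in6 directly, assigning from a compound literal has a second effect: every member not named in the initializer is zeroed, so sin_zero padding and any future fields can never carry stale stack bytes. Demonstrated in plain userspace C:

#include <arpa/inet.h>
#include <assert.h>
#include <netinet/in.h>
#include <string.h>

int main(void)
{
	struct sockaddr_in sa;

	memset(&sa, 0xff, sizeof(sa));		/* poison the struct */
	sa = (struct sockaddr_in) {
		.sin_family = AF_INET,
		.sin_port = htons(1234),
	};					/* unnamed members -> 0 */
	assert(sa.sin_zero[0] == 0 && sa.sin_addr.s_addr == 0);
	return 0;
}
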
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 15c01c3cd540..e6f9b2d745ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2636,17 +2636,26 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
/* And we're up. Go go go! */
of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
#ifdef CONFIG_PCI
- pci_request_acs();
- ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
- if (ret)
- return ret;
+ if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
+ pci_request_acs();
+ ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
#ifdef CONFIG_ARM_AMBA
- ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
- if (ret)
- return ret;
+ if (amba_bustype.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
- return bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c841eb7a1a74..8f7281444551 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -324,8 +324,10 @@ struct arm_smmu_master_cfg {
#define INVALID_SMENDX -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
+#define fwspec_smendx(fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
- for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
+ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
struct arm_smmu_device {
struct device *dev;
@@ -1228,6 +1230,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENXIO;
}
+ /*
+ * FIXME: The arch/arm DMA API code tries to attach devices to its own
+ * domains between of_xlate() and add_device() - we have no way to cope
+ * with that, so until ARM gets converted to rely on groups and default
+ * domains, just say no (but more politely than by dereferencing NULL).
+ * This should be at least a WARN_ON once that's sorted.
+ */
+ if (!fwspec->iommu_priv)
+ return -ENODEV;
+
smmu = fwspec_smmu(fwspec);
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
@@ -1390,7 +1402,7 @@ static int arm_smmu_add_device(struct device *dev)
fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
- } else if (fwspec) {
+ } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
} else {
return -ENODEV;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a4407eabf0e6..3965e73db51c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1711,6 +1711,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domain_ids)
return;
+again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
struct dmar_domain *domain;
@@ -1723,10 +1724,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
domain = info->domain;
- dmar_remove_one_dev_info(domain, info->dev);
+ __dmar_remove_one_dev_info(info);
- if (!domain_type_is_vm_or_si(domain))
+ if (!domain_type_is_vm_or_si(domain)) {
+ /*
+ * The domain_exit() function can't be called under
+ * device_domain_lock, as it takes this lock itself.
+ * So release the lock here and re-run the loop
+ * afterwards.
+ */
+ spin_unlock_irqrestore(&device_domain_lock, flags);
domain_exit(domain);
+ goto again;
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
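
The goto-again structure is forced by a lock-ordering rule: domain_exit() takes device_domain_lock itself, so the loop must drop the lock first, and once the lock is dropped the list cursor is stale, leaving a restart of the whole walk as the only safe continuation. The idiom in isolation (a sketch; struct item and the callbacks are placeholders):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item { struct list_head node; };	/* placeholder element */

static void drain(spinlock_t *lock, struct list_head *head,
		  bool (*match)(struct item *), void (*teardown)(struct item *))
{
	struct item *it, *tmp;
	unsigned long flags;

again:
	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(it, tmp, head, node) {
		if (!match(it))
			continue;
		list_del(&it->node);
		spin_unlock_irqrestore(lock, flags);
		teardown(it);		/* may take 'lock' itself */
		goto again;		/* cursor is stale: start over */
	}
	spin_unlock_irqrestore(lock, flags);
}
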
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 012225587c25..b71b747ee0ba 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -513,6 +513,11 @@ config DVB_AS102_FE
depends on DVB_CORE
default DVB_AS102
+config DVB_GP8PSK_FE
+ tristate
+ depends on DVB_CORE
+ default DVB_USB_GP8PSK
+
comment "DVB-C (cable) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index e90165ad361b..93921a4eaa27 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -121,6 +121,7 @@ obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
obj-$(CONFIG_DVB_AF9033) += af9033.o
obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
+obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
diff --git a/drivers/media/usb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index db6eb79cde07..be19afeed7a9 100644
--- a/drivers/media/usb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -14,11 +14,27 @@
*
* see Documentation/dvb/README.dvb-usb for more information
*/
-#include "gp8psk.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "gp8psk-fe.h"
+#include "dvb_frontend.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct gp8psk_fe_state {
struct dvb_frontend fe;
- struct dvb_usb_device *d;
+ void *priv;
+ const struct gp8psk_fe_ops *ops;
+ bool is_rev1;
u8 lock;
u16 snr;
unsigned long next_status_check;
@@ -29,22 +45,24 @@ static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe)
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 status;
- gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1);
+
+ st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1);
return status & bmDCtuned;
}
static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0);
}
static int gp8psk_fe_update_status(struct gp8psk_fe_state *st)
{
u8 buf[6];
if (time_after(jiffies,st->next_status_check)) {
- gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1);
- gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6);
+ st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1);
+ st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6);
st->snr = (buf[1]) << 8 | buf[0];
st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000;
}
@@ -116,13 +134,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 cmd[10];
u32 freq = c->frequency * 1000;
- int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct);
- deb_fe("%s()\n", __func__);
+ dprintk("%s()\n", __func__);
cmd[4] = freq & 0xff;
cmd[5] = (freq >> 8) & 0xff;
@@ -136,21 +153,21 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBS:
if (c->modulation != QPSK) {
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
c->fec_inner = FEC_AUTO;
break;
case SYS_DVBS2: /* kept for backwards compatibility */
- deb_fe("%s: DVB-S2 delivery system selected\n", __func__);
+ dprintk("%s: DVB-S2 delivery system selected\n", __func__);
break;
case SYS_TURBO:
- deb_fe("%s: Turbo-FEC delivery system selected\n", __func__);
+ dprintk("%s: Turbo-FEC delivery system selected\n", __func__);
break;
default:
- deb_fe("%s: unsupported delivery system selected (%d)\n",
+ dprintk("%s: unsupported delivery system selected (%d)\n",
__func__, c->delivery_system);
return -EOPNOTSUPP;
}
@@ -161,9 +178,9 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[3] = (c->symbol_rate >> 24) & 0xff;
switch (c->modulation) {
case QPSK:
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
if (gp8psk_tuned_to_DCII(fe))
- gp8psk_bcm4500_reload(state->d);
+ st->ops->reload(st->priv);
switch (c->fec_inner) {
case FEC_1_2:
cmd[9] = 0; break;
@@ -207,18 +224,18 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[9] = 0;
break;
default: /* Unknown modulation */
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
gp8psk_set_tuner_mode(fe, 0);
- gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10);
+ st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10);
- state->lock = 0;
- state->next_status_check = jiffies;
- state->status_check_interval = 200;
+ st->lock = 0;
+ st->next_status_check = jiffies;
+ st->status_check_interval = 200;
return 0;
}
@@ -228,9 +245,9 @@ static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe,
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0,
m->msg, m->msg_len)) {
return -EINVAL;
}
@@ -243,12 +260,12 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
/* These commands are certainly wrong */
cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01;
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0,
&cmd, 0)) {
return -EINVAL;
}
@@ -258,10 +275,10 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode tone)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE,
- (tone == SEC_TONE_ON), 0, NULL, 0)) {
+ if (st->ops->out(st->priv, SET_22KHZ_TONE,
+ (tone == SEC_TONE_ON), 0, NULL, 0)) {
return -EINVAL;
}
return 0;
@@ -270,9 +287,9 @@ static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE,
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE,
voltage == SEC_VOLTAGE_18, 0, NULL, 0)) {
return -EINVAL;
}
@@ -281,52 +298,60 @@ static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0);
}
static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd = sw_cmd & 0x7f;
- if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0,
- NULL, 0)) {
+ if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0))
return -EINVAL;
- }
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
- 0, NULL, 0)) {
+
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
+ 0, NULL, 0))
return -EINVAL;
- }
return 0;
}
static void gp8psk_fe_release(struct dvb_frontend* fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- kfree(state);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ kfree(st);
}
static struct dvb_frontend_ops gp8psk_fe_ops;
-struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d)
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1)
{
- struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
- if (s == NULL)
- goto error;
-
- s->d = d;
- memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
- s->fe.demodulator_priv = s;
-
- goto success;
-error:
- return NULL;
-success:
- return &s->fe;
-}
+ struct gp8psk_fe_state *st;
+ if (!ops || !ops->in || !ops->out || !ops->reload) {
+ pr_err("Error! gp8psk-fe ops not defined.\n");
+ return NULL;
+ }
+
+ st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
+ st->fe.demodulator_priv = st;
+ st->ops = ops;
+ st->priv = priv;
+ st->is_rev1 = is_rev1;
+
+ pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : "");
+
+ return &st->fe;
+}
+EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
static struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.h b/drivers/media/dvb-frontends/gp8psk-fe.h
new file mode 100644
index 000000000000..6c7944b1ecd6
--- /dev/null
+++ b/drivers/media/dvb-frontends/gp8psk-fe.h
@@ -0,0 +1,82 @@
+/*
+ * gp8psk_fe driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef GP8PSK_FE_H
+#define GP8PSK_FE_H
+
+#include <linux/types.h>
+
+/* gp8psk commands */
+
+#define GET_8PSK_CONFIG 0x80 /* in */
+#define SET_8PSK_CONFIG 0x81
+#define I2C_WRITE 0x83
+#define I2C_READ 0x84
+#define ARM_TRANSFER 0x85
+#define TUNE_8PSK 0x86
+#define GET_SIGNAL_STRENGTH 0x87 /* in */
+#define LOAD_BCM4500 0x88
+#define BOOT_8PSK 0x89 /* in */
+#define START_INTERSIL 0x8A /* in */
+#define SET_LNB_VOLTAGE 0x8B
+#define SET_22KHZ_TONE 0x8C
+#define SEND_DISEQC_COMMAND 0x8D
+#define SET_DVB_MODE 0x8E
+#define SET_DN_SWITCH 0x8F
+#define GET_SIGNAL_LOCK 0x90 /* in */
+#define GET_FW_VERS 0x92
+#define GET_SERIAL_NUMBER 0x93 /* in */
+#define USE_EXTRA_VOLT 0x94
+#define GET_FPGA_VERS 0x95
+#define CW3K_INIT 0x9d
+
+/* PSK_configuration bits */
+#define bm8pskStarted 0x01
+#define bm8pskFW_Loaded 0x02
+#define bmIntersilOn 0x04
+#define bmDVBmode 0x08
+#define bm22kHz 0x10
+#define bmSEL18V 0x20
+#define bmDCtuned 0x40
+#define bmArmed 0x80
+
+/* Satellite modulation modes */
+#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
+#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
+#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
+#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
+
+#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
+#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
+#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
+#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
+#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
+#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
+
+/* firmware revision IDs */
+#define GP8PSK_FW_REV1 0x020604
+#define GP8PSK_FW_REV2 0x020704
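+/* builds a 24-bit version number from the first three bytes returned by GET_FW_VERS */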
+#define GP8PSK_FW_VERS(_fw_vers) \
+ ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
+
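+/*
+ * Callbacks provided by the USB driver: in/out perform control transfers
+ * towards the device, reload reloads the bcm4500 firmware.
+ */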
+struct gp8psk_fe_ops {
+ int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*reload)(void *priv);
+};
+
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1);
+
+#endif
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index f95a6bc839d5..cede3975d04b 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -118,7 +118,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
*protocol = RC_TYPE_RC6_MCE;
dev &= 0x7f;
dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n",
- toggle, vendor, dev, code);
+ *ptoggle, vendor, dev, code);
} else {
*ptoggle = 0;
*protocol = RC_TYPE_RC6_6A_32;
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 2a7b5a963acf..3b3f32b426d1 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_DVB_USB_VP7045) += dvb-usb-vp7045.o
dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o
obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o
-dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o
+dvb-usb-gp8psk-objs := gp8psk.o
obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o
dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index b257780fb380..7853261906b1 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -53,7 +53,6 @@ struct af9005_device_state {
u8 sequence;
int led_state;
unsigned char data[256];
- struct mutex data_mutex;
};
static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
@@ -72,7 +71,7 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
return -EINVAL;
}
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = 14; /* rest of buffer length low */
st->data[1] = 0; /* rest of buffer length high */
@@ -140,7 +139,7 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
values[i] = st->data[8 + i];
ret:
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -481,7 +480,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
}
packet_len = wlen + 5;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = (u8) (packet_len & 0xff);
st->data[1] = (u8) ((packet_len & 0xff00) >> 8);
@@ -512,7 +511,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
rbuf[i] = st->data[i + 7];
}
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -523,7 +522,7 @@ int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
u8 seq;
int ret, i;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
memset(st->data, 0, sizeof(st->data));
@@ -559,7 +558,7 @@ int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
for (i = 0; i < len; i++)
values[i] = st->data[6 + i];
}
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -847,7 +846,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
return 0;
}
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
/* deb_info("rc_query\n"); */
st->data[0] = 3; /* rest of packet length low */
@@ -890,7 +889,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
}
ret:
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -1004,20 +1003,8 @@ static struct dvb_usb_device_properties af9005_properties;
static int af9005_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
- struct af9005_device_state *st;
- int ret;
-
- ret = dvb_usb_device_init(intf, &af9005_properties,
- THIS_MODULE, &d, adapter_nr);
-
- if (ret < 0)
- return ret;
-
- st = d->priv;
- mutex_init(&st->data_mutex);
-
- return 0;
+ return dvb_usb_device_init(intf, &af9005_properties,
+ THIS_MODULE, NULL, adapter_nr);
}
enum af9005_usb_table_entry {
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 8ac825413d5a..290275bc7fde 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -42,7 +42,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct cinergyt2_state {
u8 rc_counter;
unsigned char data[64];
- struct mutex data_mutex;
};
/* We are missing a release hook with usb_device data */
@@ -56,12 +55,12 @@ static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
struct cinergyt2_state *st = d->priv;
int ret;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
st->data[1] = enable ? 1 : 0;
ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0);
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -71,12 +70,12 @@ static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
struct cinergyt2_state *st = d->priv;
int ret;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = CINERGYT2_EP1_SLEEP_MODE;
st->data[1] = enable ? 0 : 1;
ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0);
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -89,7 +88,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
@@ -97,7 +96,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
"state info\n");
}
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
/* Copy this pointer as we are gonna need it in the release phase */
cinergyt2_usb_device = adap->dev;
@@ -166,7 +165,7 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
*state = REMOTE_NO_KEY_PRESSED;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;
ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
@@ -202,29 +201,17 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
}
ret:
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
static int cinergyt2_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
- struct cinergyt2_state *st;
- int ret;
-
- ret = dvb_usb_device_init(intf, &cinergyt2_properties,
- THIS_MODULE, &d, adapter_nr);
- if (ret < 0)
- return ret;
-
- st = d->priv;
- mutex_init(&st->data_mutex);
-
- return 0;
+ return dvb_usb_device_init(intf, &cinergyt2_properties,
+ THIS_MODULE, NULL, adapter_nr);
}
-
static struct usb_device_id cinergyt2_usb_table[] = {
{ USB_DEVICE(USB_VID_TERRATEC, 0x0038) },
{ 0 }
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 39772812269d..243403081fa5 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -68,7 +68,7 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
wo = (rbuf == NULL || rlen == 0); /* write-only */
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = cmd;
memcpy(&st->data[1], wbuf, wlen);
if (wo)
@@ -77,7 +77,7 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
rbuf, rlen, 0);
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -1461,43 +1461,36 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
static int cxusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
- struct cxusb_state *st;
-
if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&cxusb_bluebird_nano2_needsfirmware_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&cxusb_bluebird_dualdig4_rev2_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
- THIS_MODULE, &d, adapter_nr) ||
- 0) {
- st = d->priv;
- mutex_init(&st->data_mutex);
-
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0)
return 0;
- }
return -EINVAL;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 9f3ee0e47d5c..18acda19527a 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -37,7 +37,6 @@ struct cxusb_state {
struct i2c_client *i2c_client_tuner;
unsigned char data[MAX_XFER_SIZE];
- struct mutex data_mutex;
};
#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 92d5408684ac..47ce9d5de4c6 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -704,7 +704,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
struct dvb_usb_device *d = purb->context;
struct dib0700_rc_response *poll_reply;
enum rc_type protocol;
- u32 uninitialized_var(keycode);
+ u32 keycode;
u8 toggle;
deb_info("%s()\n", __func__);
@@ -745,7 +745,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
poll_reply->nec.data == 0x00 &&
poll_reply->nec.not_data == 0xff) {
poll_reply->data_state = 2;
- break;
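+ /* NEC repeat code received: report a key repeat instead of a new scancode */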
+ rc_repeat(d->rc_dev);
+ goto resubmit;
}
if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index f88572c7ae7c..fcbff7fb0c4e 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -22,7 +22,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dtt200u_state {
unsigned char data[80];
- struct mutex data_mutex;
};
static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
@@ -30,23 +29,24 @@ static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
struct dtt200u_state *st = d->priv;
int ret = 0;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = SET_INIT;
if (onoff)
ret = dvb_usb_generic_write(d, st->data, 2);
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- struct dtt200u_state *st = adap->dev->priv;
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
int ret;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = SET_STREAMING;
st->data[1] = onoff;
@@ -61,26 +61,27 @@ static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
ret = dvb_usb_generic_write(adap->dev, st->data, 1);
ret:
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
- struct dtt200u_state *st = adap->dev->priv;
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
int ret;
pid = onoff ? pid : 0;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = SET_PID_FILTER;
st->data[1] = index;
st->data[2] = pid & 0xff;
st->data[3] = (pid >> 8) & 0x1f;
ret = dvb_usb_generic_write(adap->dev, st->data, 4);
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -91,7 +92,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d)
u32 scancode;
int ret;
- mutex_lock(&st->data_mutex);
+ mutex_lock(&d->data_mutex);
st->data[0] = GET_RC_CODE;
ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
@@ -126,7 +127,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d)
deb_info("st->data: %*ph\n", 5, st->data);
ret:
- mutex_unlock(&st->data_mutex);
+ mutex_unlock(&d->data_mutex);
return ret;
}
@@ -145,24 +146,17 @@ static struct dvb_usb_device_properties wt220u_miglia_properties;
static int dtt200u_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
- struct dtt200u_state *st;
-
if (0 == dvb_usb_device_init(intf, &dtt200u_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_fc_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties,
- THIS_MODULE, &d, adapter_nr) ||
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_miglia_properties,
- THIS_MODULE, &d, adapter_nr)) {
- st = d->priv;
- mutex_init(&st->data_mutex);
-
+ THIS_MODULE, NULL, adapter_nr))
return 0;
- }
return -ENODEV;
}
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 3896ba9a4179..84308569e7dc 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -142,6 +142,7 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
{
int ret = 0;
+ mutex_init(&d->data_mutex);
mutex_init(&d->usb_mutex);
mutex_init(&d->i2c_mutex);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 639c4678c65b..107255b08b2b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -404,8 +404,12 @@ struct dvb_usb_adapter {
* Powered is in/decremented for each call to modify the state.
* @udev: pointer to the device's struct usb_device.
*
- * @usb_mutex: semaphore of USB control messages (reading needs two messages)
- * @i2c_mutex: semaphore for i2c-transfers
+ * @data_mutex: mutex to protect the data structure used to store URB data
+ * @usb_mutex: mutex for USB control messages (reading needs two messages).
+ * Note that this mutex is taken internally by the generic URB control
+ * functions, so drivers using dvb_usb_generic_rw() and derived
+ * functions must not lock it themselves.
+ * @i2c_mutex: mutex for i2c-transfers
*
* @i2c_adap: device's i2c_adapter if it uses I2CoverUSB
*
@@ -433,6 +437,7 @@ struct dvb_usb_device {
int powered;
/* locking */
+ struct mutex data_mutex;
struct mutex usb_mutex;
/* i2c */
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index adfd76491451..993bb7a72985 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -15,6 +15,7 @@
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "gp8psk.h"
+#include "gp8psk-fe.h"
/* debug */
static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw";
@@ -28,34 +29,8 @@ struct gp8psk_state {
unsigned char data[80];
};
-static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6));
-}
-
-static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1));
-}
-
-static void gp8psk_info(struct dvb_usb_device *d)
-{
- u8 fpga_vers, fw_vers[6];
-
- if (!gp8psk_get_fw_version(d, fw_vers))
- info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
- fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
- 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
- else
- info("failed to get FW version");
-
- if (!gp8psk_get_fpga_version(d, &fpga_vers))
- info("FPGA Version = %i", fpga_vers);
- else
- info("failed to get FPGA version");
-}
-
-int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
+static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
{
struct gp8psk_state *st = d->priv;
int ret = 0,try = 0;
@@ -67,7 +42,6 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
return ret;
while (ret >= 0 && ret != blen && try < 3) {
- memcpy(st->data, b, blen);
ret = usb_control_msg(d->udev,
usb_rcvctrlpipe(d->udev,0),
req,
@@ -81,8 +55,10 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
if (ret < 0 || ret != blen) {
warn("usb in %d operation failed.", req);
ret = -EIO;
- } else
+ } else {
ret = 0;
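+ /* transfer succeeded: copy the device's reply back into the caller's buffer */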
+ memcpy(b, st->data, blen);
+ }
deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
@@ -92,7 +68,7 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
return ret;
}
-int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
+static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
struct gp8psk_state *st = d->priv;
@@ -123,6 +99,34 @@ int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
return ret;
}
+
+static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6);
+}
+
+static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1);
+}
+
+static void gp8psk_info(struct dvb_usb_device *d)
+{
+ u8 fpga_vers, fw_vers[6];
+
+ if (!gp8psk_get_fw_version(d, fw_vers))
+ info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
+ fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
+ 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
+ else
+ info("failed to get FW version");
+
+ if (!gp8psk_get_fpga_version(d, &fpga_vers))
+ info("FPGA Version = %i", fpga_vers);
+ else
+ info("failed to get FPGA version");
+}
+
static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
{
int ret;
@@ -225,10 +229,13 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
return 0;
}
-int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
+static int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
{
u8 buf;
int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
+
+ deb_xfer("reloading firmware\n");
+
/* Turn off 8psk power */
if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1))
return -EINVAL;
@@ -247,9 +254,47 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0);
}
+/* Callbacks for gp8psk-fe.c */
+
+static int gp8psk_fe_in(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_in_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_out(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_out_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_reload(void *priv)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_bcm4500_reload(d);
+}
+
+static const struct gp8psk_fe_ops gp8psk_fe_ops = {
+ .in = gp8psk_fe_in,
+ .out = gp8psk_fe_out,
+ .reload = gp8psk_fe_reload,
+};
+
static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev);
+ struct dvb_usb_device *d = adap->dev;
+ int id = le16_to_cpu(d->udev->descriptor.idProduct);
+ bool is_rev1;
+
+ is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM);
+
+ adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach,
+ &gp8psk_fe_ops, d, is_rev1);
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h
index ed32b9da4843..d8975b866dee 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.h
+++ b/drivers/media/usb/dvb-usb/gp8psk.h
@@ -24,58 +24,6 @@ extern int dvb_usb_gp8psk_debug;
#define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args)
#define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args)
#define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args)
-#define deb_fe(args...) dprintk(dvb_usb_gp8psk_debug,0x08,args)
-
-/* Twinhan Vendor requests */
-#define TH_COMMAND_IN 0xC0
-#define TH_COMMAND_OUT 0xC1
-
-/* gp8psk commands */
-
-#define GET_8PSK_CONFIG 0x80 /* in */
-#define SET_8PSK_CONFIG 0x81
-#define I2C_WRITE 0x83
-#define I2C_READ 0x84
-#define ARM_TRANSFER 0x85
-#define TUNE_8PSK 0x86
-#define GET_SIGNAL_STRENGTH 0x87 /* in */
-#define LOAD_BCM4500 0x88
-#define BOOT_8PSK 0x89 /* in */
-#define START_INTERSIL 0x8A /* in */
-#define SET_LNB_VOLTAGE 0x8B
-#define SET_22KHZ_TONE 0x8C
-#define SEND_DISEQC_COMMAND 0x8D
-#define SET_DVB_MODE 0x8E
-#define SET_DN_SWITCH 0x8F
-#define GET_SIGNAL_LOCK 0x90 /* in */
-#define GET_FW_VERS 0x92
-#define GET_SERIAL_NUMBER 0x93 /* in */
-#define USE_EXTRA_VOLT 0x94
-#define GET_FPGA_VERS 0x95
-#define CW3K_INIT 0x9d
-
-/* PSK_configuration bits */
-#define bm8pskStarted 0x01
-#define bm8pskFW_Loaded 0x02
-#define bmIntersilOn 0x04
-#define bmDVBmode 0x08
-#define bm22kHz 0x10
-#define bmSEL18V 0x20
-#define bmDCtuned 0x40
-#define bmArmed 0x80
-
-/* Satellite modulation modes */
-#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
-#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
-#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
-#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
-
-#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
-#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
-#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
-#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
-#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
-#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
#define GET_USB_SPEED 0x07
@@ -86,15 +34,4 @@ extern int dvb_usb_gp8psk_debug;
#define PRODUCT_STRING_READ 0x0D
#define FW_BCD_VERSION_READ 0x14
-/* firmware revision id's */
-#define GP8PSK_FW_REV1 0x020604
-#define GP8PSK_FW_REV2 0x020704
-#define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
-
-extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d);
-extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen);
-extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen);
-extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d);
-
#endif
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index e9e6ea3ab73c..75b9d4ac8b1e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = 0;
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ if (bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
goto err;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5a8dc5a76e0d..3678220964fe 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2347,7 +2347,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_async_req areq;
+ struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
@@ -2363,8 +2363,8 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- areq.mrq = mrq;
- areq.err_check = mmc_test_check_result_async;
+ test_areq.areq.mrq = mrq;
+ test_areq.areq.err_check = mmc_test_check_result_async;
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2378,7 +2378,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_req(host, &areq, &ret);
+ mmc_start_req(host, &test_areq.areq, &ret);
if (ret)
goto out_free;
} else {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 39fc5b2b96c5..df19777068a6 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -26,6 +26,8 @@
#include "mmc_ops.h"
#include "sd_ops.h"
+#define DEFAULT_CMD6_TIMEOUT_MS 500
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -571,6 +573,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->erased_byte = 0x0;
/* eMMC v4.5 or later */
+ card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
if (card->ext_csd.rev >= 6) {
card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4fcbc4012ed0..50a674be6655 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2940,7 +2940,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(-ENOMEM);
/* find reset controller when exist */
- pdata->rstc = devm_reset_control_get_optional(dev, NULL);
+ pdata->rstc = devm_reset_control_get_optional(dev, "reset");
if (IS_ERR(pdata->rstc)) {
if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index d839147e591d..44ecebd1ea8c 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
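+ /* the IRQ handler takes host->lock, so init it before requesting the IRQ */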
+ spin_lock_init(&host->lock);
+
ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
dev_name(&pdev->dev), host);
if (ret)
goto out_free_dma;
- spin_lock_init(&host->lock);
-
ret = mmc_add_host(mmc);
if (ret)
goto out_free_dma;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 71654b90227f..42ef3ebb1d8c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2086,6 +2086,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
+
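+ /* reset the command and data circuits to abort the failed tuning sequence */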
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
@@ -2286,10 +2290,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
mrq = host->mrqs_done[i];
- if (mrq) {
- host->mrqs_done[i] = NULL;
+ if (mrq)
break;
- }
}
if (!mrq) {
@@ -2320,6 +2322,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
* upon error conditions.
*/
if (sdhci_needs_reset(host, mrq)) {
+ /*
+ * Do not finish until command and data lines are available for
+ * reset. Note there can only be one other mrq, so it cannot
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+ * would both be null.
+ */
+ if (host->cmd || host->data_cmd) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
@@ -2327,10 +2340,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
- if (!host->cmd)
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- if (!host->data_cmd)
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
}
@@ -2338,6 +2349,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
if (!sdhci_has_requests(host))
sdhci_led_deactivate(host);
+ host->mrqs_done[i] = NULL;
+
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
@@ -2512,9 +2525,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (!host->data) {
struct mmc_command *data_cmd = host->data_cmd;
- if (data_cmd)
- host->data_cmd = NULL;
-
/*
* The "data complete" interrupt is also used to
* indicate that a busy state has ended. See comment
@@ -2522,11 +2532,13 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
*/
if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+ host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
if (intmask & SDHCI_INT_DATA_END) {
+ host->data_cmd = NULL;
/*
* Some cards handle busy-end interrupt
* before the command completed, so make
@@ -2912,6 +2924,10 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
+ if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+ mmc->ops->hs400_enhanced_strobe)
+ mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = false;
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 83deda4bb4d6..6f9563a96488 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
return -ENOMEM;
bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length);
- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ if (bytes_recv < 0 || bytes_recv < if_version_length) {
pr_err("Could not read IF version\n");
r = -EIO;
goto err;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index f5e3011e31fc..5daf2f4be0cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -612,7 +612,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
ret = nvm_register(dev);
- ns->lba_shift = ilog2(dev->sec_size) - 9;
+ ns->lba_shift = ilog2(dev->sec_size);
if (sysfs_create_group(&dev->dev.kobj, attrs))
pr_warn("%s: failed to create sysfs group for identification\n",
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d687e6de24a0..a0bccb54a9bd 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2077,8 +2077,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
name = of_get_property(of_aliases, "stdout", NULL);
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
- if (of_stdout)
- console_set_by_of();
}
if (!of_aliases)
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e0b22dab9b7a..e04f69beb42d 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -190,6 +190,9 @@ struct rockchip_pcie {
struct reset_control *mgmt_rst;
struct reset_control *mgmt_sticky_rst;
struct reset_control *pipe_rst;
+ struct reset_control *pm_rst;
+ struct reset_control *aclk_rst;
+ struct reset_control *pclk_rst;
struct clk *aclk_pcie;
struct clk *aclk_perf_pcie;
struct clk *hclk_pcie;
@@ -408,6 +411,44 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
gpiod_set_value(rockchip->ep_gpio, 0);
+ err = reset_control_assert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "assert aclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "assert pclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "assert pm_rst err %d\n", err);
+ return err;
+ }
+
+ udelay(10);
+
+ err = reset_control_deassert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "deassert pm_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
err = phy_init(rockchip->phy);
if (err < 0) {
dev_err(dev, "fail to init phy, err %d\n", err);
@@ -781,6 +822,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
return PTR_ERR(rockchip->pipe_rst);
}
+ rockchip->pm_rst = devm_reset_control_get(dev, "pm");
+ if (IS_ERR(rockchip->pm_rst)) {
+ if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pm reset property in node\n");
+ return PTR_ERR(rockchip->pm_rst);
+ }
+
+ rockchip->pclk_rst = devm_reset_control_get(dev, "pclk");
+ if (IS_ERR(rockchip->pclk_rst)) {
+ if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pclk reset property in node\n");
+ return PTR_ERR(rockchip->pclk_rst);
+ }
+
+ rockchip->aclk_rst = devm_reset_control_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_rst)) {
+ if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing aclk reset property in node\n");
+ return PTR_ERR(rockchip->aclk_rst);
+ }
+
rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
if (IS_ERR(rockchip->ep_gpio)) {
dev_err(dev, "missing ep-gpios property in node\n");
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 66c4d8f42233..9526e341988b 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return -EINVAL;
}
+ /*
+ * If we have a shadow copy in RAM, the PCI device doesn't respond
+ * to the shadow range, so we don't need to claim it, and upstream
+ * bridges don't need to route the range to the device.
+ */
+ if (res->flags & IORESOURCE_ROM_SHADOW)
+ return 0;
+
root = pci_find_parent_resource(dev, res);
if (!root) {
dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 153f3122283d..b6b316de055c 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -107,7 +107,7 @@ int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt,
ret = regulator_enable(r->reg);
} else {
- regulator_disable(r->reg);
+ ret = regulator_disable(r->reg);
}
if (ret == 0)
r->on = on;
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index 32ae78c8ca17..c85fb0b59729 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -198,7 +198,8 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
} else {
int ret;
- ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy",
+ "ohci-da8xx");
if (ret)
dev_warn(dev, "Failed to create usb11 phy lookup\n");
ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
@@ -216,7 +217,7 @@ static int da8xx_usb_phy_remove(struct platform_device *pdev)
if (!pdev->dev.of_node) {
phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
- phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx");
}
return 0;
diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c
index a2b4c6b58aea..6904633cad68 100644
--- a/drivers/phy/phy-rockchip-pcie.c
+++ b/drivers/phy/phy-rockchip-pcie.c
@@ -249,21 +249,10 @@ err_refclk:
static int rockchip_pcie_phy_exit(struct phy *phy)
{
struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
- int err = 0;
clk_disable_unprepare(rk_phy->clk_pciephy_ref);
- err = reset_control_deassert(rk_phy->phy_rst);
- if (err) {
- dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
- goto err_reset;
- }
-
- return err;
-
-err_reset:
- clk_prepare_enable(rk_phy->clk_pciephy_ref);
- return err;
+ return 0;
}
static const struct phy_ops ops = {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index b9342a2af7b3..fec34f5213c4 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -264,7 +264,7 @@ static int sun4i_usb_phy_init(struct phy *_phy)
return ret;
}
- if (data->cfg->enable_pmu_unk1) {
+ if (phy->pmu && data->cfg->enable_pmu_unk1) {
val = readl(phy->pmu + REG_PMU_UNK1);
writel(val & ~2, phy->pmu + REG_PMU_UNK1);
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index c8c72e8259d3..87b46390b695 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -26,7 +26,7 @@
#define ASPEED_G5_NR_PINS 228
-#define COND1 SIG_DESC_BIT(SCU90, 6, 0)
+#define COND1 { SCU90, BIT(6), 0, 0 }
#define COND2 { SCU94, GENMASK(1, 0), 0, 0 }
#define B14 0
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 7f7700716398..5d1e505c3c63 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = {
static int __init iproc_gpio_init(void)
{
- return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe);
+ return platform_driver_register(&iproc_gpio_driver);
}
arch_initcall_sync(iproc_gpio_init);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 35783db1c10b..c8deb8be1da7 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = {
static int __init nsp_gpio_init(void)
{
- return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe);
+ return platform_driver_register(&nsp_gpio_driver);
}
arch_initcall_sync(nsp_gpio_init);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 47613201269a..79c4e14a5a75 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -687,6 +687,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->functions)
return -ENOMEM;
+ info->group_index = 0;
if (flat_funcs) {
info->ngroups = of_get_child_count(np);
} else {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 30389f4ccab4..c43b1e9a06af 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1652,12 +1652,15 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int chv_pinctrl_suspend(struct device *dev)
+static int chv_pinctrl_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&chv_lock, flags);
+
pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
for (i = 0; i < pctrl->community->npins; i++) {
@@ -1678,15 +1681,20 @@ static int chv_pinctrl_suspend(struct device *dev)
ctx->padctrl1 = readl(reg);
}
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
return 0;
}
-static int chv_pinctrl_resume(struct device *dev)
+static int chv_pinctrl_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&chv_lock, flags);
+
/*
* Mask all interrupts before restoring per-pin configuration
* registers because we don't know in which state BIOS left them
@@ -1731,12 +1739,15 @@ static int chv_pinctrl_resume(struct device *dev)
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
return 0;
}
#endif
static const struct dev_pm_ops chv_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq,
+ chv_pinctrl_resume_noirq)
};
static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 99da4cf91031..b7bb37167969 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
if (info->irqmux_base || gpio_irq > 0) {
err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
0, handle_simple_irq,
- IRQ_TYPE_LEVEL_LOW);
+ IRQ_TYPE_NONE);
if (err) {
gpiochip_remove(&bank->gpio_chip);
dev_info(dev, "could not add irqchip\n");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 200667f08c37..efc43711ff5c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1092,9 +1092,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
return -EINVAL;
}
- ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
- if (ret)
- return ret;
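+ /* IRQ support is optional; only set it up when an interrupt-parent is present */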
+ if (of_find_property(np, "interrupt-parent", NULL)) {
+ ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
+ if (ret)
+ return ret;
+ }
for_each_child_of_node(np, child)
if (of_property_read_bool(child, "gpio-controller"))
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a2323941e677..a7614fc542b5 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -934,6 +934,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Yoga 900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "VIUU4"),
+ },
+ },
+ {
.ident = "Lenovo YOGA 910-13IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index ed5874217ee7..12dbb5063376 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev))
+ if (acpi_create_platform_device(dev, NULL))
dev_info(&dev->dev,
"intel-hid: created platform device\n");
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 146d02f8c9bc..78080763df51 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -164,7 +164,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev))
+ if (acpi_create_platform_device(dev, NULL))
dev_info(&dev->dev,
"intel-vbtn: created platform device\n");
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
index feac4576b837..2df07ee8f3c3 100644
--- a/drivers/platform/x86/toshiba-wmi.c
+++ b/drivers/platform/x86/toshiba-wmi.c
@@ -24,14 +24,15 @@
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Azael Avalos");
MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
MODULE_LICENSE("GPL");
-#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
-MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID);
+MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
static struct input_dev *toshiba_wmi_input_dev;
@@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context)
kfree(response.pointer);
}
+static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = {
+ {
+ .ident = "Toshiba laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ },
+ },
+ {}
+};
+
static int __init toshiba_wmi_input_setup(void)
{
acpi_status status;
@@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void)
if (err)
goto err_free_dev;
- status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID,
+ status = wmi_install_notify_handler(WMI_EVENT_GUID,
toshiba_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
@@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void)
return 0;
err_remove_notifier:
- wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
err_free_keymap:
sparse_keymap_free(toshiba_wmi_input_dev);
err_free_dev:
@@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void)
static void toshiba_wmi_input_destroy(void)
{
- wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
sparse_keymap_free(toshiba_wmi_input_dev);
input_unregister_device(toshiba_wmi_input_dev);
}
@@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void)
{
int ret;
- if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ if (!wmi_has_guid(WMI_EVENT_GUID) ||
+ !dmi_check_system(toshiba_wmi_dmi_table))
return -ENODEV;
ret = toshiba_wmi_input_setup();
@@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void)
static void __exit toshiba_wmi_exit(void)
{
- if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ if (wmi_has_guid(WMI_EVENT_GUID))
toshiba_wmi_input_destroy();
}
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d1421139e6ea..2ffe029ff2b6 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2081,9 +2081,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
/* never reached the xmit task callout */
if (tdata->skb)
__kfree_skb(tdata->skb);
- memset(tdata, 0, sizeof(*tdata));
task_release_itt(task, task->hdr_itt);
+ memset(tdata, 0, sizeof(*tdata));
+
iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 241829e59668..7bb20684e9fa 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -793,6 +793,7 @@ static void alua_rtpg_work(struct work_struct *work)
WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
spin_unlock_irqrestore(&pg->lock, flags);
+ kref_put(&pg->kref, release_port_group);
return;
}
if (pg->flags & ALUA_SYNC_STPG)
@@ -890,6 +891,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
/* Do not queue if the worker is already running */
if (!(pg->flags & ALUA_PG_RUNNING)) {
kref_get(&pg->kref);
+ sdev = NULL;
start_queue = 1;
}
}
@@ -901,7 +903,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
if (start_queue &&
!queue_delayed_work(alua_wq, &pg->rtpg_work,
msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
- scsi_device_put(sdev);
+ if (sdev)
+ scsi_device_put(sdev);
kref_put(&pg->kref, release_port_group);
}
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ca86c885dfaa..3aaea713bf37 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2233,7 +2233,7 @@ struct megasas_instance_template {
};
#define MEGASAS_IS_LOGICAL(scp) \
- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 209a969a979d..8aa769a2d919 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->handle = raid_device->handle;
sas_target_priv_data->sas_address = raid_device->wwid;
sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
- sas_target_priv_data->raid_device = raid_device;
if (ioc->is_warpdrive)
- raid_device->starget = starget;
+ sas_target_priv_data->raid_device = raid_device;
+ raid_device->starget = starget;
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ace65db1d2a2..567fa080e261 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -707,6 +707,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
srb_t *sp;
int rval;
+ if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -1451,6 +1456,15 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /* Get a reference to the sp and drop the lock.
+ * The reference ensures this sp->done() call
+ * - and not the call in qla2xxx_eh_abort() -
+ * ends the SCSI command (with result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qla2xxx_eh_abort(GET_CMD_SP(sp));
+ spin_lock_irqsave(&ha->hardware_lock, flags);
req->outstanding_cmds[cnt] = NULL;
sp->done(vha, sp, res);
}
@@ -2341,6 +2355,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
scsi_qla_host_t *vha = shost_priv(shost);
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return 1;
if (!vha->host)
return 1;
if (time > vha->hw->loop_reset_delay * HZ)
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 4a0d3cdc607c..15ca09cd16f3 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -793,6 +793,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int result = SUCCESS;
DECLARE_COMPLETION_ONSTACK(abort_cmp);
+ int done;
scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
adapter->host->host_no, cmd);
@@ -824,10 +825,10 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
pvscsi_abort_cmd(adapter, ctx);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/* Wait for 2 secs for the completion. */
- wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+ done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
spin_lock_irqsave(&adapter->hw_lock, flags);
- if (!completion_done(&abort_cmp)) {
+ if (!done) {
/*
* Failed to abort the command, unmark the fact that it
* was requested to be aborted.
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index c097d2ccbde3..d41292ef85f2 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 7043eb0543f6..5ab49a798164 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
* clock period is specified by user with prescaling
* already taken into account.
*/
- return counter->clock_period_ps;
+ *period_ps = counter->clock_period_ps;
+ return 0;
}
switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 34307ac3f255..d33d6fe078ad 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -186,6 +186,7 @@ int arche_platform_change_state(enum arche_platform_state state,
exit:
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
mutex_unlock(&arche_pdata->platform_state_mutex);
+ put_device(&pdev->dev);
of_node_put(np);
return ret;
}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 5eecf1cb1028..3892a7470410 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work)
__be16 buf[2];
int val[2];
unsigned char status;
+ int ret;
mutex_lock(&indio_dev->mlock);
if (st->state == AD5933_CTRL_INIT_START_FREQ) {
@@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
st->state = AD5933_CTRL_START_SWEEP;
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ if (ret)
+ goto out;
if (status & AD5933_STAT_DATA_VALID) {
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength);
- ad5933_i2c_read(st->client,
+ ret = ad5933_i2c_read(st->client,
test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
scan_count * 2, (u8 *)buf);
+ if (ret)
+ goto out;
if (scan_count == 2) {
val[0] = be16_to_cpu(buf[0]);
@@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work)
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
if (status & AD5933_STAT_SWEEP_DONE) {
@@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
schedule_delayed_work(&st->work, st->poll_time_jiffies);
}
-
+out:
mutex_unlock(&indio_dev->mlock);
}
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index a324322ee0ad..499952c8ef39 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev;
- char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
- ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+ ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ser_dev)
return -ENOMEM;
- ser_dev->id.type = SERIO_PS_PSTHRU;
+ ser_dev->id.type = SERIO_8042;
ser_dev->write = ps2_sendcommand;
ser_dev->start = ps2_startstreaming;
ser_dev->stop = ps2_stopstreaming;
@@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev)
serio_register_port(ser_dev);
- /* mouse reset */
- nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
-
return 0;
}
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 955247979aaa..4ed6d8d7712a 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -601,13 +601,13 @@
#define PANEL_PLANE_TL 0x08001C
#define PANEL_PLANE_TL_TOP_SHIFT 16
-#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16)
-#define PANEL_PLANE_TL_LEFT_MASK 0xeff
+#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16)
+#define PANEL_PLANE_TL_LEFT_MASK 0x7ff
#define PANEL_PLANE_BR 0x080020
#define PANEL_PLANE_BR_BOTTOM_SHIFT 16
-#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16)
-#define PANEL_PLANE_BR_RIGHT_MASK 0xeff
+#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff
#define PANEL_HORIZONTAL_TOTAL 0x080024
#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 78f0f85bebdc..fada988512a1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -932,8 +932,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
DECLARE_WAITQUEUE(wait, current);
struct async_icount old, new;
- if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
- return -EINVAL;
do {
spin_lock_irq(&acm->read_lock);
old = acm->oldcount;
@@ -1161,6 +1159,8 @@ static int acm_probe(struct usb_interface *intf,
if (quirks == IGNORE_DEVICE)
return -ENODEV;
+ memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header));
+
num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;
/* handle quirks deadly to normal probing */
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7287a763cd0c..fea446900cad 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -769,15 +769,14 @@ static int dwc3_core_init(struct dwc3 *dwc)
return 0;
err4:
- phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
err3:
- phy_power_off(dwc->usb3_generic_phy);
+ phy_power_off(dwc->usb2_generic_phy);
err2:
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
- dwc3_core_exit(dwc);
err1:
usb_phy_shutdown(dwc->usb2_phy);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 89a2f712fdfe..aaaf256f71dd 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/of.h>
#include "core.h"
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index fe1811650dbc..5d1bd13a56c1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -588,14 +588,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
req->length = length;
- /* throttle high/super speed IRQ rate back slightly */
- if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)) &&
- !list_empty(&dev->tx_reqs))
- ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
- : 0;
-
retval = usb_ep_queue(in, req, GFP_ATOMIC);
switch (retval) {
default:
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index d793f548dfe2..a9a1e4c40480 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -995,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
}
val = readl(base + ext_cap_offset);
+ /* Auto handoff never worked for these devices. Force it and continue */
+ if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
+ (pdev->vendor == PCI_VENDOR_ID_RENESAS
+ && pdev->device == 0x0014)) {
+ val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
+ writel(val, base + ext_cap_offset);
+ }
+
/* If the BIOS owns the HC, signal that the OS wants it, and wait */
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 210b7e43a6fd..2440f88e07a3 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -479,7 +479,8 @@ static int da8xx_probe(struct platform_device *pdev)
glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
if (IS_ERR(glue->phy)) {
- dev_err(&pdev->dev, "failed to get phy\n");
+ if (PTR_ERR(glue->phy) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get phy\n");
return PTR_ERR(glue->phy);
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 27dadc0d9114..e01116e4c067 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2114,11 +2114,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb->io.ep_offset = musb_flat_ep_offset;
musb->io.ep_select = musb_flat_ep_select;
}
- /* And override them with platform specific ops if specified. */
- if (musb->ops->ep_offset)
- musb->io.ep_offset = musb->ops->ep_offset;
- if (musb->ops->ep_select)
- musb->io.ep_select = musb->ops->ep_select;
/* At least tusb6010 has its own offsets */
if (musb->ops->ep_offset)
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index d059ad4d0dbd..97ee1b46db69 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
if (dev) {
rc = dev_get_drvdata(dev);
__uwb_rc_get(rc);
+ put_device(dev);
}
+
return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
find_rc_grandpa);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
return rc;
}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
index c1304b8d4985..678e93741ae1 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/uwb/pal.c
@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
+ put_device(dev);
+
return (dev != NULL);
}
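The uwb hunks above all plug the same leak: class_find_device() hands back its match with a reference held, so each successful lookup must be balanced by put_device() once the needed data has been copied out (the same fix appears in kernel/power/suspend_test.c later in this series). A hedged userspace model of the get/use/put discipline, with a plain counter standing in for the kobject reference count:

    #include <stdio.h>

    struct device { int refcount; void *drvdata; };

    static struct device *find_device(struct device *d)
    {
            d->refcount++;          /* a lookup returns a held reference */
            return d;
    }

    static void put_device(struct device *d)
    {
            if (d)
                    d->refcount--;  /* balance the lookup */
    }

    int main(void)
    {
            struct device real = { .refcount = 1, .drvdata = "rc0" };
            void *rc = NULL;

            struct device *dev = find_device(&real);
            if (dev) {
                    rc = dev->drvdata;      /* copy out what we need... */
                    put_device(dev);        /* ...then drop the reference */
            }
            printf("rc=%s refs=%d\n", (char *)rc, real.refcount);
            return 0;
    }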
diff --git a/fs/aio.c b/fs/aio.c
index 1157e13a36d6..428484f2f841 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1078,6 +1078,17 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
unsigned tail, pos, head;
unsigned long flags;
+ if (kiocb->ki_flags & IOCB_WRITE) {
+ struct file *file = kiocb->ki_filp;
+
+ /*
+ * Tell lockdep we inherited freeze protection from submission
+ * thread.
+ */
+ __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ file_end_write(file);
+ }
+
/*
* Special case handling for sync iocbs:
* - events go directly into the iocb for fast handling
@@ -1392,122 +1403,106 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
return -EINVAL;
}
-typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);
-
-static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len,
- struct iovec **iovec,
- bool compat,
- struct iov_iter *iter)
+static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
+ bool vectored, bool compat, struct iov_iter *iter)
{
+ void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
+ size_t len = iocb->aio_nbytes;
+
+ if (!vectored) {
+ ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
+ *iovec = NULL;
+ return ret;
+ }
#ifdef CONFIG_COMPAT
if (compat)
- return compat_import_iovec(rw,
- (struct compat_iovec __user *)buf,
- len, UIO_FASTIOV, iovec, iter);
+ return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
+ iter);
#endif
- return import_iovec(rw, (struct iovec __user *)buf,
- len, UIO_FASTIOV, iovec, iter);
+ return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
}
-/*
- * aio_run_iocb:
- * Performs the initial checks and io submission.
- */
-static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
- char __user *buf, size_t len, bool compat)
+static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret)
+{
+ switch (ret) {
+ case -EIOCBQUEUED:
+ return ret;
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
+ /*
+ * There's no easy way to restart the syscall since other AIOs
+ * may already be running. Just fail this IO with EINTR.
+ */
+ ret = -EINTR;
+ /*FALLTHRU*/
+ default:
+ aio_complete(req, ret, 0);
+ return 0;
+ }
+}
+
+static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
+ bool compat)
{
struct file *file = req->ki_filp;
- ssize_t ret;
- int rw;
- fmode_t mode;
- rw_iter_op *iter_op;
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
+ ssize_t ret;
- switch (opcode) {
- case IOCB_CMD_PREAD:
- case IOCB_CMD_PREADV:
- mode = FMODE_READ;
- rw = READ;
- iter_op = file->f_op->read_iter;
- goto rw_common;
-
- case IOCB_CMD_PWRITE:
- case IOCB_CMD_PWRITEV:
- mode = FMODE_WRITE;
- rw = WRITE;
- iter_op = file->f_op->write_iter;
- goto rw_common;
-rw_common:
- if (unlikely(!(file->f_mode & mode)))
- return -EBADF;
-
- if (!iter_op)
- return -EINVAL;
-
- if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
- ret = aio_setup_vectored_rw(rw, buf, len,
- &iovec, compat, &iter);
- else {
- ret = import_single_range(rw, buf, len, iovec, &iter);
- iovec = NULL;
- }
- if (!ret)
- ret = rw_verify_area(rw, file, &req->ki_pos,
- iov_iter_count(&iter));
- if (ret < 0) {
- kfree(iovec);
- return ret;
- }
-
- if (rw == WRITE)
- file_start_write(file);
-
- ret = iter_op(req, &iter);
-
- if (rw == WRITE)
- file_end_write(file);
- kfree(iovec);
- break;
-
- case IOCB_CMD_FDSYNC:
- if (!file->f_op->aio_fsync)
- return -EINVAL;
-
- ret = file->f_op->aio_fsync(req, 1);
- break;
+ if (unlikely(!(file->f_mode & FMODE_READ)))
+ return -EBADF;
+ if (unlikely(!file->f_op->read_iter))
+ return -EINVAL;
- case IOCB_CMD_FSYNC:
- if (!file->f_op->aio_fsync)
- return -EINVAL;
+ ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
+ if (ret)
+ return ret;
+ ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
+ if (!ret)
+ ret = aio_ret(req, file->f_op->read_iter(req, &iter));
+ kfree(iovec);
+ return ret;
+}
- ret = file->f_op->aio_fsync(req, 0);
- break;
+static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
+ bool compat)
+{
+ struct file *file = req->ki_filp;
+ struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+ struct iov_iter iter;
+ ssize_t ret;
- default:
- pr_debug("EINVAL: no operation provided\n");
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ return -EBADF;
+ if (unlikely(!file->f_op->write_iter))
return -EINVAL;
- }
- if (ret != -EIOCBQUEUED) {
+ ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
+ if (ret)
+ return ret;
+ ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
+ if (!ret) {
+ req->ki_flags |= IOCB_WRITE;
+ file_start_write(file);
+ ret = aio_ret(req, file->f_op->write_iter(req, &iter));
/*
- * There's no easy way to restart the syscall since other AIO's
- * may be already running. Just fail this IO with EINTR.
+ * We release freeze protection in aio_complete(). Fool lockdep
+ * by telling it the lock got released so that it doesn't
+ * complain about held lock when we return to userspace.
*/
- if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
- ret == -ERESTARTNOHAND ||
- ret == -ERESTART_RESTARTBLOCK))
- ret = -EINTR;
- aio_complete(req, ret, 0);
+ __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
-
- return 0;
+ kfree(iovec);
+ return ret;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb, bool compat)
{
struct aio_kiocb *req;
+ struct file *file;
ssize_t ret;
/* enforce forwards compatibility on users */
@@ -1530,7 +1525,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
if (unlikely(!req))
return -EAGAIN;
- req->common.ki_filp = fget(iocb->aio_fildes);
+ req->common.ki_filp = file = fget(iocb->aio_fildes);
if (unlikely(!req->common.ki_filp)) {
ret = -EBADF;
goto out_put_req;
@@ -1565,13 +1560,29 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
req->ki_user_iocb = user_iocb;
req->ki_user_data = iocb->aio_data;
- ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode,
- (char __user *)(unsigned long)iocb->aio_buf,
- iocb->aio_nbytes,
- compat);
- if (ret)
- goto out_put_req;
+ get_file(file);
+ switch (iocb->aio_lio_opcode) {
+ case IOCB_CMD_PREAD:
+ ret = aio_read(&req->common, iocb, false, compat);
+ break;
+ case IOCB_CMD_PWRITE:
+ ret = aio_write(&req->common, iocb, false, compat);
+ break;
+ case IOCB_CMD_PREADV:
+ ret = aio_read(&req->common, iocb, true, compat);
+ break;
+ case IOCB_CMD_PWRITEV:
+ ret = aio_write(&req->common, iocb, true, compat);
+ break;
+ default:
+ pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
+ ret = -EINVAL;
+ break;
+ }
+ fput(file);
+ if (ret && ret != -EIOCBQUEUED)
+ goto out_put_req;
return 0;
out_put_req:
put_reqs_available(ctx, 1);
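The new aio_ret() helper above centralizes a subtlety the old aio_run_iocb() open-coded: once sibling iocbs may already be in flight, the syscall cannot be transparently restarted, so any -ERESTART* result from ->read_iter()/->write_iter() has to be collapsed to -EINTR before the iocb is completed, while -EIOCBQUEUED passes through untouched. A compact standalone sketch of that mapping; the numeric values mirror the kernel's internal errno definitions:

    #include <stdio.h>

    #define EINTR             4
    #define ERESTARTSYS     512     /* kernel-internal, never reaches userspace */
    #define ERESTARTNOINTR  513
    #define EIOCBQUEUED     529

    static int map_aio_result(int ret)
    {
            switch (ret) {
            case -EIOCBQUEUED:
                    return ret;     /* still in flight: pass through */
            case -ERESTARTSYS:
            case -ERESTARTNOINTR:
                    ret = -EINTR;   /* cannot restart: fail with EINTR */
                    /* fall through */
            default:
                    /* here the kernel would complete the iocb with ret */
                    return 0;
            }
    }

    int main(void)
    {
            printf("%d %d\n", map_aio_result(-ERESTARTSYS),
                   map_aio_result(-EIOCBQUEUED));
            return 0;
    }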
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 18630e800208..f995e3528a33 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1770,7 +1770,6 @@ const struct file_operations ceph_file_fops = {
.fsync = ceph_fsync,
.lock = ceph_lock,
.flock = ceph_flock,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = ceph_ioctl,
.compat_ioctl = ceph_ioctl,
diff --git a/fs/coredump.c b/fs/coredump.c
index 281b768000e6..eb9c92c9b20f 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -1,6 +1,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
@@ -423,7 +424,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
if (core_waiters > 0) {
struct core_thread *ptr;
+ freezer_do_not_count();
wait_for_completion(&core_state->startup);
+ freezer_count();
/*
* Wait for all the threads to become inactive, so that
* all the thread context (extended register state, like
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 7555ba889d1f..ebecfb8fba06 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -314,7 +314,8 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
/* Match the full socket address */
if (!rpc_cmp_addr_port(sap, clap))
/* Match all xprt_switch full socket addresses */
- if (!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient,
+ if (IS_ERR(clp->cl_rpcclient) ||
+ !rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient,
sap))
continue;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index c8162c660c44..5551e8ef67fd 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -98,7 +98,7 @@ rename_retry:
return end;
}
namelen = strlen(base);
- if (flags & NFS_PATH_CANONICAL) {
+ if (*end == '/') {
/* Strip off excess slashes in base string */
while (namelen > 0 && base[namelen - 1] == '/')
namelen--;
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index b62973045a3e..a61350f75c74 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -178,12 +178,14 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
__must_hold(&tbl->slot_tbl_lock)
{
struct nfs4_slot *slot;
+ int ret;
slot = nfs4_lookup_slot(tbl, slotid);
- if (IS_ERR(slot))
- return PTR_ERR(slot);
- *seq_nr = slot->seq_nr;
- return 0;
+ ret = PTR_ERR_OR_ZERO(slot);
+ if (!ret)
+ *seq_nr = slot->seq_nr;
+
+ return ret;
}
/*
@@ -196,7 +198,7 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
u32 slotid, u32 seq_nr)
{
- u32 cur_seq;
+ u32 cur_seq = 0;
bool ret = false;
spin_lock(&tbl->slot_tbl_lock);
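The nfs4_slot_get_seqid change above is a small refactor onto PTR_ERR_OR_ZERO(), which folds the IS_ERR() test and the error extraction into one expression and only writes the out parameter on success; the companion cur_seq = 0 hunk silences the maybe-uninitialized warning this pattern can otherwise trigger (see the -Wmaybe-uninitialized additions to scripts/Makefile.extrawarn near the end of this series). A self-contained model of the idiom, using the same encoding the kernel uses, where error pointers occupy the top 4095 values of the address space:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static inline long PTR_ERR_OR_ZERO(const void *p)
    {
            return IS_ERR(p) ? (long)p : 0;     /* -errno or 0 */
    }

    int main(void)
    {
            int slot = 42;
            void *ok  = &slot;
            void *bad = (void *)(long)-2;       /* encodes -ENOENT */

            printf("%ld %ld\n", PTR_ERR_OR_ZERO(ok), PTR_ERR_OR_ZERO(bad));
            return 0;
    }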
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 56b2d96f9103..259ef85f435a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -146,6 +146,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
u32 id;
int i;
+ if (fsinfo->nlayouttypes == 0)
+ goto out_no_driver;
if (!(server->nfs_client->cl_exchange_flags &
(EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index a18613579001..0ee19ecc982d 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1544,8 +1544,6 @@ const struct file_operations ntfs_dir_ops = {
.iterate = ntfs_readdir, /* Read directory contents. */
#ifdef NTFS_RW
.fsync = ntfs_dir_fsync, /* Sync a directory to disk. */
- /*.aio_fsync = ,*/ /* Sync all outstanding async
- i/o operations on a kiocb. */
#endif /* NTFS_RW */
/*.ioctl = ,*/ /* Perform function on the
mounted filesystem. */
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e7054e2ac922..3ecb9f337b7d 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3699,7 +3699,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
struct ocfs2_dx_root_block *dx_root)
{
- int credits = ocfs2_clusters_to_blocks(osb->sb, 2);
+ int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
credits += ocfs2_quota_trans_credits(osb->sb);
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index eb09aa026723..d484068ca716 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -141,6 +141,9 @@ static struct client_debug_mask client_debug_mask;
*/
static DEFINE_MUTEX(orangefs_debug_lock);
+/* Used to protect data in ORANGEFS_KMOD_DEBUG_HELP_FILE */
+static DEFINE_MUTEX(orangefs_help_file_lock);
+
/*
* initialize kmod debug operations, create orangefs debugfs dir and
* ORANGEFS_KMOD_DEBUG_HELP_FILE.
@@ -289,6 +292,8 @@ static void *help_start(struct seq_file *m, loff_t *pos)
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n");
+ mutex_lock(&orangefs_help_file_lock);
+
if (*pos == 0)
payload = m->private;
@@ -305,6 +310,7 @@ static void *help_next(struct seq_file *m, void *v, loff_t *pos)
static void help_stop(struct seq_file *m, void *p)
{
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n");
+ mutex_unlock(&orangefs_help_file_lock);
}
static int help_show(struct seq_file *m, void *v)
@@ -610,32 +616,54 @@ out:
* /sys/kernel/debug/orangefs/debug-help can be catted to
* see all the available kernel and client debug keywords.
*
- * When the kernel boots, we have no idea what keywords the
+ * When orangefs.ko initializes, we have no idea what keywords the
* client supports, nor their associated masks.
*
- * We pass through this function once at boot and stamp a
+ * We pass through this function once at module-load and stamp a
* boilerplate "we don't know" message for the client in the
* debug-help file. We pass through here again when the client
* starts and then we can fill out the debug-help file fully.
*
* The client might be restarted any number of times between
- * reboots, we only build the debug-help file the first time.
+ * module reloads; we only build the debug-help file the first time.
*/
int orangefs_prepare_debugfs_help_string(int at_boot)
{
- int rc = -EINVAL;
- int i;
- int byte_count = 0;
char *client_title = "Client Debug Keywords:\n";
char *kernel_title = "Kernel Debug Keywords:\n";
+ size_t string_size = DEBUG_HELP_STRING_SIZE;
+ size_t result_size;
+ size_t i;
+ char *new;
+ int rc = -EINVAL;
gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
- if (at_boot) {
- byte_count += strlen(HELP_STRING_UNINITIALIZED);
+ if (at_boot)
client_title = HELP_STRING_UNINITIALIZED;
- } else {
- /*
+
+ /* build a new debug_help_string. */
+ new = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
+ if (!new) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * strlcat(dst, src, size) will append at most
+ * "size - strlen(dst) - 1" bytes of src onto dst,
+ * null terminating the result, and return the total
+ * length of the string it tried to create.
+ *
+ * We'll just plow through here building our new debug
+ * help string and let strlcat take care of ensuring that
+ * dst doesn't overflow.
+ */
+ strlcat(new, client_title, string_size);
+
+ if (!at_boot) {
+
+ /*
* fill the client keyword/mask array and remember
* how many elements there were.
*/
@@ -644,64 +672,40 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
if (cdm_element_count <= 0)
goto out;
- /* Count the bytes destined for debug_help_string. */
- byte_count += strlen(client_title);
-
for (i = 0; i < cdm_element_count; i++) {
- byte_count += strlen(cdm_array[i].keyword + 2);
- if (byte_count >= DEBUG_HELP_STRING_SIZE) {
- pr_info("%s: overflow 1!\n", __func__);
- goto out;
- }
+ strlcat(new, "\t", string_size);
+ strlcat(new, cdm_array[i].keyword, string_size);
+ strlcat(new, "\n", string_size);
}
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "%s: cdm_element_count:%d:\n",
- __func__,
- cdm_element_count);
}
- byte_count += strlen(kernel_title);
+ strlcat(new, "\n", string_size);
+ strlcat(new, kernel_title, string_size);
+
for (i = 0; i < num_kmod_keyword_mask_map; i++) {
- byte_count +=
- strlen(s_kmod_keyword_mask_map[i].keyword + 2);
- if (byte_count >= DEBUG_HELP_STRING_SIZE) {
- pr_info("%s: overflow 2!\n", __func__);
- goto out;
- }
+ strlcat(new, "\t", string_size);
+ strlcat(new, s_kmod_keyword_mask_map[i].keyword, string_size);
+ result_size = strlcat(new, "\n", string_size);
}
- /* build debug_help_string. */
- debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
- if (!debug_help_string) {
- rc = -ENOMEM;
+ /* See if we tried to put too many bytes into "new"... */
+ if (result_size >= string_size) {
+ kfree(new);
goto out;
}
- strcat(debug_help_string, client_title);
-
- if (!at_boot) {
- for (i = 0; i < cdm_element_count; i++) {
- strcat(debug_help_string, "\t");
- strcat(debug_help_string, cdm_array[i].keyword);
- strcat(debug_help_string, "\n");
- }
- }
-
- strcat(debug_help_string, "\n");
- strcat(debug_help_string, kernel_title);
-
- for (i = 0; i < num_kmod_keyword_mask_map; i++) {
- strcat(debug_help_string, "\t");
- strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
- strcat(debug_help_string, "\n");
+ if (at_boot) {
+ debug_help_string = new;
+ } else {
+ mutex_lock(&orangefs_help_file_lock);
+ memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE);
+ strlcat(debug_help_string, new, string_size);
+ mutex_unlock(&orangefs_help_file_lock);
}
rc = 0;
-out:
-
- return rc;
+out:
+	return rc;
}
@@ -959,8 +963,12 @@ int orangefs_debugfs_new_client_string(void __user *arg)
ret = copy_from_user(&client_debug_array_string,
(void __user *)arg,
ORANGEFS_MAX_DEBUG_STRING_LEN);
- if (ret != 0)
+
+ if (ret != 0) {
+ pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+ __func__);
return -EIO;
+ }
/*
* The real client-core makes an effort to ensure
@@ -975,45 +983,18 @@ int orangefs_debugfs_new_client_string(void __user *arg)
client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
'\0';
- if (ret != 0) {
- pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
- __func__);
- return -EIO;
- }
-
pr_info("%s: client debug array string has been received.\n",
__func__);
if (!help_string_initialized) {
- /* Free the "we don't know yet" default string... */
- kfree(debug_help_string);
-
- /* build a proper debug help string */
+ /* Build a proper debug help string. */
if (orangefs_prepare_debugfs_help_string(0)) {
gossip_err("%s: no debug help string\n",
__func__);
return -EIO;
}
- /* Replace the boilerplate boot-time debug-help file. */
- debugfs_remove(help_file_dentry);
-
- help_file_dentry =
- debugfs_create_file(
- ORANGEFS_KMOD_DEBUG_HELP_FILE,
- 0444,
- debug_dir,
- debug_help_string,
- &debug_help_fops);
-
- if (!help_file_dentry) {
- gossip_err("%s: debugfs_create_file failed for"
- " :%s:!\n",
- __func__,
- ORANGEFS_KMOD_DEBUG_HELP_FILE);
- return -EIO;
- }
}
debug_mask_to_string(&client_debug_mask, 1);
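The orangefs rewrite above leans on the strlcat() contract quoted in the patch: dst can never overflow, and the return value is the length of the string strlcat tried to create, so a single result >= size comparison after the final (non-empty) append flags truncation anywhere in the accumulated string. A small sketch of that overflow test; glibc has no strlcat, so a minimal stand-in is included:

    #include <stdio.h>
    #include <string.h>

    /* minimal strlcat: appends with truncation, always NUL-terminates,
     * returns the length the full concatenation would have had */
    static size_t my_strlcat(char *dst, const char *src, size_t size)
    {
            size_t dlen = strlen(dst);

            if (dlen + 1 < size)
                    strncat(dst, src, size - dlen - 1);
            return dlen + strlen(src);
    }

    int main(void)
    {
            char buf[16] = "";
            size_t result;

            my_strlcat(buf, "Kernel Debug ", sizeof(buf));
            result = my_strlcat(buf, "Keywords:\n", sizeof(buf));

            if (result >= sizeof(buf))  /* one check covers every append */
                    fprintf(stderr, "help string truncated\n");
            return 0;
    }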
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index 2e5b03065f34..4113eb0495bf 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -124,7 +124,7 @@ static int __init orangefs_init(void)
* unknown at boot time.
*
* orangefs_prepare_debugfs_help_string will be used again
- * later to rebuild the debug-help file after the client starts
+ * later to rebuild the debug-help-string after the client starts
* and passes along the needed info. The argument signifies
* which time orangefs_prepare_debugfs_help_string is being
* called.
@@ -152,7 +152,9 @@ static int __init orangefs_init(void)
ret = register_filesystem(&orangefs_fs_type);
if (ret == 0) {
- pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION);
+ pr_info("%s: module version %s loaded\n",
+ __func__,
+ ORANGEFS_VERSION);
ret = 0;
goto out;
}
diff --git a/fs/splice.c b/fs/splice.c
index 153d4f3bd441..dcaf185a5731 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -299,13 +299,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
{
struct iov_iter to;
struct kiocb kiocb;
- loff_t isize;
int idx, ret;
- isize = i_size_read(in->f_mapping->host);
- if (unlikely(*ppos >= isize))
- return 0;
-
iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len);
idx = to.idx;
init_sync_kiocb(&kiocb, in);
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 613c5cf19436..5c2929f94bd3 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -199,9 +199,9 @@ xfs_defer_intake_work(
struct xfs_defer_pending *dfp;
list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
- trace_xfs_defer_intake_work(tp->t_mountp, dfp);
dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
dfp->dfp_count);
+ trace_xfs_defer_intake_work(tp->t_mountp, dfp);
list_sort(tp->t_mountp, &dfp->dfp_work,
dfp->dfp_type->diff_items);
list_for_each(li, &dfp->dfp_work)
@@ -221,21 +221,14 @@ xfs_defer_trans_abort(
struct xfs_defer_pending *dfp;
trace_xfs_defer_trans_abort(tp->t_mountp, dop);
- /*
- * If the transaction was committed, drop the intent reference
- * since we're bailing out of here. The other reference is
- * dropped when the intent hits the AIL. If the transaction
- * was not committed, the intent is freed by the intent item
- * unlock handler on abort.
- */
- if (!dop->dop_committed)
- return;
- /* Abort intent items. */
+ /* Abort intent items that don't have a done item. */
list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
- if (!dfp->dfp_done)
+ if (dfp->dfp_intent && !dfp->dfp_done) {
dfp->dfp_type->abort_intent(dfp->dfp_intent);
+ dfp->dfp_intent = NULL;
+ }
}
/* Shut down FS. */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 40e887068da2..0504ef8f3aa3 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -118,9 +118,9 @@ do { \
#define this_cpu_generic_read(pcp) \
({ \
typeof(pcp) __ret; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
__ret = raw_cpu_generic_read(pcp); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
__ret; \
})
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index af0254c09424..4df64a1fc09e 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,6 +14,8 @@
* [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
* and/or .init.* sections.
* [__start_rodata, __end_rodata]: contains .rodata.* sections
+ * [__start_data_ro_after_init, __end_data_ro_after_init]:
+ * contains data.ro_after_init section
* [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
* may be out of this range on some architectures.
* [_sinittext, _einittext]: contains .init.text.* sections
@@ -31,6 +33,7 @@ extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
+extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 30747960bc54..31e1d639abed 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -259,7 +259,10 @@
* own by defining an empty RO_AFTER_INIT_DATA.
*/
#ifndef RO_AFTER_INIT_DATA
-#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#define RO_AFTER_INIT_DATA \
+ __start_data_ro_after_init = .; \
+ *(.data..ro_after_init) \
+ __end_data_ro_after_init = .;
#endif
/*
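The linker-script hunk above brackets .data..ro_after_init with __start/__end marker symbols, turning the section into an ordinary [start, end) byte range that C code can walk; that is exactly what the kmemleak hunk later in this series does with scan_large_block(). A hedged userspace illustration of the pattern, with a static array standing in for the linker-provided section:

    #include <stdio.h>

    /* In the kernel these are declared in asm-generic/sections.h and
     * defined by the linker script:
     *     extern char __start_data_ro_after_init[];
     *     extern char __end_data_ro_after_init[];
     * Here an ordinary array models the section. */
    static char section_bytes[64];
    #define section_start (section_bytes)
    #define section_end   (section_bytes + sizeof(section_bytes))

    static void scan_large_block(void *start, void *end)
    {
            printf("scanning %zu bytes\n",
                   (size_t)((char *)end - (char *)start));
    }

    int main(void)
    {
            scan_large_block(section_start, section_end);
            return 0;
    }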
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b352a7b812e6..a9cfd33c7b1a 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -779,7 +779,6 @@ extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-
/*@}*/
/* PCI section */
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 52bf44e2b5cc..c4fc49583dc0 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -429,4 +429,7 @@ void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
void drm_unplug_dev(struct drm_device *dev);
+int drm_dev_set_unique(struct drm_device *dev, const char *name);
+
+
#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d440bc11..38eabf65f19d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -330,7 +330,6 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
int drm_load_edid_firmware(struct drm_connector *connector);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 689a8b9b9c8f..61a3d90f32b3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -555,7 +555,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
void acpi_walk_dep_device_list(acpi_handle handle);
-struct platform_device *acpi_create_platform_device(struct acpi_device *);
+struct platform_device *acpi_create_platform_device(struct acpi_device *,
+ struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 96337b15a60d..a8e66344bacc 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -258,6 +258,8 @@ struct ceph_watch_item {
struct ceph_entity_addr addr;
};
+#define CEPH_LINGER_ID_START 0xffff000000000000ULL
+
struct ceph_osd_client {
struct ceph_client *client;
diff --git a/include/linux/console.h b/include/linux/console.h
index 3672809234a7..d530c4627e54 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;
-#ifdef CONFIG_OF
-extern void console_set_by_of(void);
-#else
-static inline void console_set_by_of(void) {}
-#endif
-
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index c46d2aa16d81..1d18af034554 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
static inline void frontswap_init(unsigned type, unsigned long *map)
{
- if (frontswap_enabled())
- __frontswap_init(type, map);
+#ifdef CONFIG_FRONTSWAP
+ __frontswap_init(type, map);
+#endif
}
#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 16d2b6e874d6..dc0478c07b2a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -321,6 +321,7 @@ struct writeback_control;
#define IOCB_HIPRI (1 << 3)
#define IOCB_DSYNC (1 << 4)
#define IOCB_SYNC (1 << 5)
+#define IOCB_WRITE (1 << 6)
struct kiocb {
struct file *ki_filp;
@@ -1709,7 +1710,6 @@ struct file_operations {
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index ee1bed7dbfc6..78bb0d7f6b11 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
return -ENOSYS;
}
+static inline int phy_reset(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 03725fe89859..1c12a350eca3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -389,6 +389,11 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+ * priorities and the driver will attempt to execute batches in priority order.
+ */
+#define I915_PARAM_HAS_SCHEDULER 41
+
typedef struct drm_i915_getparam {
__s32 param;
/*
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 33d00a4ce656..819d895edfdc 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,12 +18,6 @@
#include <linux/types.h>
#include <sound/asound.h>
-#ifndef __KERNEL__
-#error This API is an early revision and not enabled in the current
-#error kernel release, it will be enabled in a future kernel version
-#error with incompatible changes to what is here.
-#endif
-
/*
* Maximum number of channels topology kcontrol can represent.
*/
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 084452e34a12..bdff5ed57f10 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -203,8 +203,10 @@ static int __init test_suspend(void)
/* RTCs have initialized by now too ... can we use one? */
dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
- if (dev)
+ if (dev) {
rtc = rtc_class_open(dev_name(dev));
+ put_device(dev);
+ }
if (!rtc) {
printk(warn_no_rtc);
return 0;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index de08fc90baaf..5028f4fd504a 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -253,17 +253,6 @@ static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);
-#ifdef CONFIG_OF
-static bool of_specified_console;
-
-void console_set_by_of(void)
-{
- of_specified_console = true;
-}
-#else
-# define of_specified_console false
-#endif
-
/* Flag: console code may call schedule() */
static int console_may_schedule;
@@ -2657,7 +2646,7 @@ void register_console(struct console *newcon)
* didn't select a console we take the first one
* that registers here.
*/
- if (preferred_console < 0 && !of_specified_console) {
+ if (preferred_console < 0) {
if (newcon->index < 0)
newcon->index = 0;
if (newcon->setup == NULL ||
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 4d830e299989..f87d138e9672 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -192,6 +192,7 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
trace->entries = stack->entries;
trace->skip = 0;
}
+EXPORT_SYMBOL_GPL(depot_fetch_stack);
/**
* depot_save_stack - save stack in a stack depot.
@@ -283,3 +284,4 @@ exit:
fast_exit:
return retval;
}
+EXPORT_SYMBOL_GPL(depot_save_stack);
diff --git a/mm/cma.c b/mm/cma.c
index 384c2cb51b56..c960459eda7e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -385,6 +385,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
bitmap_maxno = cma_bitmap_maxno(cma);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+ if (bitmap_count > bitmap_maxno)
+ return NULL;
+
for (;;) {
mutex_lock(&cma->lock);
bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
diff --git a/mm/filemap.c b/mm/filemap.c
index c7fe2f16503f..50b52fe51937 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1732,6 +1732,9 @@ find_page:
if (inode->i_blkbits == PAGE_SHIFT ||
!mapping->a_ops->is_partially_uptodate)
goto page_not_up_to_date;
+ /* pipes can't handle partially uptodate pages */
+ if (unlikely(iter->type & ITER_PIPE))
+ goto page_not_up_to_date;
if (!trylock_page(page))
goto page_not_up_to_date;
/* Did it get truncated before we got the lock? */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec49d9ef1eef..418bf01a50ed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1826,11 +1826,17 @@ static void return_unused_surplus_pages(struct hstate *h,
* is not the case is if a reserve map was changed between calls. It
* is the responsibility of the caller to notice the difference and
* take appropriate action.
+ *
+ * vma_add_reservation is used in error paths where a reservation must
+ * be restored when a newly allocated huge page must be freed. It is
+ * to be called after calling vma_needs_reservation to determine if a
+ * reservation exists.
*/
enum vma_resv_mode {
VMA_NEEDS_RESV,
VMA_COMMIT_RESV,
VMA_END_RESV,
+ VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
@@ -1856,6 +1862,14 @@ static long __vma_reservation_common(struct hstate *h,
region_abort(resv, idx, idx + 1);
ret = 0;
break;
+ case VMA_ADD_RESV:
+ if (vma->vm_flags & VM_MAYSHARE)
+ ret = region_add(resv, idx, idx + 1);
+ else {
+ region_abort(resv, idx, idx + 1);
+ ret = region_del(resv, idx, idx + 1);
+ }
+ break;
default:
BUG();
}
@@ -1903,6 +1917,56 @@ static void vma_end_reservation(struct hstate *h,
(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}
+static long vma_add_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
+}
+
+/*
+ * This routine is called to restore a reservation on error paths. In the
+ * specific error paths, a huge page was allocated (via alloc_huge_page)
+ * and is about to be freed. If a reservation for the page existed,
+ * alloc_huge_page would have consumed the reservation and set PagePrivate
+ * in the newly allocated page. When the page is freed via free_huge_page,
+ * the global reservation count will be incremented if PagePrivate is set.
+ * However, free_huge_page cannot adjust the reserve map. Adjust the
+ * reserve map here to be consistent with global reserve count adjustments
+ * to be made by free_huge_page.
+ */
+static void restore_reserve_on_error(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long address,
+ struct page *page)
+{
+ if (unlikely(PagePrivate(page))) {
+ long rc = vma_needs_reservation(h, vma, address);
+
+ if (unlikely(rc < 0)) {
+ /*
+ * Rare out of memory condition in reserve map
+ * manipulation. Clear PagePrivate so that
+ * global reserve count will not be incremented
+ * by free_huge_page. This will make it appear
+ * as though the reservation for this page was
+ * consumed. This may prevent the task from
+ * faulting in the page at a later time. This
+ * is better than inconsistent global huge page
+ * accounting of reserve counts.
+ */
+ ClearPagePrivate(page);
+ } else if (rc) {
+ rc = vma_add_reservation(h, vma, address);
+ if (unlikely(rc < 0))
+ /*
+ * See above comment about rare out of
+ * memory condition.
+ */
+ ClearPagePrivate(page);
+ } else
+ vma_end_reservation(h, vma, address);
+ }
+}
+
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{
@@ -3498,6 +3562,7 @@ retry_avoidcopy:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out_release_all:
+ restore_reserve_on_error(h, vma, address, new_page);
put_page(new_page);
out_release_old:
put_page(old_page);
@@ -3680,6 +3745,7 @@ backout:
spin_unlock(ptl);
backout_unlocked:
unlock_page(page);
+ restore_reserve_on_error(h, vma, address, page);
put_page(page);
goto out;
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e5355a5b423f..d1380ed93fdf 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1414,6 +1414,7 @@ static void kmemleak_scan(void)
/* data/bss scanning */
scan_large_block(_sdata, _edata);
scan_large_block(__bss_start, __bss_stop);
+ scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
#ifdef CONFIG_SMP
/* per-cpu sections scanning */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index de88f33519c0..19e796d36a62 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
if (!PageHuge(p) && PageTransHuge(hpage)) {
- lock_page(hpage);
- if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
- unlock_page(hpage);
- if (!PageAnon(hpage))
+ lock_page(p);
+ if (!PageAnon(p) || unlikely(split_huge_page(p))) {
+ unlock_page(p);
+ if (!PageAnon(p))
pr_err("Memory failure: %#lx: non anonymous thp\n",
pfn);
else
@@ -1126,9 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
put_hwpoison_page(p);
return -EBUSY;
}
- unlock_page(hpage);
- get_hwpoison_page(p);
- put_hwpoison_page(hpage);
+ unlock_page(p);
VM_BUG_ON_PAGE(!page_count(p), p);
hpage = compound_head(p);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 072d791dce2d..6de9440e3ae2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3658,7 +3658,7 @@ retry:
/* Make sure we know about allocations which stall for too long */
if (time_after(jiffies, alloc_start + stall_timeout)) {
warn_alloc(gfp_mask,
- "page alloction stalls for %ums, order:%u\n",
+ "page allocation stalls for %ums, order:%u",
jiffies_to_msecs(jiffies-alloc_start), order);
stall_timeout += 10 * HZ;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index ad7813d73ea7..166ebf5d2bce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1483,6 +1483,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
copy_highpage(newpage, oldpage);
flush_dcache_page(newpage);
+ __SetPageLocked(newpage);
+ __SetPageSwapBacked(newpage);
SetPageUptodate(newpage);
set_page_private(newpage, swap_index);
SetPageSwapCache(newpage);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 71f0b28a1bec..329b03843863 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
s = create_cache(cache_name, root_cache->object_size,
root_cache->size, root_cache->align,
- root_cache->flags, root_cache->ctor,
- memcg, root_cache);
+ root_cache->flags & CACHE_CREATE_MASK,
+ root_cache->ctor, memcg, root_cache);
/*
* If we could not create a memcg cache, do not complain, because
* that's not critical at all as we can always proceed with the root
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2210de290b54..f30438970cd1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2224,6 +2224,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
+ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+ return 0;
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index 7d54e944de5e..dcbe67ff3e2b 100644
--- a/net/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -34,7 +34,8 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
fl->object_size = le32_to_cpu(legacy->fl_object_size);
fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
- if (fl->pool_id == 0)
+ if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+ fl->stripe_count == 0 && fl->object_size == 0)
fl->pool_id = -1;
}
EXPORT_SYMBOL(ceph_file_layout_from_legacy);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d9bf7a1d0a58..e6ae15bc41b7 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4094,6 +4094,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
osd_init(&osdc->homeless_osd);
osdc->homeless_osd.o_osdc = osdc;
osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
+ osdc->last_linger_id = CEPH_LINGER_ID_START;
osdc->linger_requests = RB_ROOT;
osdc->map_checks = RB_ROOT;
osdc->linger_map_checks = RB_ROOT;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 34dd7b26ee5f..62a482790937 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2753,14 +2753,18 @@ EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
+ rcu_read_lock();
xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
+ rcu_read_lock();
rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
xprt);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
@@ -2770,9 +2774,8 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
struct rpc_xprt_switch *xps;
bool ret;
- xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
-
rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
ret = rpc_xprt_switch_has_addr(xps, sap);
rcu_read_unlock();
return ret;
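The sunrpc hunks above fix a classic RCU ordering bug: rcu_dereference() only yields a pointer that is safe to use between rcu_read_lock() and rcu_read_unlock(), because the read-side critical section is what guarantees the object is not freed out from under the reader. The shape of the before/after, restated from the hunk (not compilable on its own; the types and callees are the sunrpc ones):

    /* Before: the protected pointer is fetched outside any read-side
     * critical section, so its target may vanish before it is used. */
    xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
    rcu_read_lock();
    ret = rpc_xprt_switch_has_addr(xps, sap);
    rcu_read_unlock();

    /* After: both the dereference and every use of its result sit
     * inside the rcu_read_lock()/rcu_read_unlock() pair. */
    rcu_read_lock();
    xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
    ret = rpc_xprt_switch_has_addr(xps, sap);
    rcu_read_unlock();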
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 210949562786..26b26beef2d4 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -44,18 +44,20 @@
* being done.
*
* When the underlying transport disconnects, MRs are left in one of
- * three states:
+ * four states:
*
* INVALID: The MR was not in use before the QP entered ERROR state.
- * (Or, the LOCAL_INV WR has not completed or flushed yet).
- *
- * STALE: The MR was being registered or unregistered when the QP
- * entered ERROR state, and the pending WR was flushed.
*
* VALID: The MR was registered before the QP entered ERROR state.
*
- * When frwr_op_map encounters STALE and VALID MRs, they are recovered
- * with ib_dereg_mr and then are re-initialized. Beause MR recovery
+ * FLUSHED_FR: The MR was being registered when the QP entered ERROR
+ * state, and the pending WR was flushed.
+ *
+ * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
+ * state, and the pending WR was flushed.
+ *
+ * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
+ * with ib_dereg_mr and then are re-initialized. Because MR recovery
* allocates fresh resources, it is deferred to a workqueue, and the
* recovered MRs are placed back on the rb_mws list when recovery is
* complete. frwr_op_map allocates another MR for the current RPC while
@@ -177,12 +179,15 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
+ enum rpcrdma_frmr_state state = mw->frmr.fr_state;
struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int rc;
rc = __frwr_reset_mr(ia, mw);
- ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ if (state != FRMR_FLUSHED_LI)
+ ib_dma_unmap_sg(ia->ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir);
if (rc)
goto out_release;
@@ -262,10 +267,8 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
}
static void
-__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
- const char *wr)
+__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
- frmr->fr_state = FRMR_IS_STALE;
if (wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
wr, ib_wc_status_msg(wc->status),
@@ -288,7 +291,8 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS) {
cqe = wc->wr_cqe;
frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- __frwr_sendcompletion_flush(wc, frmr, "fastreg");
+ frmr->fr_state = FRMR_FLUSHED_FR;
+ __frwr_sendcompletion_flush(wc, "fastreg");
}
}
@@ -308,7 +312,8 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS) {
cqe = wc->wr_cqe;
frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- __frwr_sendcompletion_flush(wc, frmr, "localinv");
+ frmr->fr_state = FRMR_FLUSHED_LI;
+ __frwr_sendcompletion_flush(wc, "localinv");
}
}
@@ -328,8 +333,10 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wr_cqe and status are reliable at this point */
cqe = wc->wr_cqe;
frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- if (wc->status != IB_WC_SUCCESS)
- __frwr_sendcompletion_flush(wc, frmr, "localinv");
+ if (wc->status != IB_WC_SUCCESS) {
+ frmr->fr_state = FRMR_FLUSHED_LI;
+ __frwr_sendcompletion_flush(wc, "localinv");
+ }
complete(&frmr->fr_linv_done);
}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0d35b761c883..6e1bba358203 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -216,7 +216,8 @@ struct rpcrdma_rep {
enum rpcrdma_frmr_state {
FRMR_IS_INVALID, /* ready to be used */
FRMR_IS_VALID, /* in use */
- FRMR_IS_STALE, /* failed completion */
+ FRMR_FLUSHED_FR, /* flushed FASTREG WR */
+ FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
};
struct rpcrdma_frmr {
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 53449a6ff6aa..7c321a603b07 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -36,6 +36,7 @@ warning-2 += -Wshadow
warning-2 += $(call cc-option, -Wlogical-op)
warning-2 += $(call cc-option, -Wmissing-field-initializers)
warning-2 += $(call cc-option, -Wsign-compare)
+warning-2 += $(call cc-option, -Wmaybe-uninitialized)
warning-3 := -Wbad-function-cast
warning-3 += -Wcast-qual
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index dd779c40c8e6..3b1b13818d59 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -17,4 +17,8 @@ endif
ifdef CONFIG_UBSAN_NULL
CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
endif
+
+ # -fsanitize=* options make GCC less smart than usual and
+ # increase the number of 'maybe-uninitialized' false positives.
+ CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
endif
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 19f5adfd877d..d9ff038c1b28 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -8,6 +8,9 @@
# of the GNU General Public License, incorporated herein by reference.
import sys, os, re
+from signal import signal, SIGPIPE, SIG_DFL
+
+signal(SIGPIPE, SIG_DFL)
if len(sys.argv) != 3:
sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])
diff --git a/sound/core/info.c b/sound/core/info.c
index 895362a696c9..8ab72e0f5932 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -325,10 +325,15 @@ static ssize_t snd_info_text_entry_write(struct file *file,
size_t next;
int err = 0;
+ if (!entry->c.text.write)
+ return -EIO;
pos = *offset;
if (!valid_pos(pos, count))
return -EIO;
next = pos + count;
+ /* refuse overly large text inputs */
+ if (next > 16 * 1024)
+ return -EIO;
mutex_lock(&entry->access);
buf = data->wbuffer;
if (!buf) {
@@ -366,7 +371,9 @@ static int snd_info_seq_show(struct seq_file *seq, void *p)
struct snd_info_private_data *data = seq->private;
struct snd_info_entry *entry = data->entry;
- if (entry->c.text.read) {
+ if (!entry->c.text.read) {
+ return -EIO;
+ } else {
data->rbuffer->buffer = (char *)seq; /* XXX hack! */
entry->c.text.read(entry, data->rbuffer);
}
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 18baea2f7d65..84f86745c30e 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"),
};
static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
- { "Capture", NULL, "AINA" },
- { "Capture", NULL, "AINB" },
+ { "Capture", NULL, "AINL" },
+ { "Capture", NULL, "AINR" },
- { "AOUTA", NULL, "Playback" },
- { "AOUTB", NULL, "Playback" },
+ { "AOUTL", NULL, "Playback" },
+ { "AOUTR", NULL, "Playback" },
};
/**
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 1152aa5e7c39..cf37936bfe3a 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -880,7 +880,8 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = {
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
/* DAI */
- SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, DA7219_DAI_TDM_CTRL,
+ DA7219_DAI_OE_SHIFT, DA7219_NO_INVERT),
SND_SOC_DAPM_AIF_IN("DAIIN", "Playback", 0, SND_SOC_NOPM, 0, 0),
/* Output Muxes */
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index b904492d7744..90b5948e0ff3 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -364,7 +364,12 @@ static int hdmi_of_xlate_dai_name(struct snd_soc_component *component,
struct of_phandle_args *args,
const char **dai_name)
{
- int id = args->args[0];
+ int id;
+
+ if (args->args_count)
+ id = args->args[0];
+ else
+ id = 0;
if (id < ARRAY_SIZE(hdmi_dai_name)) {
*dai_name = hdmi_dai_name[id];
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 55558643166f..2db8179047ae 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -249,6 +249,11 @@ static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
snd_soc_dapm_force_enable_pin(dapm, "LDO1");
snd_soc_dapm_sync(dapm);
+ regmap_update_bits(rt298->regmap,
+ RT298_POWER_CTRL1, 0x1001, 0);
+ regmap_update_bits(rt298->regmap,
+ RT298_POWER_CTRL2, 0x4, 0x4);
+
regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24);
msleep(50);
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 01a18d88f1eb..00ff2788879e 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -1547,11 +1547,11 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
msleep(sleep_time[i]);
val = snd_soc_read(codec, RT5663_EM_JACK_TYPE_2) &
0x0003;
+ dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n",
+ __func__, val, sleep_time[i]);
i++;
if (val == 0x1 || val == 0x2 || val == 0x3)
break;
- dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n",
- __func__, val, sleep_time[i]);
}
dev_dbg(codec->dev, "%s val = %d\n", __func__, val);
switch (val) {
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index 7b31ee9b82bc..d6e00c77edcd 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -424,7 +424,7 @@ static const struct snd_soc_dai_ops stih407_dac_ops = {
static const struct regmap_config stih407_sas_regmap = {
.reg_bits = 32,
.val_bits = 32,
-
+ .fast_io = true,
.max_register = STIH407_AUDIO_DAC_CTRL,
.reg_defaults = stih407_sas_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults),
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index df5e5cb33baa..810369f687d7 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -341,20 +341,9 @@ static int tas571x_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
}
-
- gpiod_set_value(priv->pdn_gpio, 0);
- usleep_range(5000, 6000);
-
- regcache_cache_only(priv->regmap, false);
- ret = regcache_sync(priv->regmap);
- if (ret)
- return ret;
}
break;
case SND_SOC_BIAS_OFF:
- regcache_cache_only(priv->regmap, true);
- gpiod_set_value(priv->pdn_gpio, 1);
-
if (!IS_ERR(priv->mclk))
clk_disable_unprepare(priv->mclk);
break;
@@ -401,16 +390,6 @@ static const struct snd_kcontrol_new tas5711_controls[] = {
TAS571X_SOFT_MUTE_REG,
TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
1, 1),
-
- SOC_DOUBLE_R_RANGE("CH1 Mixer Volume",
- TAS5717_CH1_LEFT_CH_MIX_REG,
- TAS5717_CH1_RIGHT_CH_MIX_REG,
- 16, 0, 0x80, 0),
-
- SOC_DOUBLE_R_RANGE("CH2 Mixer Volume",
- TAS5717_CH2_LEFT_CH_MIX_REG,
- TAS5717_CH2_RIGHT_CH_MIX_REG,
- 16, 0, 0x80, 0),
};
static const struct regmap_range tas571x_readonly_regs_range[] = {
@@ -488,6 +467,16 @@ static const struct snd_kcontrol_new tas5717_controls[] = {
TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
1, 1),
+ SOC_DOUBLE_R_RANGE("CH1 Mixer Volume",
+ TAS5717_CH1_LEFT_CH_MIX_REG,
+ TAS5717_CH1_RIGHT_CH_MIX_REG,
+ 16, 0, 0x80, 0),
+
+ SOC_DOUBLE_R_RANGE("CH2 Mixer Volume",
+ TAS5717_CH2_LEFT_CH_MIX_REG,
+ TAS5717_CH2_RIGHT_CH_MIX_REG,
+ 16, 0, 0x80, 0),
+
/*
* The biquads are named according to the register names.
* Please note that TI's TAS57xx Graphical Development Environment
@@ -747,13 +736,14 @@ static int tas571x_i2c_probe(struct i2c_client *client,
/* pulse the active low reset line for ~100us */
usleep_range(100, 200);
gpiod_set_value(priv->reset_gpio, 0);
- usleep_range(12000, 20000);
+ usleep_range(13500, 20000);
}
ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0);
if (ret)
return ret;
+ usleep_range(50000, 60000);
memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver));
priv->codec_driver.component_driver.controls = priv->chip->controls;
@@ -770,9 +760,6 @@ static int tas571x_i2c_probe(struct i2c_client *client,
return ret;
}
- regcache_cache_only(priv->regmap, true);
- gpiod_set_value(priv->pdn_gpio, 1);
-
return snd_soc_register_codec(&client->dev, &priv->codec_driver,
&tas571x_dai, 1);
}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 26eb5a0a5575..fd5d1e091038 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -47,6 +47,7 @@ config SND_SOC_INTEL_SST_MATCH
config SND_SOC_INTEL_HASWELL
tristate
+ select SND_SOC_INTEL_SST_FIRMWARE
config SND_SOC_INTEL_BAYTRAIL
tristate
@@ -56,7 +57,6 @@ config SND_SOC_INTEL_HASWELL_MACH
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
@@ -138,7 +138,6 @@ config SND_SOC_INTEL_BROADWELL_MACH
I2C_DESIGNWARE_PLATFORM
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index ba5c0d71720a..0a88537ca58a 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -416,6 +416,7 @@ static const struct dmi_system_id cht_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
},
},
+ { }
};
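
The `{ }` entry added above terminates the dmi_system_id table: DMI matching
walks the array until it hits an all-zero sentinel, so a table without one
runs off the end. A simplified sketch of the walk that relies on it, with
made-up types rather than the kernel's:

	#include <stdio.h>
	#include <string.h>

	struct id { const char *product; };

	/* Walk until the zeroed sentinel; without it we read past the array. */
	static const struct id *match(const struct id *table, const char *product)
	{
		for (; table->product; table++)
			if (!strcmp(table->product, product))
				return table;
		return NULL;
	}

	int main(void)
	{
		static const struct id table[] = {
			{ "Surface 3" },
			{ }	/* the sentinel the patch adds */
		};

		printf("%s\n", match(table, "Surface 3") ? "hit" : "miss");
		return 0;
	}
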
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 6532b8f0ab2f..865a21e557cc 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -130,8 +130,8 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
*/
ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3, &broxton_headset,
- NULL, 0);
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &broxton_headset, NULL, 0);
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 2989c164dafe..06fa5e85dd0e 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -674,7 +674,7 @@ static int skl_probe(struct pci_dev *pci,
if (skl->nhlt == NULL) {
err = -ENODEV;
- goto out_free;
+ goto out_display_power_off;
}
skl_nhlt_update_topology_bin(skl);
@@ -746,6 +746,9 @@ out_mach_free:
skl_machine_device_unregister(skl);
out_nhlt_free:
skl_nhlt_free(skl->nhlt);
+out_display_power_off:
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_display_power(bus, false);
out_free:
skl->init_failed = 1;
skl_free(ebus);
@@ -785,8 +788,7 @@ static void skl_remove(struct pci_dev *pci)
release_firmware(skl->tplg);
- if (pci_dev_run_wake(pci))
- pm_runtime_get_noresume(&pci->dev);
+ pm_runtime_get_noresume(&pci->dev);
/* codec removal, invoke bus_device_remove */
snd_hdac_ext_bus_device_remove(ebus);
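
skl_probe() unwinds with a chain of goto labels; the new
out_display_power_off label slots in so that every exit taken after display
power-up releases it, and only those exits. A generic sketch of the ordering
rule, with invented resource names; acquisitions happen top-down, the label
chain undoes them bottom-up:

	#include <stdio.h>

	/* A failure jumps to the label matching what is held at that point,
	 * exactly how out_display_power_off slots in above out_free. */
	static int probe(int fail_at)
	{
		int err = -1;

		printf("acquire A\n");
		if (fail_at == 1)
			goto out_a;
		printf("acquire B\n");
		if (fail_at == 2)
			goto out_b;
		printf("acquire C\n");
		return 0;

	out_b:
		printf("release B\n");
	out_a:
		printf("release A\n");
		return err;
	}

	int main(void)
	{
		probe(2);	/* acquire A, acquire B, release B, release A */
		return 0;
	}
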
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index f2bf8661dd21..823b5a236d8d 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -208,7 +208,7 @@ config SND_PXA2XX_SOC_IMOTE2
config SND_MMP_SOC_BROWNSTONE
tristate "SoC Audio support for Marvell Brownstone"
- depends on SND_MMP_SOC && MACH_BROWNSTONE
+ depends on SND_MMP_SOC && MACH_BROWNSTONE && I2C
select SND_MMP_SOC_SSPA
select MFD_WM8994
select SND_SOC_WM8994
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 3cde9fb977fa..eff3f9a8b685 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -586,3 +586,6 @@ int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
return 0;
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
+
+MODULE_DESCRIPTION("QTi LPASS CPU Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index e2ff538a8aa5..07000f53db44 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -61,7 +61,40 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- int ret;
+ struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_variant *v = drvdata->variant;
+ int ret, dma_ch, dir = substream->stream;
+ struct lpass_pcm_data *data;
+
+ data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->i2s_port = cpu_dai->driver->id;
+ runtime->private_data = data;
+
+ dma_ch = 0;
+ if (v->alloc_dma_channel)
+ dma_ch = v->alloc_dma_channel(drvdata, dir);
+ if (dma_ch < 0)
+ return dma_ch;
+
+ drvdata->substream[dma_ch] = substream;
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_DMACTL_REG(v, dma_ch, dir), 0);
+ if (ret) {
+ dev_err(soc_runtime->dev,
+ "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ data->rdma_ch = dma_ch;
+ else
+ data->wrdma_ch = dma_ch;
snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
@@ -80,13 +113,40 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
return 0;
}
+static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_variant *v = drvdata->variant;
+ struct lpass_pcm_data *data;
+ int dma_ch, dir = substream->stream;
+
+ data = runtime->private_data;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_ch = data->rdma_ch;
+ else
+ dma_ch = data->wrdma_ch;
+
+ drvdata->substream[dma_ch] = NULL;
+
+ if (v->free_dma_channel)
+ v->free_dma_channel(drvdata, dma_ch);
+
+ return 0;
+}
+
static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
snd_pcm_format_t format = params_format(params);
unsigned int channels = params_channels(params);
@@ -179,7 +239,8 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
unsigned int reg;
int ret;
@@ -203,7 +264,8 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
@@ -257,7 +319,8 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
@@ -333,7 +396,8 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
unsigned int base_addr, curr_addr;
int ret, ch, dir = substream->stream;
@@ -374,6 +438,7 @@ static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
static const struct snd_pcm_ops lpass_platform_pcm_ops = {
.open = lpass_platform_pcmops_open,
+ .close = lpass_platform_pcmops_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = lpass_platform_pcmops_hw_params,
.hw_free = lpass_platform_pcmops_hw_free,
@@ -470,117 +535,45 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
{
struct snd_pcm *pcm = soc_runtime->pcm;
struct snd_pcm_substream *psubstream, *csubstream;
- struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct lpass_data *drvdata =
- snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_variant *v = drvdata->variant;
int ret = -EINVAL;
- struct lpass_pcm_data *data;
size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->i2s_port = cpu_dai->driver->id;
- drvdata->private_data = data;
-
psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (psubstream) {
- if (v->alloc_dma_channel)
- data->rdma_ch = v->alloc_dma_channel(drvdata,
- SNDRV_PCM_STREAM_PLAYBACK);
-
- if (data->rdma_ch < 0)
- return data->rdma_ch;
-
- drvdata->substream[data->rdma_ch] = psubstream;
-
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
soc_runtime->platform->dev,
size, &psubstream->dma_buffer);
- if (ret)
- goto playback_alloc_err;
-
- ret = regmap_write(drvdata->lpaif_map,
- LPAIF_RDMACTL_REG(v, data->rdma_ch), 0);
if (ret) {
- dev_err(soc_runtime->dev,
- "%s() error writing to rdmactl reg: %d\n",
- __func__, ret);
- goto capture_alloc_err;
+ dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
+ return ret;
}
}
csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
if (csubstream) {
- if (v->alloc_dma_channel)
- data->wrdma_ch = v->alloc_dma_channel(drvdata,
- SNDRV_PCM_STREAM_CAPTURE);
-
- if (data->wrdma_ch < 0) {
- ret = data->wrdma_ch;
- goto capture_alloc_err;
- }
-
- drvdata->substream[data->wrdma_ch] = csubstream;
-
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
soc_runtime->platform->dev,
size, &csubstream->dma_buffer);
- if (ret)
- goto capture_alloc_err;
-
- ret = regmap_write(drvdata->lpaif_map,
- LPAIF_WRDMACTL_REG(v, data->wrdma_ch), 0);
if (ret) {
- dev_err(soc_runtime->dev,
- "%s() error writing to wrdmactl reg: %d\n",
- __func__, ret);
- goto capture_reg_err;
+ dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
+ if (psubstream)
+ snd_dma_free_pages(&psubstream->dma_buffer);
+ return ret;
}
+
}
return 0;
-
-capture_reg_err:
- if (csubstream)
- snd_dma_free_pages(&csubstream->dma_buffer);
-
-capture_alloc_err:
- if (psubstream)
- snd_dma_free_pages(&psubstream->dma_buffer);
-
- playback_alloc_err:
- dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
-
- return ret;
}
static void lpass_platform_pcm_free(struct snd_pcm *pcm)
{
- struct snd_soc_pcm_runtime *rt;
- struct lpass_data *drvdata;
- struct lpass_pcm_data *data;
- struct lpass_variant *v;
struct snd_pcm_substream *substream;
- int ch, i;
+ int i;
for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
substream = pcm->streams[i].substream;
if (substream) {
- rt = substream->private_data;
- drvdata = snd_soc_platform_get_drvdata(rt->platform);
- data = drvdata->private_data;
-
- ch = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- ? data->rdma_ch
- : data->wrdma_ch;
- v = drvdata->variant;
- drvdata->substream[ch] = NULL;
- if (v->free_dma_channel)
- v->free_dma_channel(drvdata, ch);
-
snd_dma_free_pages(&substream->dma_buffer);
substream->dma_buffer.area = NULL;
substream->dma_buffer.addr = 0;
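
The lpass-platform rework above moves the DMA-channel bookkeeping out of a
single driver-wide private_data and onto each stream's
runtime->private_data, so concurrent playback and capture streams no longer
share (and clobber) one structure, and allocation in open() is mirrored by
release in close(). A userspace sketch of per-stream state keyed by
direction; the structure and field names are illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	enum dir { PLAYBACK, CAPTURE };

	struct pcm_data { int rdma_ch; int wrdma_ch; };
	struct stream { enum dir dir; struct pcm_data *private_data; };

	/* open(): allocate state on the stream itself, not on shared drvdata */
	static int stream_open(struct stream *s, int ch)
	{
		struct pcm_data *data = calloc(1, sizeof(*data));

		if (!data)
			return -1;
		if (s->dir == PLAYBACK)
			data->rdma_ch = ch;
		else
			data->wrdma_ch = ch;
		s->private_data = data;
		return 0;
	}

	/* close(): release exactly what this stream's open() set up */
	static void stream_close(struct stream *s)
	{
		struct pcm_data *data = s->private_data;
		int ch = (s->dir == PLAYBACK) ? data->rdma_ch : data->wrdma_ch;

		printf("freeing dma channel %d\n", ch);
		free(data);
		s->private_data = NULL;
	}

	int main(void)
	{
		struct stream p = { PLAYBACK, NULL }, c = { CAPTURE, NULL };

		stream_open(&p, 4);	/* two streams, two private states, */
		stream_open(&c, 5);	/* no shared field to clobber */
		stream_close(&p);
		stream_close(&c);
		return 0;
	}
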
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 35b3cea8207d..924971b6ded5 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -59,7 +59,6 @@ struct lpass_data {
struct clk *pcnoc_mport_clk;
struct clk *pcnoc_sway_clk;
- void *private_data;
};
/* Variant data per each SOC */
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 97d6700b1009..cbc0023c2bc8 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -383,11 +383,6 @@ static int s3c_ac97_probe(struct platform_device *pdev)
goto err4;
}
- ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
- s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
- if (ret)
- goto err5;
-
ret = samsung_asoc_dma_platform_register(&pdev->dev,
ac97_pdata->dma_filter,
NULL, NULL);
@@ -396,6 +391,11 @@ static int s3c_ac97_probe(struct platform_device *pdev)
goto err5;
}
+ ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
+ s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
+ if (ret)
+ goto err5;
+
return 0;
err5:
free_irq(irq_res->start, NULL);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 7e32cf4581f8..7825bff45ae3 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1237,14 +1237,14 @@ static int samsung_i2s_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to get drvdata\n");
return -EFAULT;
}
- ret = devm_snd_soc_register_component(&sec_dai->pdev->dev,
- &samsung_i2s_component,
- &sec_dai->i2s_dai_drv, 1);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev,
+ sec_dai->filter, "tx-sec", NULL);
if (ret != 0)
return ret;
- return samsung_asoc_dma_platform_register(&pdev->dev,
- sec_dai->filter, "tx-sec", NULL);
+ return devm_snd_soc_register_component(&sec_dai->pdev->dev,
+ &samsung_i2s_component,
+ &sec_dai->i2s_dai_drv, 1);
}
pri_dai = i2s_alloc_dai(pdev, false);
@@ -1314,6 +1314,11 @@ static int samsung_i2s_probe(struct platform_device *pdev)
if (quirks & QUIRK_PRI_6CHAN)
pri_dai->i2s_dai_drv.playback.channels_max = 6;
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
+ NULL, NULL);
+ if (ret < 0)
+ goto err_disable_clk;
+
if (quirks & QUIRK_SEC_DAI) {
sec_dai = i2s_alloc_dai(pdev, true);
if (!sec_dai) {
@@ -1353,10 +1358,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
if (ret < 0)
goto err_free_dai;
- ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
- NULL, NULL);
- if (ret < 0)
- goto err_free_dai;
pm_runtime_enable(&pdev->dev);
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 43e367a9acc3..c484985812ed 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -565,24 +565,25 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
+ goto err5;
+ }
+
pm_runtime_enable(&pdev->dev);
ret = devm_snd_soc_register_component(&pdev->dev, &s3c_pcm_component,
&s3c_pcm_dai[pdev->id], 1);
if (ret != 0) {
dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret);
- goto err5;
- }
-
- ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
- goto err5;
+ goto err6;
}
return 0;
-
+err6:
+ pm_runtime_disable(&pdev->dev);
err5:
clk_disable_unprepare(pcm->pclk);
err4:
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 3e89fbc0c51d..0a4718207e6e 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -168,19 +168,19 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD;
s3c2412_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
- ret = s3c_i2sv2_register_component(&pdev->dev, -1,
- &s3c2412_i2s_component,
- &s3c2412_i2s_dai);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev,
+ pdata->dma_filter,
+ NULL, NULL);
if (ret) {
- pr_err("failed to register the dai\n");
+ pr_err("failed to register the DMA: %d\n", ret);
return ret;
}
- ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter,
- NULL, NULL);
+ ret = s3c_i2sv2_register_component(&pdev->dev, -1,
+ &s3c2412_i2s_component,
+ &s3c2412_i2s_dai);
if (ret)
- pr_err("failed to register the DMA: %d\n", ret);
+ pr_err("failed to register the dai\n");
return ret;
}
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index c78a936a3099..9052f6a7073e 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -474,18 +474,18 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO;
s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev,
+ pdata->dma_filter,
+ NULL, NULL);
if (ret) {
- pr_err("failed to register the dai\n");
+ pr_err("failed to register the dma: %d\n", ret);
return ret;
}
- ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter,
- NULL, NULL);
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
if (ret)
- pr_err("failed to register the dma: %d\n", ret);
+ pr_err("failed to register the dai\n");
return ret;
}
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 26c1fbed4d35..779504f54bc0 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -416,15 +416,6 @@ static int spdif_probe(struct platform_device *pdev)
goto err3;
}
- dev_set_drvdata(&pdev->dev, spdif);
-
- ret = devm_snd_soc_register_component(&pdev->dev,
- &samsung_spdif_component, &samsung_spdif_dai, 1);
- if (ret != 0) {
- dev_err(&pdev->dev, "fail to register dai\n");
- goto err4;
- }
-
spdif_stereo_out.addr_width = 2;
spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF;
filter = NULL;
@@ -432,7 +423,6 @@ static int spdif_probe(struct platform_device *pdev)
spdif_stereo_out.filter_data = spdif_pdata->dma_playback;
filter = spdif_pdata->dma_filter;
}
-
spdif->dma_playback = &spdif_stereo_out;
ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
@@ -442,6 +432,15 @@ static int spdif_probe(struct platform_device *pdev)
goto err4;
}
+ dev_set_drvdata(&pdev->dev, spdif);
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &samsung_spdif_component, &samsung_spdif_dai, 1);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "fail to register dai\n");
+ goto err4;
+ }
+
return 0;
err4:
iounmap(spdif->regs);
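
The samsung patches above all apply one ordering rule: register the DMA
platform side before the DAI component, since component registration is
what allows the sound card to bind, and a card probing in between must not
find the DMA side missing. A compressed sketch of that ordering; the helper
names are stand-ins, not the kernel API:

	#include <stdio.h>

	static int dma_ready;

	/* Registering the component can immediately probe the card, which
	 * needs the DMA side; so the DMA side must already be in place. */
	static int register_component(void)
	{
		printf("card probe: dma %s\n", dma_ready ? "ready" : "MISSING");
		return dma_ready ? 0 : -1;
	}

	static int register_dma(void)
	{
		dma_ready = 1;
		return 0;
	}

	int main(void)
	{
		if (register_dma() || register_component())	/* fixed order */
			return 1;
		return 0;
	}
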
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 1bc8ebc2528e..ad54d4cf58ad 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -614,7 +614,11 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
iec958->status[3] = ucontrol->value.iec958.status[3];
mutex_unlock(&player->ctrl_lock);
- uni_player_set_channel_status(player, NULL);
+ if (player->substream && player->substream->runtime)
+ uni_player_set_channel_status(player,
+ player->substream->runtime);
+ else
+ uni_player_set_channel_status(player, NULL);
return 0;
}
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index e047ec06d538..56ed9472e89f 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -765,11 +765,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
if (!card)
- return NULL;
+ return ERR_PTR(-ENOMEM);
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
- return NULL;
+ return ERR_PTR(-ENOMEM);
card->dev = dev;
card->name = "sun4i-codec";
@@ -829,12 +829,6 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return PTR_ERR(scodec->clk_module);
}
- /* Enable the bus clock */
- if (clk_prepare_enable(scodec->clk_apb)) {
- dev_err(&pdev->dev, "Failed to enable the APB clock\n");
- return -EINVAL;
- }
-
scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa",
GPIOD_OUT_LOW);
if (IS_ERR(scodec->gpio_pa)) {
@@ -844,6 +838,12 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return ret;
}
+ /* Enable the bus clock */
+ if (clk_prepare_enable(scodec->clk_apb)) {
+ dev_err(&pdev->dev, "Failed to enable the APB clock\n");
+ return -EINVAL;
+ }
+
/* DMA configuration for TX FIFO */
scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA;
scodec->playback_dma_data.maxburst = 4;
@@ -876,7 +876,8 @@ static int sun4i_codec_probe(struct platform_device *pdev)
}
card = sun4i_codec_create_card(&pdev->dev);
- if (!card) {
+ if (IS_ERR(card)) {
+ ret = PTR_ERR(card);
dev_err(&pdev->dev, "Failed to create our card\n");
goto err_unregister_codec;
}
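
Returning ERR_PTR(-ENOMEM) instead of NULL, as the sun4i-codec hunks do,
lets the caller recover a real errno with PTR_ERR() after testing IS_ERR();
a bare NULL collapses every failure into one anonymous code. A userspace
re-implementation of the three helpers, whose kernel versions live in
<linux/err.h>:

	#include <stdio.h>
	#include <errno.h>

	/* Encode a small negative errno at the top of the address space,
	 * the same trick ERR_PTR()/IS_ERR()/PTR_ERR() use. */
	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-4095;
	}

	static void *create_card(int fail)
	{
		static int card;

		if (fail)
			return ERR_PTR(-ENOMEM);	/* not NULL: keep the code */
		return &card;
	}

	int main(void)
	{
		void *card = create_card(1);

		if (IS_ERR(card))
			printf("probe fails with %ld\n", PTR_ERR(card));	/* -12 */
		return 0;
	}
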
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index b4bf76971dc9..1eef0aed6423 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -296,7 +296,7 @@ int cmd_freq_set(int argc, char **argv)
struct cpufreq_affected_cpus *cpus;
if (!bitmask_isbitset(cpus_chosen, cpu) ||
- cpupower_is_cpu_online(cpu))
+ cpupower_is_cpu_online(cpu) != 1)
continue;
cpus = cpufreq_get_related_cpus(cpu);
@@ -316,10 +316,7 @@ int cmd_freq_set(int argc, char **argv)
cpu <= bitmask_last(cpus_chosen); cpu++) {
if (!bitmask_isbitset(cpus_chosen, cpu) ||
- cpupower_is_cpu_online(cpu))
- continue;
-
- if (cpupower_is_cpu_online(cpu) != 1)
+ cpupower_is_cpu_online(cpu) != 1)
continue;
printf(_("Setting cpu: %d\n"), cpu);
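
The cpufreq-set fix above matters because cpupower_is_cpu_online() is
tri-state: 1 for online, 0 for offline, a negative errno on failure. The
old truthiness test therefore skipped every online CPU and kept looping on
errors; comparing against 1 is the only safe check. A sketch with a
stand-in helper:

	#include <stdio.h>
	#include <errno.h>

	/* stand-in: 1 = online, 0 = offline, <0 = -errno */
	static int is_cpu_online(int cpu)
	{
		if (cpu == 2)
			return -ENOSYS;
		return cpu != 3;	/* cpu 3 offline, others online */
	}

	int main(void)
	{
		for (int cpu = 0; cpu < 4; cpu++) {
			if (is_cpu_online(cpu) != 1)	/* not: if (is_cpu_online(cpu)) */
				continue;
			printf("setting cpu %d\n", cpu);	/* 0 and 1 only */
		}
		return 0;
	}
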
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index e18b30ddcdce..ebe1b9fa3c4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
return container_of(dev, struct vgic_io_device, dev);
}
-static bool check_region(const struct vgic_register_region *region,
+static bool check_region(const struct kvm *kvm,
+ const struct vgic_register_region *region,
gpa_t addr, int len)
{
- if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
- return true;
- if ((region->access_flags & VGIC_ACCESS_32bit) &&
- len == sizeof(u32) && !(addr & 3))
- return true;
- if ((region->access_flags & VGIC_ACCESS_64bit) &&
- len == sizeof(u64) && !(addr & 7))
- return true;
+ int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+
+ switch (len) {
+ case sizeof(u8):
+ flags = VGIC_ACCESS_8bit;
+ break;
+ case sizeof(u32):
+ flags = VGIC_ACCESS_32bit;
+ break;
+ case sizeof(u64):
+ flags = VGIC_ACCESS_64bit;
+ break;
+ default:
+ return false;
+ }
+
+ if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+ if (!region->bits_per_irq)
+ return true;
+
+ /* Do we access a non-allocated IRQ? */
+ return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+ }
return false;
}
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
addr - iodev->base_addr);
- if (!region || !check_region(region, addr, len)) {
+ if (!region || !check_region(vcpu->kvm, region, addr, len)) {
memset(val, 0, len);
return 0;
}
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
addr - iodev->base_addr);
- if (!region)
- return 0;
-
- if (!check_region(region, addr, len))
+ if (!region || !check_region(vcpu->kvm, region, addr, len))
return 0;
switch (iodev->iodev_type) {
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 4c34d39d44a0..84961b4e4422 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
/*
- * (addr & mask) gives us the byte offset for the INT ID, so we want to
- * divide this with 'bytes per irq' to get the INT ID, which is given
- * by '(bits) / 8'. But we do this with fixed-point-arithmetic and
- * take advantage of the fact that division by a fraction equals
- * multiplication with the inverted fraction, and scale up both the
- * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ * (addr & mask) gives us the _byte_ offset for the INT ID.
+ * We multiply this by 8 to get the _bit_ offset, then divide this by
+ * the number of bits to learn the actual INT ID.
+ * But instead of a division (which requires a "long long div" implementation),
+ * we shift by the binary logarithm of <bits>.
+ * This assumes that <bits> is a power of two.
*/
#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
- 64 / (bits) / 8)
+ 8 >> ilog2(bits))
/*
* Some VGIC registers store per-IRQ information, with a different number
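
The rewritten macro trades the 64-bit division for a shift, which is valid
because <bits> is a power of two. Worked through in userspace, with ILOG2
standing in for the kernel's ilog2():

	#include <stdio.h>

	/* Userspace copy of the macros; like the real one, this assumes
	 * bits is a power of two. */
	#define VGIC_ADDR_IRQ_MASK(bits)	(((bits) * 1024 / 8) - 1)
	#define ILOG2(x)			(31 - __builtin_clz(x))
	#define VGIC_ADDR_TO_INTID(addr, bits)	(((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
						8 >> ILOG2(bits))

	int main(void)
	{
		/* 2 bits per IRQ: byte 3 holds bits 24..31, i.e. INT IDs 12..15 */
		printf("%d\n", VGIC_ADDR_TO_INTID(0x3, 2));	/* 12 */
		/* 8 bits per IRQ: one byte per INT ID */
		printf("%d\n", VGIC_ADDR_TO_INTID(0x41, 8));	/* 65 */
		return 0;
	}
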
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 2893d5ba523a..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -273,6 +273,18 @@ retry:
* no more work for us to do.
*/
spin_unlock(&irq->irq_lock);
+
+ /*
+ * We have to kick the VCPU here, because we could be
+ * queueing an edge-triggered interrupt for which we
+ * get no EOI maintenance interrupt. In that case,
+ * while the IRQ is already on the VCPU's AP list, the
+ * VCPU could have EOI'ed the original interrupt and
+ * won't see this one until it exits for some other
+ * reason.
+ */
+ if (vcpu)
+ kvm_vcpu_kick(vcpu);
return false;
}