-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt | 75
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.txt | 47
-rw-r--r--  Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.txt | 47
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-common.txt | 91
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-lvds.txt | 120
-rw-r--r--  Documentation/devicetree/bindings/display/renesas,du.txt | 3
-rw-r--r--  Documentation/filesystems/Locking | 3
-rw-r--r--  Documentation/filesystems/porting | 6
-rw-r--r--  Documentation/filesystems/vfs.txt | 3
-rw-r--r--  Documentation/pinctrl.txt | 8
-rw-r--r--  Documentation/process/stable-kernel-rules.rst | 2
-rw-r--r--  Documentation/virtual/kvm/devices/arm-vgic.txt | 6
-rw-r--r--  MAINTAINERS | 19
-rw-r--r--  Makefile | 16
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 2
-rw-r--r--  arch/arm/kvm/arm.c | 3
-rw-r--r--  arch/arm/kvm/mmu.c | 23
-rw-r--r--  arch/arm/mm/dma-mapping.c | 20
-rw-r--r--  arch/arm/mm/nommu.c | 5
-rw-r--r--  arch/arm/probes/kprobes/core.c | 49
-rw-r--r--  arch/arm/probes/kprobes/test-core.c | 11
-rw-r--r--  arch/arm64/mm/fault.c | 42
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 14
-rw-r--r--  arch/metag/include/asm/uaccess.h | 15
-rw-r--r--  arch/metag/lib/usercopy.c | 312
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/include/asm/fpu.h | 1
-rw-r--r--  arch/mips/include/asm/irq.h | 15
-rw-r--r--  arch/mips/include/asm/spinlock.h | 8
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 15
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 1
-rw-r--r--  arch/mips/kernel/cps-vec.S | 2
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 2
-rw-r--r--  arch/mips/kernel/genex.S | 12
-rw-r--r--  arch/mips/kernel/process.c | 56
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-64.S | 1
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 1
-rw-r--r--  arch/mips/kernel/traps.c | 17
-rw-r--r--  arch/mips/lantiq/xway/sysctrl.c | 2
-rw-r--r--  arch/mips/mm/c-r4k.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 25
-rw-r--r--  arch/mips/ralink/rt3883.c | 4
-rw-r--r--  arch/nios2/kernel/prom.c | 7
-rw-r--r--  arch/nios2/kernel/setup.c | 3
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c | 3
-rw-r--r--  arch/powerpc/kernel/align.c | 27
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 4
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 7
-rw-r--r--  arch/s390/boot/compressed/misc.c | 35
-rw-r--r--  arch/s390/include/asm/uaccess.h | 2
-rw-r--r--  arch/s390/kernel/smp.c | 5
-rw-r--r--  arch/s390/kvm/gaccess.c | 7
-rw-r--r--  arch/sparc/include/asm/page_64.h | 3
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 15
-rw-r--r--  arch/sparc/include/asm/processor_32.h | 6
-rw-r--r--  arch/sparc/include/asm/processor_64.h | 4
-rw-r--r--  arch/sparc/kernel/head_64.S | 4
-rw-r--r--  arch/sparc/kernel/misctrap.S | 1
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 1
-rw-r--r--  arch/sparc/kernel/spiterrs.S | 1
-rw-r--r--  arch/sparc/kernel/sun4v_tlb_miss.S | 1
-rw-r--r--  arch/sparc/kernel/urtt_fill.S | 1
-rw-r--r--  arch/sparc/kernel/winfixup.S | 2
-rw-r--r--  arch/sparc/lib/NG2memcpy.S | 4
-rw-r--r--  arch/sparc/lib/NG4memcpy.S | 1
-rw-r--r--  arch/sparc/lib/NG4memset.S | 1
-rw-r--r--  arch/sparc/lib/NGmemcpy.S | 1
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 9
-rw-r--r--  arch/sparc/mm/init_64.c | 6
-rw-r--r--  arch/sparc/mm/srmmu.c | 1
-rw-r--r--  arch/sparc/mm/tlb.c | 6
-rw-r--r--  arch/sparc/mm/tsb.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 7
-rw-r--r--  arch/x86/kvm/vmx.c | 15
-rw-r--r--  arch/xtensa/include/asm/page.h | 13
-rw-r--r--  arch/xtensa/include/uapi/asm/unistd.h | 5
-rw-r--r--  arch/xtensa/kernel/traps.c | 6
-rw-r--r--  block/blk-mq-sched.c | 181
-rw-r--r--  block/blk-mq-sched.h | 25
-rw-r--r--  block/blk-mq.c | 85
-rw-r--r--  block/blk-mq.h | 2
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  block/elevator.c | 114
-rw-r--r--  drivers/acpi/glue.c | 12
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 268
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 274
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 82
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h | 50
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 324
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h | 28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 57
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 65
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 99
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 427
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 19
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 229
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 85
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 98
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 828
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 100
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 429
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 386
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 131
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 88
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 286
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 342
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 150
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_request.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/scatterlist.c | 11
-rw-r--r--  drivers/gpu/drm/imx/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/imx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 18
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 2
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 8
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 64
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 39
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 75
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 138
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 69
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 54
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 572
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 38
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 29
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 73
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c | 93
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c | 90
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c | 286
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/Makefile | 6
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 94
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 32
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.h | 8
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 187
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 14
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 134
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h | 35
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 143
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 68
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 11
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_regs.h | 23
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 82
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.h | 23
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 100
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 49
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 14
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 77
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 31
-rw-r--r--  drivers/gpu/ipu-v3/Makefile | 6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 5
-rw-r--r--  drivers/iio/accel/hid-sensor-accel-3d.c | 3
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c | 4
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 10
-rw-r--r--  drivers/iio/gyro/bmg160_core.c | 12
-rw-r--r--  drivers/iio/industrialio-core.c | 7
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c | 1
-rw-r--r--  drivers/isdn/capi/kcapi.c | 1
-rw-r--r--  drivers/md/dm-cache-metadata.c | 8
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-rq.c | 1
-rw-r--r--  drivers/md/dm-verity-fec.c | 18
-rw-r--r--  drivers/md/dm-verity-fec.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 23
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 30
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 10
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 9
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 5
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 20
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 3
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 16
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 8
-rw-r--r--  drivers/net/phy/mdio-boardinfo.c | 1
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 15
-rw-r--r--  drivers/net/usb/r8152.c | 21
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 6
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 2
-rw-r--r--  drivers/nvme/target/io-cmd.c | 4
-rw-r--r--  drivers/pci/dwc/Kconfig | 1
-rw-r--r--  drivers/pci/dwc/pcie-artpec6.c | 4
-rw-r--r--  drivers/pci/dwc/pcie-designware-plat.c | 4
-rw-r--r--  drivers/pci/host/pci-thunder-pem.c | 10
-rw-r--r--  drivers/pinctrl/core.c | 97
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 2
-rw-r--r--  drivers/pinctrl/sh-pfc/pinctrl.c | 11
-rw-r--r--  drivers/pinctrl/ti/pinctrl-ti-iodelay.c | 2
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 53
-rw-r--r--  drivers/s390/net/qeth_core.h | 3
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 5
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 5
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 20
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/staging/android/ashmem.c | 1
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 4
-rw-r--r--  fs/cifs/cifsfs.c | 87
-rw-r--r--  fs/cifs/cifsfs.h | 5
-rw-r--r--  fs/cifs/cifsglob.h | 18
-rw-r--r--  fs/cifs/cifsproto.h | 3
-rw-r--r--  fs/cifs/cifssmb.c | 11
-rw-r--r--  fs/cifs/connect.c | 13
-rw-r--r--  fs/cifs/ioctl.c | 66
-rw-r--r--  fs/cifs/smb2misc.c | 46
-rw-r--r--  fs/cifs/smb2ops.c | 41
-rw-r--r--  fs/cifs/smb2pdu.c | 4
-rw-r--r--  fs/cifs/smb2proto.h | 7
-rw-r--r--  fs/cifs/smb2transport.c | 55
-rw-r--r--  fs/cifs/transport.c | 2
-rw-r--r--  fs/dax.c | 35
-rw-r--r--  fs/ext4/ext4.h | 1
-rw-r--r--  fs/ext4/file.c | 2
-rw-r--r--  fs/ext4/inode.c | 41
-rw-r--r--  fs/ext4/namei.c | 2
-rw-r--r--  fs/ext4/symlink.c | 3
-rw-r--r--  fs/orangefs/super.c | 9
-rw-r--r--  fs/proc/proc_sysctl.c | 1
-rw-r--r--  fs/stat.c | 86
-rw-r--r--  fs/sysfs/file.c | 6
-rw-r--r--  fs/userfaultfd.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_priv.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_sf.c | 63
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.c | 35
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.h | 2
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 10
-rw-r--r--  fs/xfs/xfs_inode.c | 19
-rw-r--r--  fs/xfs/xfs_iops.c | 14
-rw-r--r--  fs/xfs/xfs_itable.c | 2
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 4
-rw-r--r--  include/drm/drm_connector.h | 4
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 11
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 9
-rw-r--r--  include/drm/ttm/ttm_object.h | 5
-rw-r--r--  include/drm/ttm/ttm_placement.h | 1
-rw-r--r--  include/kvm/arm_vgic.h | 1
-rw-r--r--  include/linux/blk-mq.h | 2
-rw-r--r--  include/linux/blkdev.h | 1
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/irqchip/arm-gic.h | 3
-rw-r--r--  include/linux/mfd/cros_ec.h | 3
-rw-r--r--  include/linux/nvme.h | 16
-rw-r--r--  include/linux/pinctrl/pinctrl.h | 3
-rw-r--r--  include/linux/stat.h | 1
-rw-r--r--  include/net/sctp/sctp.h | 22
-rw-r--r--  include/net/sctp/structs.h | 11
-rw-r--r--  include/uapi/drm/etnaviv_drm.h | 8
-rw-r--r--  include/uapi/linux/stat.h | 5
-rw-r--r--  kernel/audit.h | 8
-rw-r--r--  kernel/auditsc.c | 25
-rw-r--r--  kernel/bpf/verifier.c | 64
-rw-r--r--  kernel/ptrace.c | 14
-rw-r--r--  kernel/sysctl.c | 3
-rw-r--r--  kernel/trace/ring_buffer.c | 8
-rw-r--r--  mm/huge_memory.c | 12
-rw-r--r--  mm/internal.h | 7
-rw-r--r--  mm/mempolicy.c | 20
-rw-r--r--  mm/page_alloc.c | 11
-rw-r--r--  mm/page_vma_mapped.c | 15
-rw-r--r--  mm/swap.c | 27
-rw-r--r--  mm/swap_cgroup.c | 2
-rw-r--r--  mm/vmstat.c | 15
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/neighbour.c | 3
-rw-r--r--  net/core/secure_seq.c | 31
-rw-r--r--  net/core/sysctl_net_core.c | 6
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 20
-rw-r--r--  net/ipv4/ping.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 41
-rw-r--r--  net/ipv4/tcp_recovery.c | 3
-rw-r--r--  net/kcm/kcmsock.c | 6
-rw-r--r--  net/l2tp/l2tp_core.c | 160
-rw-r--r--  net/l2tp/l2tp_core.h | 9
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 10
-rw-r--r--  net/l2tp/l2tp_eth.c | 10
-rw-r--r--  net/l2tp/l2tp_ip.c | 22
-rw-r--r--  net/l2tp/l2tp_ip6.c | 23
-rw-r--r--  net/l2tp/l2tp_netlink.c | 52
-rw-r--r--  net/l2tp/l2tp_ppp.c | 94
-rw-r--r--  net/mac80211/iface.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 1
-rw-r--r--  net/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 287
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 9
-rw-r--r--  net/openvswitch/conntrack.c | 4
-rw-r--r--  net/openvswitch/flow.c | 10
-rw-r--r--  net/packet/af_packet.c | 8
-rw-r--r--  net/sctp/associola.c | 13
-rw-r--r--  net/sctp/input.c | 4
-rw-r--r--  net/sctp/output.c | 69
-rw-r--r--  net/sctp/outqueue.c | 3
-rw-r--r--  net/sctp/proc.c | 4
-rw-r--r--  net/sctp/sm_make_chunk.c | 9
-rw-r--r--  net/sctp/sm_statefuns.c | 6
-rw-r--r--  net/sctp/socket.c | 18
-rw-r--r--  net/sctp/stream.c | 43
-rw-r--r--  net/sctp/transport.c | 19
-rw-r--r--  net/wireless/sysfs.c | 10
-rw-r--r--  samples/statx/test-statx.c | 12
-rw-r--r--  scripts/Makefile.lib | 2
-rw-r--r--  scripts/kconfig/gconf.c | 2
-rw-r--r--  tools/include/linux/filter.h | 10
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 9
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 290
-rw-r--r--  tools/testing/selftests/powerpc/Makefile | 10
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c | 19
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c | 20
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c | 23
-rw-r--r--  virt/kvm/arm/vgic/vgic.h | 11
519 files changed, 10086 insertions(+), 5651 deletions(-)
diff --git a/.mailmap b/.mailmap
index 67dc22ffc9a8..e229922dc7f0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -171,6 +171,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
new file mode 100644
index 000000000000..f6b3f36d422b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
@@ -0,0 +1,75 @@
+Renesas Gen3 DWC HDMI TX Encoder
+================================
+
+The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller IP
+with a companion PHY IP.
+
+These DT bindings follow the Synopsys DWC HDMI TX bindings defined in
+Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with the
+following device-specific properties.
+
+
+Required properties:
+
+- compatible : Shall contain one or more of
+ - "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX
+ - "renesas,rcar-gen3-hdmi" for the generic R-Car Gen3 compatible HDMI TX
+
+ When compatible with generic versions, nodes must list the SoC-specific
+ version corresponding to the platform first, followed by the
+ family-specific version.
+
+- reg: See dw_hdmi.txt.
+- interrupts: HDMI interrupt number
+- clocks: See dw_hdmi.txt.
+- clock-names: Shall contain "iahb" and "isfr" as defined in dw_hdmi.txt.
+- ports: See dw_hdmi.txt. The DWC HDMI shall have one port numbered 0
+ corresponding to the video input of the controller and one port numbered 1
+ corresponding to its HDMI output. Each port shall have a single endpoint.
+
+Optional properties:
+
+- power-domains: Shall reference the power domain that contains the DWC HDMI,
+ if any.
+
+
+Example:
+
+ hdmi0: hdmi0@fead0000 {
+ compatible = "renesas,r8a7795-dw-hdmi";
+ reg = <0 0xfead0000 0 0x10000>;
+ interrupts = <0 389 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_CORE R8A7795_CLK_S0D4>, <&cpg CPG_MOD 729>;
+ clock-names = "iahb", "isfr";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ dw_hdmi0_in: endpoint {
+ remote-endpoint = <&du_out_hdmi0>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ rcar_dw_hdmi0_out: endpoint {
+ remote-endpoint = <&hdmi0_con>;
+ };
+ };
+ };
+ };
+
+ hdmi0-out {
+ compatible = "hdmi-connector";
+ label = "HDMI0 OUT";
+ type = "a";
+
+ port {
+ hdmi0_con: endpoint {
+ remote-endpoint = <&rcar_dw_hdmi0_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 708f5664a316..383183a89164 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -40,6 +40,7 @@ Required properties (all function blocks):
"mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
"mediatek,<chip>-disp-mutex" - display mutex
"mediatek,<chip>-disp-od" - overdrive
+ the supported chips are mt2701 and mt8173.
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
merge and split function blocks).
@@ -54,6 +55,7 @@ Required properties (DMA function blocks):
"mediatek,<chip>-disp-ovl"
"mediatek,<chip>-disp-rdma"
"mediatek,<chip>-disp-wdma"
+ the supported chips are mt2701 and mt8173.
- larb: Should contain a phandle pointing to the local arbiter device as defined
in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
- iommus: Should point to the respective IOMMU block with master port as
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
index 2b1585a34b85..fadf327c7cdf 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -7,6 +7,7 @@ channel output.
Required properties:
- compatible: "mediatek,<chip>-dsi"
+ the supported chips are mt2701 and mt8173.
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks
@@ -25,6 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY.
Required properties:
- compatible: "mediatek,<chip>-mipi-tx"
+ the supported chips are mt2701 and mt8173.
- reg: Physical base address and length of the controller's registers
- clocks: PLL reference clock
- clock-output-names: name of the output clock line to the DSI encoder
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.txt
new file mode 100644
index 000000000000..ced0121aed7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.txt
@@ -0,0 +1,47 @@
+Mitsubishi AA104XD12 LVDS Display Panel
+=======================================
+
+The AA104XD12 is a 10.4" XGA TFT-LCD display panel.
+
+These DT bindings follow the LVDS panel bindings defined in panel-lvds.txt
+with the following device-specific properties.
+
+
+Required properties:
+
+- compatible: Shall contain "mitsubishi,aa121td01" and "panel-lvds", in that
+ order.
+- vcc-supply: Reference to the regulator powering the panel VCC pins.
+
+
+Example
+-------
+
+panel {
+ compatible = "mitsubishi,aa104xd12", "panel-lvds";
+ vcc-supply = <&vcc_3v3>;
+
+ width-mm = <210>;
+ height-mm = <158>;
+
+ data-mapping = "jeida-24";
+
+ panel-timing {
+ /* 1024x768 @65Hz */
+ clock-frequency = <65000000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hsync-len = <136>;
+ hfront-porch = <20>;
+ hback-porch = <160>;
+ vfront-porch = <3>;
+ vback-porch = <29>;
+ vsync-len = <6>;
+ };
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds_encoder>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.txt
new file mode 100644
index 000000000000..d6e1097504fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.txt
@@ -0,0 +1,47 @@
+Mitsubishi AA121TD01 LVDS Display Panel
+=======================================
+
+The AA121TD01 is a 12.1" WXGA TFT-LCD display panel.
+
+These DT bindings follow the LVDS panel bindings defined in panel-lvds.txt
+with the following device-specific properties.
+
+
+Required properties:
+
+- compatible: Shall contain "mitsubishi,aa121td01" and "panel-lvds", in that
+ order.
+- vcc-supply: Reference to the regulator powering the panel VCC pins.
+
+
+Example
+-------
+
+panel {
+ compatible = "mitsubishi,aa121td01", "panel-lvds";
+ vcc-supply = <&vcc_3v3>;
+
+ width-mm = <261>;
+ height-mm = <163>;
+
+ data-mapping = "jeida-24";
+
+ panel-timing {
+ /* 1280x800 @60Hz */
+ clock-frequency = <71000000>;
+ hactive = <1280>;
+ vactive = <800>;
+ hsync-len = <70>;
+ hfront-porch = <20>;
+ hback-porch = <70>;
+ vsync-len = <5>;
+ vfront-porch = <3>;
+ vback-porch = <15>;
+ };
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds_encoder>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt
new file mode 100644
index 000000000000..ec52c472c845
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt
@@ -0,0 +1,91 @@
+Common Properties for Display Panel
+===================================
+
+This document defines device tree properties common to several classes of
+display panels. It doesn't constitute a device tree binding specification by
+itself but is meant to be referenced by device tree bindings.
+
+When referenced from panel device tree bindings the properties defined in this
+document are defined as follows. The panel device tree bindings are
+responsible for defining whether each property is required or optional.
+
+
+Descriptive Properties
+----------------------
+
+- width-mm,
+- height-mm: The width-mm and height-mm specify the width and height of the
+ physical area where images are displayed. These properties are expressed in
+ millimeters and rounded to the closest unit.
+
+- label: The label property specifies a symbolic name for the panel as a
+ string suitable for use by humans. It typically contains a name inscribed on
+ the system (e.g. as an affixed label) or specified in the system's
+ documentation (e.g. in the user's manual).
+
+ If no such name exists, and unless the property is mandatory according to
+ device tree bindings, it shall rather be omitted than constructed of
+ non-descriptive information. For instance an LCD panel in a system that
+ contains a single panel shall not be labelled "LCD" if that name is not
+ inscribed on the system or used in a descriptive fashion in system
+ documentation.
+
+
+Display Timings
+---------------
+
+- panel-timing: Most display panels are restricted to a single resolution and
+ require specific display timings. The panel-timing subnode expresses those
+ timings as specified in the timing subnode section of the display timing
+ bindings defined in
+ Documentation/devicetree/bindings/display/display-timing.txt.
+
+
+Connectivity
+------------
+
+- ports: Panels receive video data through one or multiple connections. While
+ the nature of those connections is specific to the panel type, the
+ connectivity is expressed in a standard fashion using ports as specified in
+ the device graph bindings defined in
+ Documentation/devicetree/bindings/graph.txt.
+
+- ddc-i2c-bus: Some panels expose EDID information through an I2C-compatible
+ bus such as DDC2 or E-DDC. For such panels the ddc-i2c-bus contains a
+ phandle to the system I2C controller connected to that bus.
+
+
+Control I/Os
+------------
+
+Many display panels can be controlled through pins driven by GPIOs. The nature
+and timing of those control signals are device-specific and left for panel
+device tree bindings to specify. The following GPIO specifiers can however be
+used for panels that implement compatible control signals.
+
+- enable-gpios: Specifier for a GPIO connected to the panel enable control
+ signal. The enable signal is active high and enables operation of the panel.
+ This property can also be used for panels implementing an active low power
+ down signal, which is a negated version of the enable signal. Active low
+ enable signals (or active high power down signals) can be supported by
+ inverting the GPIO specifier polarity flag.
+
+ Note that the enable signal controls panel operation only and must not be
+ confused with a backlight enable signal.
+
+- reset-gpios: Specifier for a GPIO connected to the panel reset control
+ signal. The reset signal is active low and resets the panel internal logic
+ while active. Active high reset signals can be supported by inverting the
+ GPIO specifier polarity flag.
+
+
+Backlight
+---------
+
+Most display panels include a backlight. Some of them also include a backlight
+controller exposed through a control bus such as I2C or DSI. Others expose
+backlight control through GPIO, PWM or other signals connected to an external
+backlight controller.
+
+- backlight: For panels whose backlight is controlled by an external backlight
+ controller, this property contains a phandle that references the controller.
diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.txt b/Documentation/devicetree/bindings/display/panel/panel-lvds.txt
new file mode 100644
index 000000000000..b938269f841e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.txt
@@ -0,0 +1,120 @@
+LVDS Display Panel
+==================
+
+LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple
+incompatible data link layers have been used over time to transmit image data
+to LVDS panels. These bindings support display panels compatible with the
+following specifications.
+
+[JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February
+1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA)
+[LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National
+Semiconductor
+[VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video
+Electronics Standards Association (VESA)
+
+Devices compatible with those specifications have been marketed under the
+FPD-Link and FlatLink brands.
+
+
+Required properties:
+
+- compatible: Shall contain "panel-lvds" in addition to a mandatory
+ panel-specific compatible string defined in individual panel bindings. The
+ "panel-lvds" value shall never be used on its own.
+- width-mm: See panel-common.txt.
+- height-mm: See panel-common.txt.
+- data-mapping: The color signals mapping order, "jeida-18", "jeida-24"
+ or "vesa-24".
+
+Optional properties:
+
+- label: See panel-common.txt.
+- gpios: See panel-common.txt.
+- backlight: See panel-common.txt.
+- data-mirror: If set, reverse the bit order described in the data mappings
+ below on all data lanes, transmitting bits for slots 6 to 0 instead of
+ 0 to 6.
+
+Required nodes:
+
+- panel-timing: See panel-common.txt.
+- ports: See panel-common.txt. These bindings require a single port subnode
+ corresponding to the panel LVDS input.
+
+
+LVDS data mappings are defined as follows.
+
+- "jeida-18" - 18-bit data mapping compatible with the [JEIDA], [LDI] and
+ [VESA] specifications. Data are transferred as follows on 3 LVDS lanes.
+
+Slot 0 1 2 3 4 5 6
+ ________________ _________________
+Clock \_______________________/
+ ______ ______ ______ ______ ______ ______ ______
+DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
+DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
+DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
+
+- "jeida-24" - 24-bit data mapping compatible with the [DSIM] and [LDI]
+ specifications. Data are transferred as follows on 4 LVDS lanes.
+
+Slot 0 1 2 3 4 5 6
+ ________________ _________________
+Clock \_______________________/
+ ______ ______ ______ ______ ______ ______ ______
+DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__><
+DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__><
+DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__><
+DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__><
+
+- "vesa-24" - 24-bit data mapping compatible with the [VESA] specification.
+ Data are transferred as follows on 4 LVDS lanes.
+
+Slot 0 1 2 3 4 5 6
+ ________________ _________________
+Clock \_______________________/
+ ______ ______ ______ ______ ______ ______ ______
+DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
+DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
+DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
+DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__><
+
+Control signals are mapped as follows.
+
+CTL0: HSync
+CTL1: VSync
+CTL2: Data Enable
+CTL3: 0
+
+
+Example
+-------
+
+panel {
+ compatible = "mitsubishi,aa121td01", "panel-lvds";
+
+ width-mm = <261>;
+ height-mm = <163>;
+
+ data-mapping = "jeida-24";
+
+ panel-timing {
+ /* 1280x800 @60Hz */
+ clock-frequency = <71000000>;
+ hactive = <1280>;
+ vactive = <800>;
+ hsync-len = <70>;
+ hfront-porch = <20>;
+ hback-porch = <70>;
+ vsync-len = <5>;
+ vfront-porch = <3>;
+ vback-porch = <15>;
+ };
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds_encoder>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 1a02f099a0ff..c6cb96a4fa93 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -36,6 +36,9 @@ Required Properties:
When supplied they must be named "dclkin.x" with "x" being the input
clock numerical index.
+ - vsps: A list of phandles to the VSP nodes that handle the memory
+ interfaces for the DU channels.
+
Required nodes:
The connections to the DU output video ports are modeled using the OF graph
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index fdcfdd79682a..fe25787ff6d4 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -58,8 +58,7 @@ prototypes:
int (*permission) (struct inode *, int, unsigned int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct dentry *, struct kstat *,
- u32, unsigned int);
+ int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
void (*update_time)(struct inode *, struct timespec *, int);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 95280079c0b3..5fb17f49f7a2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -600,3 +600,9 @@ in your dentry operations instead.
[recommended]
->readlink is optional for symlinks. Don't set, unless filesystem needs
to fake something for readlink(2).
+--
+[mandatory]
+ ->getattr() is now passed a struct path rather than a vfsmount and
+ dentry separately, and it now has request_mask and query_flags arguments
+ to specify the fields and sync type requested by statx. Filesystems not
+ supporting any statx-specific features may ignore the new arguments.
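For illustration, a minimal sketch of a ->getattr() implementation against the new prototype described in the porting note above, assuming a filesystem with no statx-specific features (the foo_getattr name is hypothetical):

	static int foo_getattr(const struct path *path, struct kstat *stat,
			       u32 request_mask, unsigned int query_flags)
	{
		struct inode *inode = d_inode(path->dentry);

		/* No statx-specific features are supported, so request_mask
		 * and query_flags may be ignored, as the note above allows. */
		generic_fillattr(inode, stat);
		return 0;
	}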
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 569211703721..94dd27ef4a76 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -382,8 +382,7 @@ struct inode_operations {
int (*permission) (struct inode *, int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct dentry *, struct kstat *,
- u32, unsigned int);
+ int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index 54bd5faa8782..f2af35f6d6b2 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
int __init foo_probe(void)
{
+ int error;
+
struct pinctrl_dev *pctl;
- return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+ error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+ if (error)
+ return error;
+
+ return pinctrl_enable(pctl);
}
To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 11ec2d93a5e0..61e9c78bd6d1 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
.. code-block:: none
- Cc: <stable@vger.kernel.org> # 3.3.x-
+ Cc: <stable@vger.kernel.org> # 3.3.x
The tag has the meaning of:
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 76e61c883347..b2f60ca8b60c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
Bits for undefined preemption levels are RAZ/WI.
+ For historical reasons and to provide ABI compatibility with userspace we
+ export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
+ field in the lower 5 bits of a word, meaning that userspace must always
+ use the lower 5 bits to communicate with the KVM device and must shift the
+ value left by 3 places to obtain the actual priority mask level.
+
Limitations:
- Priorities are not implemented, and registers are RAZ/WI
- Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
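For illustration, a minimal userspace sketch of the GICC_PMR format conversion described in the arm-vgic.txt hunk above; the helper names are hypothetical, and only the 5-bit VMPriMask field and the shift by 3 places come from the documentation text:

	#include <stdint.h>

	/* KVM exports GICC_PMR in GICH_VMCR.VMPriMask format: the mask
	 * level occupies the lower 5 bits of the word. */
	static inline uint32_t vmprimask_to_priority(uint32_t reg)
	{
		return (reg & 0x1f) << 3;	/* actual priority mask level */
	}

	static inline uint32_t priority_to_vmprimask(uint32_t pmr)
	{
		return (pmr >> 3) & 0x1f;	/* value to hand to the KVM device */
	}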
diff --git a/MAINTAINERS b/MAINTAINERS
index 4ea82b26cc2e..7df09152b2e0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4117,14 +4117,13 @@ F: drivers/block/drbd/
F: lib/lru_cache.c
F: Documentation/blockdev/drbd/
-DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
+DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
F: drivers/base/
F: fs/debugfs/
-F: fs/kernfs/
F: fs/sysfs/
F: include/linux/debugfs.h
F: include/linux/kobj*
@@ -4384,6 +4383,7 @@ S: Supported
F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
F: include/linux/platform_data/shmob_drm.h
+F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
F: Documentation/devicetree/bindings/display/renesas,du.txt
DRM DRIVER FOR QXL VIRTUAL GPU
@@ -4937,6 +4937,7 @@ F: include/linux/netfilter_bridge/
F: net/bridge/
ETHERNET PHY LIBRARY
+M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -7098,9 +7099,9 @@ S: Maintained
F: fs/autofs4/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
+M: Masahiro Yamada <yamada.masahiro@socionext.com>
M: Michal Marek <mmarek@suse.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
L: linux-kbuild@vger.kernel.org
S: Maintained
F: Documentation/kbuild/
@@ -7217,6 +7218,14 @@ F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/
+KERNFS
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M: Tejun Heo <tj@kernel.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+S: Supported
+F: include/linux/kernfs.h
+F: fs/kernfs/
+
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
W: http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -10831,6 +10840,7 @@ F: drivers/s390/block/dasd*
F: block/partitions/ibm.c
S390 NETWORK DRIVERS
+M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -10861,6 +10871,7 @@ S: Supported
F: drivers/s390/scsi/zfcp_*
S390 IUCV NETWORK LAYER
+M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
diff --git a/Makefile b/Makefile
index e11989d36c87..efa267a92ba6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
+CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 0b961093ca5c..6d76e528ab8f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
/* copy relevant bits of struct timex. */
if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
- offsetof(struct timex32, time)))
+ offsetof(struct timex32, tick)))
return -EFAULT;
ret = do_adjtimex(&txc);
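The hunk above fixes a length/offset mismatch: the second copy_from_user() reads from &txc_p->tick, but its length was computed from offsetof(struct timex32, time), so the copy overran the end of the source structure (time precedes tick in struct timex32). A minimal sketch of the correct partial-copy idiom, with illustrative names:

	/* When copying the tail of a structure starting at member M, the
	 * length must be sizeof(type) - offsetof(type, M) for that same
	 * member M, or the copy runs past the end of the source. */
	size_t tail = sizeof(struct timex32) - offsetof(struct timex32, tick);

	if (copy_from_user(&txc.tick, &txc_p->tick, tail))
		return -EFAULT;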
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 96dba7cd8be7..314eb6abe1ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
if (__hyp_get_vectors() == hyp_default_vectors)
cpu_init_hyp_mode(NULL);
}
+
+ if (vgic_present)
+ kvm_vgic_init_cpu_hardware();
}
static void cpu_hyp_reset(void)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 962616fd4ddd..582a972371cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
+ assert_spin_locked(&kvm->mmu_lock);
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
next = stage2_pgd_addr_end(addr, end);
if (!stage2_pgd_none(*pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
+ /*
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+ if (next != end)
+ cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
+ down_read(&current->mm->mmap_sem);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+ up_read(&current->mm->mmap_sem);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
if (kvm->arch.pgd == NULL)
return;
+ spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ spin_unlock(&kvm->mmu_lock);
+
/* Free the HW pgd, one page at a time */
free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
+ down_read(&current->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
- return -EINVAL;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
- return ret;
+ goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+out:
+ up_read(&current->mm->mmap_sem);
return ret;
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 63eabb06f9f1..475811f5383a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable. This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ * a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ * as we will try to flush the memory through a different alias to that
+ * actually being used (and the flushes are redundant.)
+ */
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
- struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+ unsigned long pfn = dma_to_pfn(dev, handle);
+ struct page *page;
int ret;
+ /* If the PFN is not valid, we do not have a struct page */
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+
+ page = pfn_to_page(pfn);
+
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (unlikely(ret))
return ret;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b5c7aaf9c76..33a45bd96860 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
*/
static inline bool security_extensions_enabled(void)
{
- return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+ /* Check CPUID Identification Scheme before ID_PFR1 read */
+ if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+ return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+ return 0;
}
static unsigned long __init setup_vectors_base(void)
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index b6dc9d838a9a..ad1f4e6a9e33 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
#endif
if (p) {
- if (cur) {
+ if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+ /*
+ * Probe hit but conditional execution check failed,
+ * so just skip the instruction and continue as if
+ * nothing had happened.
+ * In this case, we can skip recursing check too.
+ */
+ singlestep_skip(p, regs);
+ } else if (cur) {
/* Kprobe is pending, so we're recursing. */
switch (kcb->kprobe_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_SS:
/* A pre- or post-handler probe got us here. */
kprobes_inc_nmissed_count(p);
save_previous_kprobe(kcb);
@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
singlestep(p, regs, kcb);
restore_previous_kprobe(kcb);
break;
+ case KPROBE_REENTER:
+ /* A nested probe was hit in FIQ, it is a BUG */
+ pr_warn("Unrecoverable kprobe detected at %p.\n",
+ p->addr);
+ /* fall through */
default:
/* impossible cases */
BUG();
}
- } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+ } else {
/* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
}
reset_current_kprobe();
}
- } else {
- /*
- * Probe hit but conditional execution check failed,
- * so just skip the instruction and continue as if
- * nothing had happened.
- */
- singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */
@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
- orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
@@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
break;
}
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
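
The reworked trampoline_handler() above walks the instance list twice: pass one only locates the real return address, pass two runs the handlers with ri->ret_addr already corrected so they see the true caller instead of the trampoline. A simplified sketch of that two-pass shape over a plain array, with the kretprobe bookkeeping stubbed away:

#include <stdio.h>

#define TRAMPOLINE 0xdeadbeefUL

struct instance { unsigned long ret_addr; };

static unsigned long trampoline_handler(struct instance *ri, int nr)
{
	unsigned long correct = 0;
	int i;

	/* Pass 1: find the real return address; entries still pointing at
	 * the trampoline belong to deeper, not-yet-returned calls. */
	for (i = 0; i < nr; i++) {
		correct = ri[i].ret_addr;
		if (correct != TRAMPOLINE)
			break;
	}

	/* Pass 2: fix up and "run the handlers" now that correct is known. */
	for (i = 0; i < nr; i++) {
		unsigned long orig = ri[i].ret_addr;

		ri[i].ret_addr = correct;	/* handler sees the real caller */
		printf("handler: ret_addr=%#lx\n", ri[i].ret_addr);
		if (orig != TRAMPOLINE)
			break;			/* remaining instances are older */
	}
	return correct;
}

int main(void)
{
	struct instance ri[] = { { TRAMPOLINE }, { 0x8048abcUL }, { 0x80491f0UL } };

	return trampoline_handler(ri, 3) != 0x8048abcUL;
}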
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index c893726aa52d..1c98a87786ca 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -977,7 +977,10 @@ static void coverage_end(void)
void __naked __kprobes_test_case_start(void)
{
__asm__ __volatile__ (
- "stmdb sp!, {r4-r11} \n\t"
+ "mov r2, sp \n\t"
+ "bic r3, r2, #7 \n\t"
+ "mov sp, r3 \n\t"
+ "stmdb sp!, {r2-r11} \n\t"
"sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"bic r0, lr, #1 @ r0 = inline data \n\t"
"mov r1, sp \n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
"movne pc, r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
- "ldmia sp!, {r4-r11} \n\t"
+ "ldmia sp!, {r2-r11} \n\t"
+ "mov sp, r2 \n\t"
"mov pc, r0 \n\t"
);
}
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
"bxne r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
- "ldmia sp!, {r4-r11} \n\t"
+ "ldmia sp!, {r2-r11} \n\t"
+ "mov sp, r2 \n\t"
"bx r0 \n\t"
);
}
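
The test harness change forces an 8-byte-aligned stack (as the AAPCS requires at public interfaces) by clearing the low three bits of sp, keeping the original value (in r2) so it can be restored exactly on exit. The same round-down-and-remember idiom in C, using an arbitrary example value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uintptr_t sp = 0x7fffdead;		/* pretend unaligned stack */
	uintptr_t saved = sp;			/* like "mov r2, sp" */

	sp &= ~(uintptr_t)7;			/* like "bic r3, r2, #7" */
	assert((sp & 7) == 0);			/* 8-byte aligned */
	assert(sp <= saved && saved - sp < 8);	/* rounded down, recoverable */
	sp = saved;				/* like "mov sp, r2" on exit */
	return 0;
}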
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4bf899fb451b..1b35b8bddbfb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -42,7 +42,20 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-static const char *fault_name(unsigned int esr);
+struct fault_info {
+ int (*fn)(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs);
+ int sig;
+ int code;
+ const char *name;
+};
+
+static const struct fault_info fault_info[];
+
+static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+{
+ return fault_info + (esr & 63);
+}
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
struct pt_regs *regs)
{
struct siginfo si;
+ const struct fault_info *inf;
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
+ inf = esr_to_fault_info(esr);
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
- tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
+ tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
show_pte(tsk->mm, addr);
show_regs(regs);
@@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
+ const struct fault_info *inf;
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
*/
- if (user_mode(regs))
- __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
- else
+ if (user_mode(regs)) {
+ inf = esr_to_fault_info(esr);
+ __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+ } else
__do_kernel_fault(mm, addr, esr, regs);
}
@@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 1;
}
-static const struct fault_info {
- int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
- int sig;
- int code;
- const char *name;
-} fault_info[] = {
+static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "ttbr address size fault" },
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
@@ -560,19 +572,13 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 63" },
};
-static const char *fault_name(unsigned int esr)
-{
- const struct fault_info *inf = fault_info + (esr & 63);
- return inf->name;
-}
-
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- const struct fault_info *inf = fault_info + (esr & 63);
+ const struct fault_info *inf = esr_to_fault_info(esr);
struct siginfo info;
if (!inf->fn(addr, esr, regs))
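
esr_to_fault_info() is a plain table lookup: the low six bits of the ESR index a 64-entry array bundling handler, signal, si_code and name, which is why the separate fault_name() accessor can go. A compact sketch of the indexing with a sparsely filled toy table in place of the real one:

#include <signal.h>
#include <stdio.h>

struct fault_info {
	int sig;
	const char *name;
};

static const struct fault_info fault_info[64] = {
	[0] = { SIGBUS,  "ttbr address size fault" },
	[4] = { SIGSEGV, "level 0 translation fault" },
	/* ... remaining entries default to {0, NULL} in this sketch */
};

static const struct fault_info *esr_to_fault_info(unsigned int esr)
{
	return &fault_info[esr & 63];	/* fault status code is ESR[5:0] */
}

int main(void)
{
	const struct fault_info *inf = esr_to_fault_info(0x92000004);

	printf("%s (sig %d)\n", inf->name ? inf->name : "unknown", inf->sig);
	return 0;
}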
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index e25584d72396..7514a000e361 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
- } else if (ps == (PAGE_SIZE * CONT_PTES)) {
- hugetlb_add_hstate(CONT_PTE_SHIFT);
- } else if (ps == (PMD_SIZE * CONT_PMDS)) {
- hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
hugetlb_bad_size();
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
- if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
- hugetlb_add_hstate(CONT_PTE_SHIFT);
- return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e61225c27..07238b39638c 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
-extern unsigned long __must_check __copy_user_zeroing(void *to,
- const void __user *from,
- unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
- return __copy_user_zeroing(to, from, n);
- memset(to, 0, n);
- return n;
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
extern unsigned long __must_check __copy_user(void __user *to,
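
The renamed raw_copy_from_user() follows the generic contract: it returns the number of bytes *not* copied, and copy_from_user() zeroes exactly that tail of the destination so no stale kernel data survives a partial copy. A self-contained sketch, with a fake raw copy that "faults" after a chosen byte count:

#include <assert.h>
#include <string.h>

/* Stand-in for raw_copy_from_user(): copies until a simulated fault,
 * returning how many of the n requested bytes were left uncopied. */
static unsigned long fake_raw_copy(void *to, const void *from,
				   unsigned long n, unsigned long fault_at)
{
	unsigned long copied = n < fault_at ? n : fault_at;

	memcpy(to, from, copied);
	return n - copied;
}

static unsigned long copy_from_user_sketch(void *to, const void *from,
					   unsigned long n, unsigned long fault_at)
{
	unsigned long res = fake_raw_copy(to, from, n, fault_at);

	if (res)				/* zero only the uncopied tail */
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "ABCDEFG", dst[8];

	assert(copy_from_user_sketch(dst, src, 8, 5) == 3);
	assert(memcmp(dst, "ABCDE\0\0\0", 8) == 0);
	return 0;
}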
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9c8e88..2792fc621088 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
@@ -260,27 +259,31 @@
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #32\n" \
"23:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "SUB %3, %3, #32\n" \
"24:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #32\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "25:\n" \
+ "27:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "28:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
"SUB %3, %3, #32\n" \
- "27:\n" \
+ "30:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "28:\n" \
+ "31:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %0, %0, #8\n" \
- "29:\n" \
+ "33:\n" \
"SETL [%0++], D0.7, D1.7\n" \
"SUB %3, %3, #32\n" \
"1:" \
@@ -312,11 +315,15 @@
" .long 26b,3b\n" \
" .long 27b,3b\n" \
" .long 28b,3b\n" \
- " .long 29b,4b\n" \
+ " .long 29b,3b\n" \
+ " .long 30b,3b\n" \
+ " .long 31b,3b\n" \
+ " .long 32b,3b\n" \
+ " .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -342,7 +349,7 @@
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"23:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "24:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
- "25:\n" \
+ "24:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "25:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #16\n" \
"27:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
+ "SUB %3, %3, #16\n" \
+ "30:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %3, %3, #16\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "29:\n" \
+ "33:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "30:\n" \
+ "34:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "35:\n" \
"SUB %3, %3, #16\n" \
- "31:\n" \
+ "36:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "32:\n" \
+ "37:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "38:\n" \
"SUB %3, %3, #16\n" \
- "33:\n" \
+ "39:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "34:\n" \
+ "40:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "41:\n" \
"SUB %3, %3, #16\n" \
- "35:\n" \
+ "42:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "36:\n" \
+ "43:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "44:\n" \
"SUB %0, %0, #4\n" \
- "37:\n" \
+ "45:\n" \
"SETD [%0++], D0.7\n" \
"SUB %3, %3, #16\n" \
"1:" \
@@ -483,11 +498,19 @@
" .long 34b,3b\n" \
" .long 35b,3b\n" \
" .long 36b,3b\n" \
- " .long 37b,4b\n" \
+ " .long 37b,3b\n" \
+ " .long 38b,3b\n" \
+ " .long 39b,3b\n" \
+ " .long 40b,3b\n" \
+ " .long 41b,3b\n" \
+ " .long 42b,3b\n" \
+ " .long 43b,3b\n" \
+ " .long 44b,3b\n" \
+ " .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -513,7 +536,7 @@
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
}
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
#endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
+ if (retn)
+ return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
+ if (retn)
+ return retn + n;
}
switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
break;
}
+ /*
+ * If we get here, retn correctly reflects the number of failing
+ * bytes.
+ */
return retn;
}
EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
- "3: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
-#define __asm_copy_from_user_5(to, from, ret) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "4: SETW [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "4: SETD [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "6: SETW [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "6: SETD [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "8: SETW [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "10: SETB [%0++],D1Ar1\n", \
- "11: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "8: SETD [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
- " MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
- " SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 8 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*8 bytes.
+ *	LSM_STEP is bits 10:8 in TXSTATUS, which has already been read
+ *	and stored in D0Ar2.
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
+ *	 LSM_STEP will be 0, i.e. with the 4 writes done here, a fault
+ *	 at the 4th write leaves LSM_STEP at 0 instead of 4. The code
+ *	 copes with that.
*/
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #8\n")
+ "LSR D0Ar2, D0Ar2, #5\n" \
+ "ANDS D0Ar2, D0Ar2, #0x38\n" \
+ "ADDZ D0Ar2, D0Ar2, #32\n" \
+ "SUB %1, %1, D0Ar2\n")
/* rewind 'from' pointer when a fault occurs
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 4 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*4 bytes.
+ *	LSM_STEP is bits 10:8 in TXSTATUS, which has already been read
+ *	and stored in D0Ar2.
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETD,
+ *	 LSM_STEP will be 0, i.e. with the 4 writes done here, a fault
+ *	 at the 4th write leaves LSM_STEP at 0 instead of 4. The code
+ *	 copes with that.
*/
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #4\n")
+ "LSR D0Ar2, D0Ar2, #6\n" \
+ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
+ "ADDZ D0Ar2, D0Ar2, #16\n" \
+ "SUB %1, %1, D0Ar2\n")
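
The rewind is now computed from TXSTATUS instead of being hard-coded: LSM_STEP (bits 10:8) records how far through the M{G,S}ET sequence the fault landed, the shift-and-mask scales it to bytes, and ADDZ substitutes a full block when the field reads 0 (the wrap case the NOTEs describe). A sketch of that arithmetic for both loop widths, assuming only the bit layout quoted in the comments:

#include <assert.h>

/* LSM_STEP is TXSTATUS[10:8]; 0 means the final (4th) transfer. */
static unsigned long rewind_bytes(unsigned long txstatus, int unit)
{
	unsigned long step = (txstatus >> 8) & 0x7;

	if (step == 0)
		step = 4;		/* what the ADDZ patches up */
	return step * unit;		/* unit: 8 for 64-bit, 4 for 32-bit */
}

int main(void)
{
	assert(rewind_bytes(0x200, 8) == 16);	/* step 2, 64-bit loop */
	assert(rewind_bytes(0x000, 8) == 32);	/* step 0 -> whole block */
	assert(rewind_bytes(0x300, 4) == 12);	/* step 3, 32-bit loop */
	assert(rewind_bytes(0x000, 4) == 16);
	return 0;
}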
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
- inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
- unsigned long n)
+/*
+ * Copy from user to kernel. The return value is the number of bytes
+ * that were inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+ unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
if ((unsigned long) src & 1) {
__asm_copy_from_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
- /* We only need one check after the unalignment-adjustments,
- because if both adjustments were done, either both or
- neither reference had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
-
#ifdef USE_RAPF
/* 64 bit copy loop */
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
#endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
/* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
-
- copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
- return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
asm volatile ( \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a008a9f03072..e0bb576410bb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
- select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index f94455f964ec..a2813fe381cf 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -21,6 +21,7 @@
#include <asm/cpu-features.h>
#include <asm/fpu_emulator.h>
#include <asm/hazards.h>
+#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/msa.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 956db6e201d1..ddd1c918103b 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,9 +18,24 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
extern void *irq_stack[NR_CPUS];
+/*
+ * The highest address on the IRQ stack contains a dummy frame put down in
+ * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
+ *
+ * top ------------
+ * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
+ * ------------
+ * | | <- First frame of IRQ context
+ * ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = (unsigned long)irq_stack[cpu];
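
Concretely, the dummy frame means the word at irq_stack[cpu] + IRQ_STACK_START always holds the interrupted task's stack pointer, so a walker that reaches the top of the IRQ stack can hop straight to the task stack. A toy model of that layout and lookup, with sizes shrunk and the kernel types stubbed:

#include <assert.h>
#include <stdbool.h>

#define IRQ_STACK_SIZE	64
#define IRQ_STACK_START	(IRQ_STACK_SIZE - sizeof(unsigned long))

static unsigned long irq_stack[IRQ_STACK_SIZE / sizeof(unsigned long)];

static bool on_irq_stack(unsigned long sp)
{
	unsigned long low = (unsigned long)irq_stack;

	return sp >= low && sp <= low + IRQ_STACK_START;
}

int main(void)
{
	unsigned long task_sp = 0x12345678UL;	/* pretend task stack pointer */
	unsigned long *top = &irq_stack[IRQ_STACK_START / sizeof(unsigned long)];

	*top = task_sp;				/* what genex.S's LONG_S does */
	assert(on_irq_stack((unsigned long)top));
	assert(*top == task_sp);		/* what the unwinder reads back */
	return 0;
}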
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index f485afe51514..a8df44d60607 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
" .set reorder \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3e940dbe0262..78faf4292e90 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -386,17 +386,18 @@
#define __NR_pkey_mprotect (__NR_Linux + 363)
#define __NR_pkey_alloc (__NR_Linux + 364)
#define __NR_pkey_free (__NR_Linux + 365)
+#define __NR_statx (__NR_Linux + 366)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 365
+#define __NR_Linux_syscalls 366
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 365
+#define __NR_O32_Linux_syscalls 366
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -730,16 +731,17 @@
#define __NR_pkey_mprotect (__NR_Linux + 323)
#define __NR_pkey_alloc (__NR_Linux + 324)
#define __NR_pkey_free (__NR_Linux + 325)
+#define __NR_statx (__NR_Linux + 326)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 325
+#define __NR_Linux_syscalls 326
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 325
+#define __NR_64_Linux_syscalls 326
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1077,15 +1079,16 @@
#define __NR_pkey_mprotect (__NR_Linux + 327)
#define __NR_pkey_alloc (__NR_Linux + 328)
#define __NR_pkey_free (__NR_Linux + 329)
+#define __NR_statx (__NR_Linux + 330)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 329
+#define __NR_Linux_syscalls 330
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 329
+#define __NR_N32_Linux_syscalls 330
#endif /* _UAPI_ASM_UNISTD_H */
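
Each ABI keeps its own base, so the same new entry lands at a different absolute number: statx becomes 4000+366 (o32), 5000+326 (64-bit) and 6000+330 (n32), and each __NR_Linux_syscalls count is bumped in step. A trivial check of that arithmetic:

#include <assert.h>

#define NR_LINUX_O32	4000
#define NR_LINUX_64	5000
#define NR_LINUX_N32	6000

int main(void)
{
	assert(NR_LINUX_O32 + 366 == 4366);	/* o32 statx */
	assert(NR_LINUX_64  + 326 == 5326);	/* 64-bit statx */
	assert(NR_LINUX_N32 + 330 == 6330);	/* n32 statx, as the table comment notes */
	return 0;
}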
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index bb5c5d34ba81..a670c0c11875 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 59476a607add..a00e87b0256d 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
END(mips_cps_get_bootcfg)
LEAF(mips_cps_boot_vpes)
- PTR_L ta2, COREBOOTCFG_VPEMASK(a0)
+ lw ta2, COREBOOTCFG_VPEMASK(a0)
PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
#if defined(CONFIG_CPU_MIPSR6)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 07718bb5fc9d..12422fd4af23 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
}
decode_configs(c);
- c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
default:
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 7ec9612cb007..ae810da4d499 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jal plat_irq_dispatch
@@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jalr v0
@@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER reserved reserved sti verbose /* others */
.align 5
- LEAF(handle_ri_rdhwr_vivt)
+ LEAF(handle_ri_rdhwr_tlbp)
.set push
.set noat
.set noreorder
@@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
- END(handle_ri_rdhwr_vivt)
+ END(handle_ri_rdhwr_tlbp)
LEAF(handle_ri_rdhwr)
.set push
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fb6b6b650719..b68e10fc453d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long pc,
unsigned long *ra)
{
+ unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
+ struct pt_regs *regs;
int leaf;
- extern void ret_from_irq(void);
- extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
- * If we reached the bottom of interrupt context,
- * return saved pc in pt_regs.
+	 * Stacks grow down: IRQ stacks top out at offset IRQ_STACK_START,
+	 * task stacks at THREAD_SIZE - 32.
*/
- if (pc == (unsigned long)ret_from_irq ||
- pc == (unsigned long)ret_from_exception) {
- struct pt_regs *regs;
- if (*sp >= stack_page &&
- *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
- regs = (struct pt_regs *)*sp;
- pc = regs->cp0_epc;
- if (!user_mode(regs) && __kernel_text_address(pc)) {
- *sp = regs->regs[29];
- *ra = regs->regs[31];
- return pc;
- }
+ low = stack_page;
+ if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
+ high = stack_page + IRQ_STACK_START;
+ irq_stack_high = high;
+ } else {
+ high = stack_page + THREAD_SIZE - 32;
+ irq_stack_high = 0;
+ }
+
+ /*
+ * If we reached the top of the interrupt stack, start unwinding
+ * the interrupted task stack.
+ */
+ if (unlikely(*sp == irq_stack_high)) {
+ unsigned long task_sp = *(unsigned long *)*sp;
+
+ /*
+ * Check that the pointer saved in the IRQ stack head points to
+ * something within the stack of the current task
+ */
+ if (!object_is_on_stack((void *)task_sp))
+ return 0;
+
+ /*
+	 * Follow the pointer to the task's kernel stack frame where the
+	 * interrupted state was saved.
+ */
+ regs = (struct pt_regs *)task_sp;
+ pc = regs->cp0_epc;
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
+ *sp = regs->regs[29];
+ *ra = regs->regs[31];
+ return pc;
}
return 0;
}
@@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
if (leaf < 0)
return 0;
- if (*sp < stack_page ||
- *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
+ if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)
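
The unwinder now picks its bounds from whichever stack sp currently sits on, and treats reaching irq_stack_high as the cue to follow the saved task sp. A condensed sketch of just the bound selection, with illustrative constants:

#include <assert.h>
#include <stdbool.h>

#define THREAD_SIZE	0x4000UL
#define IRQ_STACK_START	(THREAD_SIZE - sizeof(unsigned long))

struct bounds { unsigned long low, high, irq_high; };

static struct bounds pick_bounds(unsigned long stack_page, bool on_irq)
{
	struct bounds b = { .low = stack_page };

	if (on_irq) {
		b.high = stack_page + IRQ_STACK_START;
		b.irq_high = b.high;	/* reaching this means: hop stacks */
	} else {
		b.high = stack_page + THREAD_SIZE - 32;
		b.irq_high = 0;
	}
	return b;
}

int main(void)
{
	struct bounds b = pick_bounds(0x80000000UL, true);

	assert(b.high == 0x80000000UL + IRQ_STACK_START);
	assert(b.irq_high == b.high);
	return 0;
}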
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c29d397eee86..80ed68b2c95e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -600,3 +600,4 @@ EXPORT(sys_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
+ PTR sys_statx
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 0687f96ee912..49765b44aa9b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -438,4 +438,5 @@ EXPORT(sys_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 5325 */
+ PTR sys_statx
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0331ba39a065..90bad2d1b2d3 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free
+ PTR sys_statx /* 6330 */
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 5a47042dd25f..2dd70bd104e1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
+ PTR sys_statx
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c7d17cfb32f6..b49e7bf9f950 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_ri_rdhwr_vivt(void);
+extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
@@ -2408,9 +2408,18 @@ void __init trap_init(void)
set_except_vector(EXCCODE_SYS, handle_sys);
set_except_vector(EXCCODE_BP, handle_bp);
- set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
- (cpu_has_vtag_icache ?
- handle_ri_rdhwr_vivt : handle_ri_rdhwr));
+
+ if (rdhwr_noopt)
+ set_except_vector(EXCCODE_RI, handle_ri);
+ else {
+ if (cpu_has_vtag_icache)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else if (current_cpu_type() == CPU_LOONGSON3)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
+ }
+
set_except_vector(EXCCODE_CPU, handle_cpu);
set_except_vector(EXCCODE_OV, handle_ov);
set_except_vector(EXCCODE_TR, handle_tr);
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3c3aa05891dd..95bec460b651 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
if (!np_xbar)
panic("Failed to load xbar nodes from devicetree");
- if (of_address_to_resource(np_pmu, 0, &res_xbar))
+ if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name))
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e7f798d55fbc..3fe99cb271a9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1562,6 +1562,7 @@ static void probe_vcache(void)
vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
c->vcache.waybit = 0;
+ c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
scache_size *= 4;
c->scache.waybit = 0;
+ c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
if (scache_size)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9bfee8988eaf..4f642e07c2b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
- unsigned int ptr)
+ unsigned int ptr,
+ unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
+ if (cpu_has_ftlb && flush) {
+ BUG_ON(!cpu_has_tlbinv);
+
+ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+
+ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+ return;
+ }
+
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index c4ffd43d3996..48ce701557a4 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-fnc", 3, 40, 32)
};
static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
static struct rt2880_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 367c5426157b..3901b80d4420 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return alloc_bootmem_align(size, align);
}
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+ bool nomap)
+{
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+ return 0;
+}
+
void __init early_init_devtree(void *params)
{
__be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 6e57ffa5db27..6044d9be28b4 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
}
#endif /* CONFIG_BLK_DEV_INITRD */
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
unflatten_and_copy_device_tree();
setup_cpuinfo();
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 411994551afc..f058e0c3e4d4 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
}
if (len & ~VMX_ALIGN_MASK) {
+ preempt_disable();
pagefault_disable();
enable_kernel_altivec();
crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+ disable_kernel_altivec();
pagefault_enable();
+ preempt_enable();
}
tail = len & VMX_ALIGN_MASK;
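
enable_kernel_altivec() hands the caller the vector unit, so the caller must not be preempted or fault while holding it; the fix adds a preempt bracket outside the existing pagefault one and pairs the enable with an explicit disable. The nesting discipline, sketched with trivial stubs (the real functions are kernel-internal):

#include <stdio.h>

static void preempt_disable(void)	 { puts("preempt off"); }
static void pagefault_disable(void)	 { puts("pagefaults off"); }
static void enable_kernel_altivec(void)	 { puts("altivec on"); }
static void disable_kernel_altivec(void) { puts("altivec off"); }
static void pagefault_enable(void)	 { puts("pagefaults on"); }
static void preempt_enable(void)	 { puts("preempt on"); }

int main(void)
{
	preempt_disable();		/* pin to this CPU ... */
	pagefault_disable();		/* ... and forbid sleeping faults */
	enable_kernel_altivec();
	/* vectorised CRC work would run here */
	disable_kernel_altivec();
	pagefault_enable();		/* unwind strictly inside-out */
	preempt_enable();
	return 0;
}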
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
- nb = 8;
- flags = LD+SW;
- } else if (IS_XFORM(instruction) &&
- ((instruction >> 1) & 0x3ff) == 660) {
- nb = 8;
- flags = ST+SW;
+ /*
+ * Handle some cases which give overlaps in the DSISR values.
+ */
+ if (IS_XFORM(instruction)) {
+ switch (get_xop(instruction)) {
+ case 532: /* ldbrx */
+ nb = 8;
+ flags = LD+SW;
+ break;
+ case 660: /* stdbrx */
+ nb = 8;
+ flags = ST+SW;
+ break;
+ case 20: /* lwarx */
+ case 84: /* ldarx */
+ case 116: /* lharx */
+ case 276: /* lqarx */
+ return 0; /* not emulated ever */
+ }
}
/* Byteswap little endian loads and stores */
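
On the X-form path the handler now dispatches on the extended opcode in bits 10:1, and explicitly refuses lwarx/ldarx/lharx/lqarx: emulating a reservation-granting load would silently break its atomicity. A sketch of the extraction and dispatch, with get_xop() reimplemented here for illustration:

#include <assert.h>

static unsigned int get_xop(unsigned int instr)
{
	return (instr >> 1) & 0x3ff;	/* X-form extended opcode field */
}

static int classify(unsigned int instr)
{
	switch (get_xop(instr)) {
	case 532:			/* ldbrx: 8-byte byteswapped load */
	case 660:			/* stdbrx: 8-byte byteswapped store */
		return 1;		/* emulate */
	case 20: case 84: case 116: case 276:
		return 0;		/* l[wdhq]arx: never emulate */
	default:
		return -1;		/* fall back to the tables */
	}
}

int main(void)
{
	assert(classify(532 << 1) == 1);	/* ldbrx */
	assert(classify(84 << 1) == 0);		/* ldarx */
	return 0;
}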
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
* flush all bytes from start through stop-1 inclusive
*/
-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
*
* flush all bytes from start to stop-1 inclusive
*/
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)
/*
* Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
+ /*
+ * Fixup HFSCR:TM based on CPU features. The bit is set by our
+ * early asm init because at that point we haven't updated our
+	 * CPU features from firmware and device-tree. By this point we
+	 * have, so apply the fixup here.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8c68145ba1bd..710e491206ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
/* start new resize */
resize = kzalloc(sizeof(*resize), GFP_KERNEL);
+ if (!resize) {
+ ret = -ENOMEM;
+ goto out;
+ }
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index cc332608e656..65bb8f33b399 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
+ unsigned int use_local;
+
+ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}
- if (mmu_has_feature(MMU_FTR_TLBIEL) &&
- mmu_psize_defs[psize].tlbiel && local) {
+ if (use_local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index fa95041fa9f6..33ca29333e18 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
unsigned long decompress_kernel(void)
{
- unsigned long output_addr;
- unsigned char *output;
+ void *output, *kernel_end;
- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
- memset(&_bss, 0, &_ebss - &_bss);
- free_mem_ptr = (unsigned long)&_end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) output_addr;
+ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+ kernel_end = output + SZ__bss_start;
+ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
- * kernel image.
+ * kernel image. This also prevents initrd corruption caused by
+ * bss clearing since kernel_end will always be located behind the
+	 * current bss section.
*/
- if (INITRD_START && INITRD_SIZE &&
- INITRD_START < (unsigned long) output + SZ__bss_start) {
- check_ipl_parmblock(output + SZ__bss_start,
- INITRD_START + INITRD_SIZE);
- memmove(output + SZ__bss_start,
- (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = (unsigned long) output + SZ__bss_start;
+ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+ check_ipl_parmblock(kernel_end, INITRD_SIZE);
+ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+ INITRD_START = (unsigned long) kernel_end;
}
#endif
+ /*
+ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+ * initialized afterwards since they reside in bss.
+ */
+ memset(&_bss, 0, &_ebss - &_bss);
+ free_mem_ptr = (unsigned long) &_end;
+ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
puts("Uncompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
puts("Ok, booting the kernel.\n");
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 136932ff4250..3ea1554d04b3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
- : "=d" (__rc), "=Q" (*(to)) \
+ : "=d" (__rc), "+Q" (*(to)) \
: "d" (size), "Q" (*(from)), \
"d" (__reg0), "K" (-EFAULT) \
: "cc"); \
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 47a973b5b4f1..5dab859b0d54 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
{
struct pcpu *pcpu = pcpu_devices;
+ WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
- pcpu->address = stap();
pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
- set_cpu_present(0, true);
- set_cpu_online(0, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_setup_processor_id(void)
{
+ pcpu_devices[0].address = stap();
S390_lowcore.cpu_nr = 0;
S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d55c829a5944..ddbffb715b40 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -168,8 +168,7 @@ union page_table_entry {
unsigned long z : 1; /* Zero Bit */
unsigned long i : 1; /* Page-Invalid Bit */
unsigned long p : 1; /* DAT-Protection Bit */
- unsigned long co : 1; /* Change-Recording Override */
- unsigned long : 8;
+ unsigned long : 9;
};
};
@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_PAGE_TRANSLATION;
if (pte.z)
return PGM_TRANSLATION_SPEC;
- if (pte.co && !edat1)
- return PGM_TRANSLATION_SPEC;
dat_protection |= pte.p;
raddr.pfra = pte.pfra;
real_address:
@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
if (!rc && pte.i)
rc = PGM_PAGE_TRANSLATION;
- if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
+ if (!rc && pte.z)
rc = PGM_TRANSLATION_SPEC;
shadow_page:
pte.p |= dat_protection;
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index f294dd42fc7d..5961b2d8398a 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -17,6 +17,7 @@
#define HPAGE_SHIFT 23
#define REAL_HPAGE_SHIFT 22
+#define HPAGE_2GB_SHIFT 31
#define HPAGE_256MB_SHIFT 28
#define HPAGE_64K_SHIFT 16
#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE 3
+#define HUGE_MAX_HSTATE 4
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8a598528ec1f..6fbd931f0570 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return pte_pfn(pte);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline unsigned long pmd_dirty(pmd_t pmd)
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_dirty(pte);
+ return pte_write(pte);
}
-static inline unsigned long pmd_young(pmd_t pmd)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long pmd_dirty(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_young(pte);
+ return pte_dirty(pte);
}
-static inline unsigned long pmd_write(pmd_t pmd)
+static inline unsigned long pmd_young(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_write(pte);
+ return pte_young(pte);
}
static inline unsigned long pmd_trans_huge(pmd_t pmd)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 365d4cb267b4..dd27159819eb 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -18,12 +18,6 @@
#include <asm/signal.h>
#include <asm/page.h>
-/*
- * The sparc has no problems with write protection
- */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
* That one page is used to protect kernel from intruders, so that
* we can make our access_ok test faster
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 6448cfc8292f..b58ee9018433 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -18,10 +18,6 @@
#include <asm/ptrace.h>
#include <asm/page.h>
-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
/*
* User lives in his very own context, and cannot reference us. Note
* that TASK_SIZE is a misnomer, it really gives maximum user virtual
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 6aa3da152c20..44101196d02b 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -96,6 +96,7 @@ sparc64_boot:
andn %g1, PSTATE_AM, %g1
wrpr %g1, 0x0, %pstate
ba,a,pt %xcc, 1f
+ nop
.globl prom_finddev_name, prom_chosen_path, prom_root_node
.globl prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
nop
ba,a,pt %xcc, 80f
+ nop
niagara4_patch:
call niagara4_patch_copyops
nop
@@ -622,6 +624,7 @@ niagara4_patch:
nop
ba,a,pt %xcc, 80f
+ nop
niagara2_patch:
call niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
nop
ba,a,pt %xcc, 80f
+ nop
niagara_patch:
call niagara_patch_copyops
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 34b4933900bf..9276d2f0dd86 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -82,6 +82,7 @@ do_stdfmna:
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
+ nop
.size do_stdfmna,.-do_stdfmna
.type breakpoint_trap,#function
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 216948ca4382..709a82ebd294 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
bne,pt %xcc, user_rtt_fill_32bit
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
+ nop
user_rtt_fill_fixup_dax:
ba,pt %xcc, user_rtt_fill_fixup_common
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index 4a73009f66a5..d7e540842809 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
rd %pc, %g7
ba,a,pt %xcc, 2f
+ nop
1: ba,pt %xcc, etrap_irq
rd %pc, %g7
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index 6179e19bc9b9..c19f352f46c7 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -352,6 +352,7 @@ sun4v_mna:
call sun4v_do_mna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
+ nop
/* Privileged Action. */
sun4v_privact:
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
index 5604a2b051d4..364af3250646 100644
--- a/arch/sparc/kernel/urtt_fill.S
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
1: call spitfire_data_access_exception
nop
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 855019a8590e..1ee173cc3c39 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -152,6 +152,8 @@ fill_fixup_dax:
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
1: call spitfire_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index c629dbd121b6..64dcd6cdb606 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
blu 170f
nop
ba,a,pt %xcc, 180f
+ nop
4: /* 32 <= low bits < 48 */
blu 150f
nop
ba,a,pt %xcc, 160f
+ nop
5: /* 0 < low bits < 32 */
blu,a 6f
cmp %g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
blu 130f
nop
ba,a,pt %xcc, 140f
+ nop
6: /* 0 < low bits < 16 */
bgeu 120f
nop
@@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %o2, 85f
sub %o0, %o1, GLOBAL_SPARE
ba,a,pt %XCC, 90f
+ nop
.align 64
75: /* 16 < len <= 64 */
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 75bb93b1437f..78ea962edcbe 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
bne,pt %icc, 1b
EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
+ nop
.size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S
index 41da4bdd95cb..7c0c81f18837 100644
--- a/arch/sparc/lib/NG4memset.S
+++ b/arch/sparc/lib/NG4memset.S
@@ -102,4 +102,5 @@ NG4bzero:
bne,pt %icc, 1b
add %o0, 0x30, %o0
ba,a,pt %icc, .Lpostloop
+ nop
.size NG4bzero,.-NG4bzero
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index d88c4ed50a00..cd654a719b27 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
brz,pt %i2, 85f
sub %o0, %i1, %i3
ba,a,pt %XCC, 90f
+ nop
.align 64
70: /* 16 < len <= 64 */
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 323bc6b6e3ad..ee5273ad918d 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
switch (shift) {
+ case HPAGE_2GB_SHIFT:
+ hugepage_size = _PAGE_SZ2GB_4V;
+ pte_val(entry) |= _PAGE_PMD_HUGE;
+ break;
case HPAGE_256MB_SHIFT:
hugepage_size = _PAGE_SZ256MB_4V;
pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
unsigned int shift;
switch (tte_szbits) {
+ case _PAGE_SZ2GB_4V:
+ shift = HPAGE_2GB_SHIFT;
+ break;
case _PAGE_SZ256MB_4V:
shift = HPAGE_256MB_SHIFT;
break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
if (!pmd)
return NULL;
- if (sz == PMD_SHIFT)
+ if (sz >= PMD_SIZE)
pte = (pte_t *)pmd;
else
pte = pte_alloc_map(mm, pmd, addr);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ccd455328989..0cda653ae007 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
hugepage_shift = ilog2(hugepage_size);
switch (hugepage_shift) {
+ case HPAGE_2GB_SHIFT:
+ hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+ hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+ break;
case HPAGE_256MB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_256MB;
hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
if ((long)addr < 0L) {
unsigned long pa = __pa(addr);
- if ((addr >> max_phys_bits) != 0UL)
+ if ((pa >> max_phys_bits) != 0UL)
return false;
return pfn_valid(pa >> PAGE_SHIFT);
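
[Editor's note] The one-character fix above makes kern_addr_valid() test the physical address (pa) against max_phys_bits instead of the virtual one. A minimal user-space sketch of the same mask check, with a hypothetical 47-bit physical address width (not the actual sparc64 value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PHYS_BITS 47 /* hypothetical physical address width */

/* Valid iff no bits are set above MAX_PHYS_BITS. */
static bool phys_addr_valid(uint64_t pa)
{
	return (pa >> MAX_PHYS_BITS) == 0;
}

int main(void)
{
	printf("%d\n", phys_addr_valid(0x00007fffffffffffULL)); /* 1 */
	printf("%d\n", phys_addr_valid(1ULL << 47));            /* 0 */
	return 0;
}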
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index def82f6d626f..8e76ebba2986 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -54,6 +54,7 @@
enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
int vac_line_size;
extern struct resource sparc_iomap;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index afda3bbf7854..ee8066c3d96c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
if (pte_val(*pte) & _PAGE_VALID) {
bool exec = pte_exec(*pte);
- tlb_batch_add_one(mm, vaddr, exec, false);
+ tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
}
pte++;
vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pte_t orig_pte = __pte(pmd_val(orig));
bool exec = pte_exec(orig_pte);
- tlb_batch_add_one(mm, addr, exec, true);
+ tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
- true);
+ REAL_HPAGE_SHIFT);
} else {
tlb_batch_pmd_scan(mm, addr, orig);
}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 0a04811f06b7..bedf08b22a47 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
spin_lock_irqsave(&mm->context.lock, flags);
- if (tb->hugepage_shift < HPAGE_SHIFT) {
+ if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
spin_lock_irqsave(&mm->context.lock, flags);
- if (hugepage_shift < HPAGE_SHIFT) {
+ if (hugepage_shift < REAL_HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8e9725c607ea..5accfbdee3f0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -54,6 +54,8 @@
static DEFINE_MUTEX(mce_chrdev_read_mutex);
+static int mce_chrdev_open_count; /* #times opened */
+
#define mce_log_get_idx_check(p) \
({ \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
@@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
if (atomic_read(&num_notifiers) > 2)
return NOTIFY_DONE;
+ /* Don't print when mcelog is running */
+ if (mce_chrdev_open_count > 0)
+ return NOTIFY_DONE;
+
__print_mce(m);
return NOTIFY_DONE;
@@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
*/
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count; /* #times opened */
static int mce_chrdev_open_exclu; /* already open exclusive? */
static int mce_chrdev_open(struct inode *inode, struct file *file)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2ee00dbbbd51..259e9b28ccf8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8198,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
case EXIT_REASON_PREEMPTION_TIMER:
return false;
+ case EXIT_REASON_PML_FULL:
+ /* We don't expose PML support to L1. */
+ return false;
default:
return true;
}
@@ -10267,6 +10270,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
+ if (enable_pml) {
+ /*
+ * Conceptually we want to copy the PML address and index from
+ * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+ * since we always flush the log on each vmexit, this happens
+ * to be equivalent to simply resetting the fields in vmcs02.
+ */
+ ASSERT(vmx->pml_pg);
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
+
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);
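
[Editor's note] The comment in the hunk above argues that, because the PML buffer is flushed on every vmexit, restoring a saved index is equivalent to simply resetting it. A toy model of that invariant (names and structure hypothetical, not KVM code):

#include <assert.h>

#define PML_ENTRIES 512

struct pml_state { int index; };

/* On each exit the log is drained and the index rewinds to the top. */
static void flush_log(struct pml_state *s)
{
	s->index = PML_ENTRIES - 1;
}

int main(void)
{
	struct pml_state vmcs01 = { .index = PML_ENTRIES - 1 };

	flush_log(&vmcs01);               /* always runs on vmexit */
	int saved = vmcs01.index;         /* "copy from vmcs01" ...  */
	assert(saved == PML_ENTRIES - 1); /* ... equals a plain reset */
	return 0;
}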
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 976b1d70edbc..4ddbfd57a7c8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+ unsigned long off = va - PAGE_OFFSET;
+
+ if (off >= XCHAL_KSEG_SIZE)
+ off -= XCHAL_KSEG_SIZE;
+
+ return off + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
+#else
#define __pa(x) \
((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
#define __va(x) \
((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
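
[Editor's note] The new ___pa() handles configurations where the kernel's virtual mapping consists of two mirrors of the same physical region: offsets that land in the second mirror are folded back by one segment size before PHYS_OFFSET is added. A stand-alone sketch with made-up constants (not the real xtensa layout):

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0xd0000000UL /* hypothetical KSEG base */
#define KSEG_SIZE   0x08000000UL /* hypothetical 128 MiB per mirror */
#define PHYS_OFFSET 0x00000000UL

static unsigned long va_to_pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	/* The second (e.g. uncached) mirror maps the same physical bytes. */
	if (off >= KSEG_SIZE)
		off -= KSEG_SIZE;

	return off + PHYS_OFFSET;
}

int main(void)
{
	/* Both mirrors of the same byte resolve to one physical address. */
	printf("%#lx\n", va_to_pa(PAGE_OFFSET + 0x1000));             /* 0x1000 */
	printf("%#lx\n", va_to_pa(PAGE_OFFSET + KSEG_SIZE + 0x1000)); /* 0x1000 */
	return 0;
}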
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index cd400af4a6b2..6be7eb27fd29 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2)
#define __NR_pkey_free 350
__SYSCALL(350, sys_pkey_free, 1)
-#define __NR_syscall_count 351
+#define __NR_statx 351
+__SYSCALL(351, sys_statx, 5)
+
+#define __NR_syscall_count 352
/*
* sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index c82c43bff296..bae697a06a98 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs)
static int show_trace_cb(struct stackframe *frame, void *data)
{
- if (kernel_text_address(frame->pc)) {
- pr_cont(" [<%08lx>]", frame->pc);
- print_symbol(" %s\n", frame->pc);
- }
+ if (kernel_text_address(frame->pc))
+ pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
return 0;
}
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 09af8ff18719..c974a1bbf4cb 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq)
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
- struct elevator_queue *e = hctx->queue->elevator;
+ struct request_queue *q = hctx->queue;
+ struct elevator_queue *e = q->elevator;
const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
bool did_work = false;
LIST_HEAD(rq_list);
@@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
- did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+ did_work = blk_mq_dispatch_rq_list(q, &rq_list);
} else if (!has_sched_dispatch) {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
- blk_mq_dispatch_rq_list(hctx, &rq_list);
+ blk_mq_dispatch_rq_list(q, &rq_list);
}
/*
@@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
if (!rq)
break;
list_add(&rq->queuelist, &rq_list);
- } while (blk_mq_dispatch_rq_list(hctx, &rq_list));
+ } while (blk_mq_dispatch_rq_list(q, &rq_list));
}
}
@@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
return true;
}
-static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- if (blk_mq_hctx_has_pending(hctx))
+ if (blk_mq_hctx_has_pending(hctx)) {
blk_mq_run_hw_queue(hctx, true);
+ return true;
+ }
}
+ return false;
}
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
-{
- struct request_queue *q = hctx->queue;
- unsigned int i;
+/**
+ * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
+ * @pos: loop cursor.
+ * @skip: the list element that will not be examined. Iteration starts at
+ * @skip->next.
+ * @head: head of the list to examine. This list must have at least one
+ * element, namely @skip.
+ * @member: name of the list_head structure within typeof(*pos).
+ */
+#define list_for_each_entry_rcu_rr(pos, skip, head, member) \
+ for ((pos) = (skip); \
+ (pos = (pos)->member.next != (head) ? list_entry_rcu( \
+ (pos)->member.next, typeof(*pos), member) : \
+ list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
+ (pos) != (skip); )
- if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
- if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_restart_hctx(hctx);
+/*
+ * Called after a driver tag has been freed to check whether a hctx needs to
+ * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
+ * queues in a round-robin fashion if the tag set of @hctx is shared with other
+ * hardware queues.
+ */
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
+{
+ struct blk_mq_tags *const tags = hctx->tags;
+ struct blk_mq_tag_set *const set = hctx->queue->tag_set;
+ struct request_queue *const queue = hctx->queue, *q;
+ struct blk_mq_hw_ctx *hctx2;
+ unsigned int i, j;
+
+ if (set->flags & BLK_MQ_F_TAG_SHARED) {
+ rcu_read_lock();
+ list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
+ tag_set_list) {
+ queue_for_each_hw_ctx(q, hctx2, i)
+ if (hctx2->tags == tags &&
+ blk_mq_sched_restart_hctx(hctx2))
+ goto done;
+ }
+ j = hctx->queue_num + 1;
+ for (i = 0; i < queue->nr_hw_queues; i++, j++) {
+ if (j == queue->nr_hw_queues)
+ j = 0;
+ hctx2 = queue->queue_hw_ctx[j];
+ if (hctx2->tags == tags &&
+ blk_mq_sched_restart_hctx(hctx2))
+ break;
}
+done:
+ rcu_read_unlock();
} else {
blk_mq_sched_restart_hctx(hctx);
}
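
[Editor's note] As the kernel-doc above says, list_for_each_entry_rcu_rr() begins the walk at @skip->next and wraps past the list head, so every element except @skip is visited exactly once. A self-contained sketch of the same round-robin scan over a plain index space (no RCU; queue count and predicate hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define NR_QUEUES 4

static bool restart_queue(int q)
{
	/* Pretend only queue 2 has pending work. */
	return q == 2;
}

/* Visit every queue except @skip once, starting just after it. */
static int restart_round_robin(int skip)
{
	for (int i = 1; i < NR_QUEUES; i++) {
		int q = (skip + i) % NR_QUEUES;

		if (restart_queue(q))
			return q;
	}
	return -1;
}

int main(void)
{
	printf("restarted queue %d\n", restart_round_robin(3)); /* 2 */
	return 0;
}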
@@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
}
}
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct blk_mq_tag_set *set = q->tag_set;
+ int ret;
+
+ hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+ set->reserved_tags);
+ if (!hctx->sched_tags)
+ return -ENOMEM;
+
+ ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+ if (ret)
+ blk_mq_sched_free_tags(set, hctx, hctx_idx);
+
+ return ret;
+}
+
+static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
- int ret, i;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_sched_free_tags(set, hctx, i);
+}
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (!e)
+ return 0;
+
+ return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
+}
+
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (!e)
+ return;
+
+ blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
+}
+
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+ int ret;
+
+ if (!e) {
+ q->elevator = NULL;
+ return 0;
+ }
/*
* Default to 256, since we don't split into sync/async like the
@@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q)
*/
q->nr_requests = 2 * BLKDEV_MAX_RQ;
- /*
- * We're switching to using an IO scheduler, so setup the hctx
- * scheduler tags and switch the request map from the regular
- * tags to scheduler tags. First allocate what we need, so we
- * can safely fail and fallback, if needed.
- */
- ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
- hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
- q->nr_requests, set->reserved_tags);
- if (!hctx->sched_tags) {
- ret = -ENOMEM;
- break;
- }
- ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
+ ret = blk_mq_sched_alloc_tags(q, hctx, i);
if (ret)
- break;
+ goto err;
}
- /*
- * If we failed, free what we did allocate
- */
- if (ret) {
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx->sched_tags)
- continue;
- blk_mq_sched_free_tags(set, hctx, i);
- }
-
- return ret;
- }
+ ret = e->ops.mq.init_sched(q, e);
+ if (ret)
+ goto err;
return 0;
+
+err:
+ blk_mq_sched_tags_teardown(q);
+ q->elevator = NULL;
+ return ret;
}
-void blk_mq_sched_teardown(struct request_queue *q)
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
- struct blk_mq_tag_set *set = q->tag_set;
- struct blk_mq_hw_ctx *hctx;
- int i;
-
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
+ if (e->type->ops.mq.exit_sched)
+ e->type->ops.mq.exit_sched(e);
+ blk_mq_sched_tags_teardown(q);
+ q->elevator = NULL;
}
int blk_mq_sched_init(struct request_queue *q)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index a75b16b123f7..3a9e6e40558b 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async, bool can_block);
@@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
struct list_head *rq_list,
struct request *(*get_rq)(struct blk_mq_hw_ctx *));
-int blk_mq_sched_setup(struct request_queue *q);
-void blk_mq_sched_teardown(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx);
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx);
int blk_mq_sched_init(struct request_queue *q);
@@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
-/*
- * Mark a hardware queue and the request queue it belongs to as needing a
- * restart.
- */
-static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
-{
- struct request_queue *q = hctx->queue;
-
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
- set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-}
-
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6b6e7bc041db..572966f49596 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
- blk_mq_put_ctx(alloc_data.ctx);
blk_queue_exit(q);
if (!rq)
@@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
blk_mq_sched_completed_request(hctx, rq);
- blk_mq_sched_restart_queues(hctx);
+ blk_mq_sched_restart(hctx);
blk_queue_exit(q);
}
@@ -846,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
};
- if (rq->tag != -1) {
-done:
- if (hctx)
- *hctx = data.hctx;
- return true;
- }
+ if (rq->tag != -1)
+ goto done;
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
@@ -863,10 +858,12 @@ done:
atomic_inc(&data.hctx->nr_active);
}
data.hctx->tags->rqs[rq->tag] = rq;
- goto done;
}
- return false;
+done:
+ if (hctx)
+ *hctx = data.hctx;
+ return rq->tag != -1;
}
static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -963,14 +960,17 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
return true;
}
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
- struct request_queue *q = hctx->queue;
+ struct blk_mq_hw_ctx *hctx;
struct request *rq;
LIST_HEAD(driver_list);
struct list_head *dptr;
int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+ if (list_empty(list))
+ return false;
+
/*
* Start off with dptr being NULL, so we start the first request
* immediately, even if we have more pending.
@@ -981,7 +981,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
* Now process all the entries, sending them to the driver.
*/
errors = queued = 0;
- while (!list_empty(list)) {
+ do {
struct blk_mq_queue_data bd;
rq = list_first_entry(list, struct request, queuelist);
@@ -1052,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
*/
if (!dptr && list->next != list->prev)
dptr = &driver_list;
- }
+ } while (!list_empty(list));
hctx->dispatched[queued_to_index(queued)]++;
@@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return hctx->next_cpu;
}
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+ unsigned long msecs)
{
if (unlikely(blk_mq_hctx_stopped(hctx) ||
!blk_mq_hw_queue_mapped(hctx)))
@@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
put_cpu();
}
- kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+ if (msecs == 0)
+ kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->run_work);
+ else
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->delayed_run_work,
+ msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+ __blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+ __blk_mq_delay_run_hw_queue(hctx, async, 0);
}
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
__blk_mq_run_hw_queue(hctx);
}
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+ __blk_mq_run_hw_queue(hctx);
+}
+
static void blk_mq_delay_work_fn(struct work_struct *work)
{
struct blk_mq_hw_ctx *hctx;
@@ -1924,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
hctx->fq->flush_rq, hctx_idx,
flush_start_tag + hctx_idx);
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -1960,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
node = hctx->numa_node = set->numa_node;
INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
@@ -1990,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
+ if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+ goto exit_hctx;
+
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq)
- goto exit_hctx;
+ goto sched_exit_hctx;
if (set->ops->init_request &&
set->ops->init_request(set->driver_data,
@@ -2007,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
free_fq:
kfree(hctx->fq);
+ sched_exit_hctx:
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -2233,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
unsigned int i;
- blk_mq_sched_teardown(q);
-
/* hctx kobj stays in hctx */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx)
@@ -2565,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
return 0;
}
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+ if (set->ops->map_queues)
+ return set->ops->map_queues(set);
+ else
+ return blk_mq_map_queues(set);
+}
+
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -2619,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (!set->mq_map)
goto out_free_tags;
- if (set->ops->map_queues)
- ret = set->ops->map_queues(set);
- else
- ret = blk_mq_map_queues(set);
+ ret = blk_mq_update_queue_map(set);
if (ret)
goto out_free_mq_map;
@@ -2714,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
blk_mq_freeze_queue(q);
set->nr_hw_queues = nr_hw_queues;
+ blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index b79f9a7d8cf6..660a17e1d033 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
+bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c44b321335f3..37f0b3ad635e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
if (q->elevator) {
ioc_clear_queue(q);
- elevator_exit(q->elevator);
+ elevator_exit(q, q->elevator);
}
blk_exit_rl(&q->root_rl);
diff --git a/block/elevator.c b/block/elevator.c
index 01139f549b5b..dbeecf7be719 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name)
}
}
- if (e->uses_mq) {
- err = blk_mq_sched_setup(q);
- if (!err)
- err = e->ops.mq.init_sched(q, e);
- } else
+ if (e->uses_mq)
+ err = blk_mq_init_sched(q, e);
+ else
err = e->ops.sq.elevator_init_fn(q, e);
- if (err) {
- if (e->uses_mq)
- blk_mq_sched_teardown(q);
+ if (err)
elevator_put(e);
- }
return err;
}
EXPORT_SYMBOL(elevator_init);
-void elevator_exit(struct elevator_queue *e)
+void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->uses_mq && e->type->ops.mq.exit_sched)
- e->type->ops.mq.exit_sched(e);
+ blk_mq_exit_sched(q, e);
else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
e->type->ops.sq.elevator_exit_fn(e);
mutex_unlock(&e->sysfs_lock);
@@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
}
EXPORT_SYMBOL_GPL(elv_unregister);
+static int elevator_switch_mq(struct request_queue *q,
+ struct elevator_type *new_e)
+{
+ int ret;
+
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+
+ if (q->elevator) {
+ if (q->elevator->registered)
+ elv_unregister_queue(q);
+ ioc_clear_queue(q);
+ elevator_exit(q, q->elevator);
+ }
+
+ ret = blk_mq_init_sched(q, new_e);
+ if (ret)
+ goto out;
+
+ if (new_e) {
+ ret = elv_register_queue(q);
+ if (ret) {
+ elevator_exit(q, q->elevator);
+ goto out;
+ }
+ }
+
+ if (new_e)
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+ else
+ blk_add_trace_msg(q, "elv switch: none");
+
+out:
+ blk_mq_unfreeze_queue(q);
+ blk_mq_start_stopped_hw_queues(q, true);
+ return ret;
+
+}
+
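
[Editor's note] The ordering in elevator_switch_mq() above matters: the queue is frozen and quiesced before the old scheduler is torn down, and only unfrozen after the new one is in place, so no request is dispatched while neither scheduler owns the tags. A compressed, hedged sketch of that sequence (error paths elided, names hypothetical):

#include <stdio.h>

static void freeze(void)   { puts("freeze + quiesce queue"); }
static void unfreeze(void) { puts("unfreeze + restart queues"); }

static int switch_scheduler(const char *old_e, const char *new_e)
{
	freeze();                 /* no dispatch from here on */
	if (old_e)
		printf("unregister + exit %s\n", old_e);
	printf("init + register %s\n", new_e ? new_e : "none");
	unfreeze();               /* safe: the new scheduler owns the tags */
	return 0;
}

int main(void)
{
	return switch_scheduler("deadline", "bfq");
}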
/*
* switch to new_e io scheduler. be careful not to introduce deadlocks -
* we don't free the old io scheduler, before we have allocated what we
@@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
bool old_registered = false;
int err;
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
- }
+ if (q->mq_ops)
+ return elevator_switch_mq(q, new_e);
/*
* Turn on BYPASS and drain all requests w/ elevator private data.
@@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
if (old) {
old_registered = old->registered;
- if (old->uses_mq)
- blk_mq_sched_teardown(q);
-
- if (!q->mq_ops)
- blk_queue_bypass_start(q);
+ blk_queue_bypass_start(q);
/* unregister and clear all auxiliary data of the old elevator */
if (old_registered)
@@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
}
/* allocate, init and register new elevator */
- if (new_e) {
- if (new_e->uses_mq) {
- err = blk_mq_sched_setup(q);
- if (!err)
- err = new_e->ops.mq.init_sched(q, new_e);
- } else
- err = new_e->ops.sq.elevator_init_fn(q, new_e);
- if (err)
- goto fail_init;
+ err = new_e->ops.sq.elevator_init_fn(q, new_e);
+ if (err)
+ goto fail_init;
- err = elv_register_queue(q);
- if (err)
- goto fail_register;
- } else
- q->elevator = NULL;
+ err = elv_register_queue(q);
+ if (err)
+ goto fail_register;
/* done, kill the old one and finish */
if (old) {
- elevator_exit(old);
- if (!q->mq_ops)
- blk_queue_bypass_end(q);
+ elevator_exit(q, old);
+ blk_queue_bypass_end(q);
}
- if (q->mq_ops) {
- blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
- }
-
- if (new_e)
- blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
- else
- blk_add_trace_msg(q, "elv switch: none");
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
- if (q->mq_ops)
- blk_mq_sched_teardown(q);
- elevator_exit(q->elevator);
+ elevator_exit(q, q->elevator);
fail_init:
/* switch failed, restore and re-register old elevator */
if (old) {
q->elevator = old;
elv_register_queue(q);
- if (!q->mq_ops)
- blk_queue_bypass_end(q);
- }
- if (q->mq_ops) {
- blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
+ blk_queue_bypass_end(q);
}
return err;
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index fb19e1cdb641..edc8663b5db3 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
return -ENODEV;
/*
- * If the device has a _HID (or _CID) returning a valid ACPI/PNP
- * device ID, it is better to make it look less attractive here, so that
- * the other device with the same _ADR value (that may not have a valid
- * device ID) can be matched going forward. [This means a second spec
- * violation in a row, so whatever we do here is best effort anyway.]
+ * If the device has a _HID returning a valid ACPI/PNP device ID, it is
+ * better to make it look less attractive here, so that the other device
+ * with the same _ADR value (that may not have a valid device ID) can be
+ * matched going forward. [This means a second spec violation in a row,
+ * so whatever we do here is best effort anyway.]
*/
- return sta_present && list_empty(&adev->pnp.ids) ?
+ return sta_present && !adev->pnp.type.platform_id ?
FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9b37a3692b3f..2bd683e2be02 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
goto fail_free_event;
}
+ if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+ enable_irq_wake(irq);
+
list_add_tail(&event->node, &acpi_gpio->events);
return AE_OK;
@@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
+ if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+ disable_irq_wake(event->irq);
+
free_irq(event->irq, event);
desc = event->desc;
if (WARN_ON(IS_ERR(desc)))
@@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
}
desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ if (!IS_ERR(desc))
break;
+ if (PTR_ERR(desc) == -EPROBE_DEFER)
+ return ERR_CAST(desc);
}
/* Then from plain _CRS GPIOs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 262056778f52..6a8129949333 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -32,7 +32,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
@@ -122,14 +122,6 @@ extern int amdgpu_param_buf_per_se;
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
-/* max number of VMHUB */
-#define AMDGPU_MAX_VMHUBS 2
-#define AMDGPU_MMHUB 0
-#define AMDGPU_GFXHUB 1
-
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
-
/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
@@ -312,12 +304,9 @@ struct amdgpu_gart_funcs {
/* set pte flags based per asic */
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
-};
-
-/* provided by the mc block */
-struct amdgpu_mc_funcs {
/* adjust mc addr in fb for APU case */
u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+ uint32_t (*get_invalidate_req)(unsigned int vm_id);
};
/* provided by the ih block */
@@ -379,7 +368,10 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo_va_mapping {
struct list_head list;
- struct interval_tree_node it;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
uint64_t offset;
uint64_t flags;
};
@@ -579,8 +571,6 @@ struct amdgpu_vmhub {
uint32_t vm_context0_cntl;
uint32_t vm_l2_pro_fault_status;
uint32_t vm_l2_pro_fault_cntl;
- uint32_t (*get_invalidate_req)(unsigned int vm_id);
- uint32_t (*get_vm_protection_bits)(void);
};
/*
@@ -618,7 +608,6 @@ struct amdgpu_mc {
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
- const struct amdgpu_mc_funcs *mc_funcs;
};
/*
@@ -1712,6 +1701,12 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_FIELD(reg, field, val) \
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
+ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
+#define WREG32_FIELD15(ip, idx, reg, field, val) \
+ WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
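
[Editor's note] The WREG32_FIELD_OFFSET()/WREG32_FIELD15() macros added above are read-modify-write helpers: read the register, clear the field's mask, OR in the shifted value, write back. The same pattern in portable C (mask and shift values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT 4
#define FIELD_MASK  (0xfu << FIELD_SHIFT)

/* Update one field of a register word, preserving all other bits. */
static uint32_t write_field(uint32_t reg, uint32_t val)
{
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* Only bits 7:4 change; everything else is preserved. */
	printf("%#x\n", write_field(reg, 0x5)); /* 0xdeadbe5f */
	return 0;
}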
/*
* BIOS helpers.
*/
@@ -1887,12 +1882,14 @@ void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
+bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
+static inline bool amdgpu_has_atpx(void) { return false; }
#endif
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f52b1bf3d3d9..ad4329922f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -754,6 +754,35 @@ union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ u8 frev, crev;
+
+ /* get any igp specific overrides */
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 8:
+ case 9:
+ return igp_info->info_8.ucUMAChannelNumber * 64;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 4e0f488487f3..38d0fe32e5cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -148,6 +148,8 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev);
+
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id, u32 clock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 0218cea6be4d..a6649874e6ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -237,7 +237,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
- const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;
+ const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
struct drm_amdgpu_bo_list_entry *info;
struct amdgpu_bo_list *list;
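
[Editor's note] The cast changes here (and in the amdgpu_cs.c, amdgpu_gem.c and amdgpu_kms.c hunks below) route a 64-bit ioctl field through uintptr_t rather than long: uintptr_t is by definition wide enough to round-trip a pointer, which avoids truncation on ABIs where long and pointers differ in width. A minimal illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x = 42;
	uint64_t handle = (uintptr_t)&x;   /* pointer stored in a u64 field */
	int *p = (int *)(uintptr_t)handle; /* safe round-trip back */

	printf("%d\n", *p);                /* 42 */
	return 0;
}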
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 97f661372a1c..ec71b9320561 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -161,7 +161,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
/* get chunks */
- chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
+ chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
@@ -181,7 +181,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
- chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+ chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
@@ -192,7 +192,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+ cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
@@ -949,7 +949,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -960,7 +960,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
- offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
@@ -1242,6 +1242,7 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
if (r < 0)
return r;
@@ -1339,7 +1340,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
if (fences == NULL)
return -ENOMEM;
- fences_user = (void __user *)(unsigned long)(wait->in.fences);
+ fences_user = (void __user *)(uintptr_t)(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
@@ -1388,8 +1389,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
continue;
list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->it.start > addr ||
- addr > mapping->it.last)
+ if (mapping->start > addr ||
+ addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;
@@ -1397,8 +1398,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
}
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->it.start > addr ||
- addr > mapping->it.last)
+ if (mapping->start > addr ||
+ addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 83dda05325b8..483660742f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1040,43 +1040,60 @@ static bool amdgpu_check_pot_argument(int arg)
return (arg & (arg - 1)) == 0;
}
-static void amdgpu_get_block_size(struct amdgpu_device *adev)
-{
- /* from AI, asic starts to support multiple level VMPT */
- if (adev->asic_type >= CHIP_VEGA10) {
- if (amdgpu_vm_block_size != 9)
- dev_warn(adev->dev,
- "Multi-VMPT limits block size to one page!\n");
- amdgpu_vm_block_size = 9;
- return;
- }
+static void amdgpu_check_block_size(struct amdgpu_device *adev)
+{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (amdgpu_vm_block_size == -1) {
-
- /* Total bits covered by PD + PTs */
- unsigned bits = ilog2(amdgpu_vm_size) + 18;
-
- /* Make sure the PD is 4K in size up to 8GB address space.
- Above that split equal between PD and PTs */
- if (amdgpu_vm_size <= 8)
- amdgpu_vm_block_size = bits - 9;
- else
- amdgpu_vm_block_size = (bits + 3) / 2;
+ if (amdgpu_vm_block_size == -1)
+ return;
- } else if (amdgpu_vm_block_size < 9) {
+ if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
if (amdgpu_vm_block_size > 24 ||
(amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
dev_warn(adev->dev, "VM page table size (%d) too large\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
+
+ return;
+
+def_value:
+ amdgpu_vm_block_size = -1;
+}
+
+static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+{
+ if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+ dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ if (amdgpu_vm_size < 1) {
+ dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ /*
+ * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
+ */
+ if (amdgpu_vm_size > 1024) {
+ dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ return;
+
+def_value:
+ amdgpu_vm_size = -1;
}
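
[Editor's note] Two bits of arithmetic drive this hunk: amdgpu_check_pot_argument() uses the classic arg & (arg - 1) trick (zero only when at most one bit is set), and the page-table comment above encodes the 4KB-page split of 12 offset bits, at least 9 page-table bits, and the remainder in the page directory. A quick demonstration of both (values illustrative; note the trick also accepts 0):

#include <stdbool.h>
#include <stdio.h>

static bool is_pot(int arg)
{
	return (arg & (arg - 1)) == 0; /* also true for 0 */
}

int main(void)
{
	int vm_size_gb = 64;                       /* must be a power of two */
	int bits = __builtin_ctz(vm_size_gb) + 30; /* total VA bits: 64GB -> 36 */

	printf("pot(64)=%d pot(48)=%d\n", is_pot(64), is_pot(48)); /* 1 0 */
	/* 12 offset bits + 9 page-table bits leave the rest for the directory. */
	printf("page-directory bits: %d\n", bits - 12 - 9);        /* 15 */
	return 0;
}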
/**
@@ -1108,28 +1125,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
}
}
- if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
- dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
+ amdgpu_check_vm_size(adev);
- if (amdgpu_vm_size < 1) {
- dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
-
- /*
- * Max GPUVM size for Cayman, SI and CI are 40 bits.
- */
- if (amdgpu_vm_size > 1024) {
- dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
-
- amdgpu_get_block_size(adev);
+ amdgpu_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
@@ -2249,9 +2247,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
r = amdgpu_resume(adev);
- if (r)
+ if (r) {
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
-
+ return r;
+ }
amdgpu_fence_driver_resume(adev);
if (resume) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index ce15721cadda..96926a221bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -614,6 +614,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is an imported dma-buf, so it cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 400917fd7486..4e0f7d2d87f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -86,7 +86,7 @@ int amdgpu_runtime_pm = -1;
unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
-int amdgpu_vm_size = 64;
+int amdgpu_vm_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index f85520d4e711..03a9c5cad222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -717,7 +717,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
- void __user *out = (void __user *)(long)args->value;
+ void __user *out = (void __user *)(uintptr_t)args->value;
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
@@ -729,6 +729,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
+ if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
+ r = -EINVAL;
+ amdgpu_bo_unreserve(robj);
+ break;
+ }
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
amdgpu_bo_unreserve(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 13b487235a8b..a6b7e367a860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -316,9 +316,10 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
return -EINVAL;
if (!adev->irq.client[client_id].sources) {
- adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
- sizeof(struct amdgpu_irq_src),
- GFP_KERNEL);
+ adev->irq.client[client_id].sources =
+ kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+ sizeof(struct amdgpu_irq_src *),
+ GFP_KERNEL);
if (!adev->irq.client[client_id].sources)
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index dfb029ab3448..832be632478f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -36,12 +36,6 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx(void);
-#else
-static inline bool amdgpu_has_atpx(void) { return false; }
-#endif
-
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
*
@@ -243,7 +237,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
- void __user *out = (void __user *)(long)info->return_pointer;
+ void __user *out = (void __user *)(uintptr_t)info->return_pointer;
uint32_t size = info->return_size;
struct drm_crtc *crtc;
uint32_t ui32 = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 7ea3cacf9f9f..38f739fb727b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -31,6 +31,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
#include <drm/drmP.h>
#include <drm/drm.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5aac350b007f..cb89fff863c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -122,20 +122,19 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- unsigned lpfn = 0;
-
- /* This forces a reallocation if the flag wasn't set before */
- if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
- places[c].lpfn = lpfn;
+ places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
+
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
c++;
}
@@ -651,6 +650,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
+ /* A shared bo cannot be migrated to VRAM */
+ if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
+ return -EINVAL;
+
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
@@ -928,8 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
/* TODO: figure out how to map scattered VRAM to the CPU */
- if ((offset + size) <= adev->mc.visible_vram_size &&
- (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+ if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -937,7 +939,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4731015f6101..ed6e5799016e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -130,7 +130,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
while (*((unsigned int *)psp->fence_buf) != index) {
msleep(1);
- };
+ }
amdgpu_bo_free_kernel(&cmd_buf_bo,
&cmd_buf_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index a87de18160a8..ee9d0f346d75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -226,8 +226,8 @@ TRACE_EVENT(amdgpu_vm_bo_map,
TP_fast_assign(
__entry->bo = bo_va ? bo_va->bo : NULL;
- __entry->start = mapping->it.start;
- __entry->last = mapping->it.last;
+ __entry->start = mapping->start;
+ __entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
@@ -250,8 +250,8 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
TP_fast_assign(
__entry->bo = bo_va->bo;
- __entry->start = mapping->it.start;
- __entry->last = mapping->it.last;
+ __entry->start = mapping->start;
+ __entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
@@ -270,8 +270,8 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
),
TP_fast_assign(
- __entry->soffset = mapping->it.start;
- __entry->eoffset = mapping->it.last + 1;
+ __entry->soffset = mapping->start;
+ __entry->eoffset = mapping->last + 1;
__entry->flags = mapping->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 244bb9aacf86..35d53a0d9ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -529,40 +529,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_TT:
break;
case TTM_PL_VRAM:
- if (mem->start == AMDGPU_BO_INVALID_OFFSET)
- return -EINVAL;
-
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
return -EINVAL;
mem->bus.base = adev->mc.aper_base;
mem->bus.is_iomem = true;
-#ifdef __alpha__
- /*
- * Alpha: use bus.addr to hold the ioremap() return,
- * so we can modify bus.base below.
- */
- if (mem->placement & TTM_PL_FLAG_WC)
- mem->bus.addr =
- ioremap_wc(mem->bus.base + mem->bus.offset,
- mem->bus.size);
- else
- mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
- mem->bus.size);
- if (!mem->bus.addr)
- return -ENOMEM;
-
- /*
- * Alpha: Use just the bus offset plus
- * the hose/domain memory base for bus.base.
- * It then can be used to build PTEs for VRAM
- * access, as done in ttm_bo_vm_fault().
- */
- mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
- adev->ddev->hose->dense_mem_base;
-#endif
break;
default:
return -EINVAL;
@@ -574,6 +546,18 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
+static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ struct drm_mm_node *mm = bo->mem.mm_node;
+ uint64_t size = mm->size;
+ uint64_t offset = page_offset;
+
+ page_offset = do_div(offset, size);
+ mm += offset;
+ return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+}
+
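
[Editor's note] amdgpu_ttm_io_mem_pfn() above walks the drm_mm nodes of a scattered VRAM BO; the kernel's do_div() stores the quotient (here, the node index) back into its argument and returns the remainder (the page within that node), assuming equally sized nodes. A user-space re-creation of that split, with do_div semantics emulated:

#include <stdint.h>
#include <stdio.h>

/* Emulates the kernel's do_div(): n becomes the quotient, remainder returned. */
static uint32_t do_div_emul(uint64_t *n, uint32_t base)
{
	uint32_t rem = *n % base;

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t node_pages = 256;   /* hypothetical pages per drm_mm node */
	uint64_t offset = 700;       /* page offset into the BO */

	uint32_t page_in_node = do_div_emul(&offset, node_pages);

	printf("node %llu, page %u\n",
	       (unsigned long long)offset, page_in_node); /* node 2, page 188 */
	return 0;
}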
/*
* TTM backend functions.
*/
@@ -1089,6 +1073,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
+ .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 0b92dd0c1d70..2ca09f111f08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -741,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
start = amdgpu_bo_gpu_offset(bo);
- end = (mapping->it.last + 1 - mapping->it.start);
+ end = (mapping->last + 1 - mapping->start);
end = end * AMDGPU_GPU_PAGE_SIZE + start;
- addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
start += addr;
amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0184197eb000..c853400805d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -595,13 +595,13 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
}
if ((addr + (uint64_t)size) >
- ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
- addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
addr += amdgpu_bo_gpu_offset(bo);
addr -= ((uint64_t)size) * ((uint64_t)index);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ecef35a1fe33..ba8b8ae6234f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -122,9 +122,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);
@@ -150,9 +148,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0235d7933efd..7ed5302b511a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include <linux/dma-fence-array.h>
+#include <linux/interval_tree_generic.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -51,6 +52,15 @@
* SI supports 16.
*/
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
+ START, LAST, static, amdgpu_vm_it)
+
+#undef START
+#undef LAST
+
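
[Editor's note] INTERVAL_TREE_DEFINE() expands to static helpers named after its last argument — here amdgpu_vm_it_insert(), amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and amdgpu_vm_it_iter_next() — keyed by the start/last fields now embedded in amdgpu_bo_va_mapping. The sketch below only models the overlap predicate those iterators answer, using a linear scan instead of the O(log n) augmented rbtree (not the actual amdgpu code):

#include <stdint.h>
#include <stdio.h>

struct mapping { uint64_t start, last; };

/* Linear stand-in for amdgpu_vm_it_iter_first()/_next(): report every
 * mapping whose [start, last] range overlaps [lo, hi]. */
static void for_each_overlap(const struct mapping *m, int n,
			     uint64_t lo, uint64_t hi)
{
	for (int i = 0; i < n; i++)
		if (m[i].start <= hi && m[i].last >= lo)
			printf("overlap: [%llu, %llu]\n",
			       (unsigned long long)m[i].start,
			       (unsigned long long)m[i].last);
}

int main(void)
{
	const struct mapping maps[] = { { 0, 15 }, { 16, 31 }, { 64, 95 } };

	for_each_overlap(maps, 3, 20, 70); /* hits [16,31] and [64,95] */
	return 0;
}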
/* Local structure. Encapsulate some VM table update parameters to reduce
* the number of function parameters
*/
@@ -90,13 +100,14 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == 0)
/* For the root directory */
return adev->vm_manager.max_pfn >>
- (amdgpu_vm_block_size * adev->vm_manager.num_level);
+ (adev->vm_manager.block_size *
+ adev->vm_manager.num_level);
else if (level == adev->vm_manager.num_level)
/* For the page tables on the leaves */
- return AMDGPU_VM_PTE_COUNT;
+ return AMDGPU_VM_PTE_COUNT(adev);
else
/* Everything in between */
- return 1 << amdgpu_vm_block_size;
+ return 1 << adev->vm_manager.block_size;
}
/**
@@ -261,7 +272,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
unsigned level)
{
unsigned shift = (adev->vm_manager.num_level - level) *
- amdgpu_vm_block_size;
+ adev->vm_manager.block_size;
unsigned pt_idx, from, to;
int r;
@@ -365,11 +376,19 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}
-static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
- struct amdgpu_vm_id *id)
+/**
+ * amdgpu_vm_had_gpu_reset - check if reset occured since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if GPU reset occured since last use of the VMID.
+ */
+static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vm_id *id)
{
return id->current_gpu_reset_count !=
- atomic_read(&adev->gpu_reset_counter) ? true : false;
+ atomic_read(&adev->gpu_reset_counter);
}
/**
@@ -455,7 +474,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
- if (amdgpu_vm_is_gpu_reset(adev, id))
+ if (amdgpu_vm_had_gpu_reset(adev, id))
continue;
if (atomic64_read(&id->owner) != vm->client_id)
@@ -483,7 +502,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
@@ -504,9 +522,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- dma_fence_put(id->first);
- id->first = dma_fence_get(fence);
-
dma_fence_put(id->last_flush);
id->last_flush = NULL;
@@ -557,8 +572,8 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
u64 addr = mc_addr;
- if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr)
- addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr);
+ if (adev->gart.gart_funcs->adjust_mc_addr)
+ addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
return addr;
}
@@ -583,60 +598,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
id->gws_size != job->gws_size ||
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
+ bool vm_flush_needed = job->vm_needs_flush ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring);
+ unsigned patch_offset = 0;
int r;
- if (job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_is_gpu_reset(adev, id) ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)) {
- unsigned patch_offset = 0;
+ if (amdgpu_vm_had_gpu_reset(adev, id)) {
+ gds_switch_needed = true;
+ vm_flush_needed = true;
+ }
- if (ring->funcs->init_cond_exec)
- patch_offset = amdgpu_ring_init_cond_exec(ring);
+ if (!vm_flush_needed && !gds_switch_needed)
+ return 0;
- if (ring->funcs->emit_pipeline_sync &&
- (job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)))
- amdgpu_ring_emit_pipeline_sync(ring);
+ if (ring->funcs->init_cond_exec)
+ patch_offset = amdgpu_ring_init_cond_exec(ring);
- if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
- amdgpu_vm_is_gpu_reset(adev, id))) {
- struct dma_fence *fence;
- u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+ if (ring->funcs->emit_pipeline_sync)
+ amdgpu_ring_emit_pipeline_sync(ring);
- trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+ if (ring->funcs->emit_vm_flush && vm_flush_needed) {
+ u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+ struct dma_fence *fence;
- r = amdgpu_fence_emit(ring, &fence);
- if (r)
- return r;
+ trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
- mutex_lock(&adev->vm_manager.lock);
- dma_fence_put(id->last_flush);
- id->last_flush = fence;
- mutex_unlock(&adev->vm_manager.lock);
- }
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
- if (gds_switch_needed) {
- id->gds_base = job->gds_base;
- id->gds_size = job->gds_size;
- id->gws_base = job->gws_base;
- id->gws_size = job->gws_size;
- id->oa_base = job->oa_base;
- id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
- }
+ mutex_lock(&adev->vm_manager.lock);
+ dma_fence_put(id->last_flush);
+ id->last_flush = fence;
+ mutex_unlock(&adev->vm_manager.lock);
+ }
- if (ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ if (gds_switch_needed) {
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+ job->gds_size, job->gws_base,
+ job->gws_size, job->oa_base,
+ job->oa_size);
+ }
- /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
- if (ring->funcs->emit_switch_buffer) {
- amdgpu_ring_emit_switch_buffer(ring);
- amdgpu_ring_emit_switch_buffer(ring);
- }
+ if (ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+ /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+ if (ring->funcs->emit_switch_buffer) {
+ amdgpu_ring_emit_switch_buffer(ring);
+ amdgpu_ring_emit_switch_buffer(ring);
}
return 0;
}
@@ -960,7 +977,7 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
unsigned idx, level = p->adev->vm_manager.num_level;
while (entry->entries) {
- idx = addr >> (amdgpu_vm_block_size * level--);
+ idx = addr >> (p->adev->vm_manager.block_size * level--);
idx %= amdgpu_bo_size(entry->bo) / 8;
entry = &entry->entries[idx];
}
@@ -987,7 +1004,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
- const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+ struct amdgpu_device *adev = params->adev;
+ const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
uint64_t cur_pe_start, cur_nptes, cur_dst;
uint64_t addr; /* next GPU address to be updated */
@@ -1011,7 +1029,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
- nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+ nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
cur_pe_start = amdgpu_bo_gpu_offset(pt);
cur_pe_start += (addr & mask) * 8;
@@ -1039,7 +1057,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
- nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+ nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
next_pe_start = amdgpu_bo_gpu_offset(pt);
next_pe_start += (addr & mask) * 8;
@@ -1186,7 +1204,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
* reserve space for one command every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
*/
- ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
+ ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
/* padding, etc. */
ndw = 64;
@@ -1301,7 +1319,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
- uint64_t pfn, src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits, which go here
@@ -1353,7 +1371,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
addr += pfn << PAGE_SHIFT;
- last = min((uint64_t)mapping->it.last, start + max_entries - 1);
+ last = min((uint64_t)mapping->last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1368,7 +1386,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
start = last + 1;
- } while (unlikely(start != mapping->it.last + 1));
+ } while (unlikely(start != mapping->last + 1));
return 0;
}
@@ -1518,7 +1536,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
if (fence)
dma_fence_wait(fence, false);
- amdgpu_vm_prt_put(cb->adev);
+ amdgpu_vm_prt_put(adev);
} else {
cb->adev = adev;
if (!fence || dma_fence_add_callback(fence, &cb->cb,
@@ -1724,9 +1742,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t saddr, uint64_t offset,
uint64_t size, uint64_t flags)
{
- struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_vm *vm = bo_va->vm;
- struct interval_tree_node *it;
uint64_t eaddr;
/* validate the parameters */
@@ -1743,14 +1760,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- if (it) {
- struct amdgpu_bo_va_mapping *tmp;
- tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
- "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
- tmp->it.start, tmp->it.last + 1);
+ "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+ tmp->start, tmp->last + 1);
return -EINVAL;
}
@@ -1759,13 +1774,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -ENOMEM;
INIT_LIST_HEAD(&mapping->list);
- mapping->it.start = saddr;
- mapping->it.last = eaddr;
+ mapping->start = saddr;
+ mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
- interval_tree_insert(&mapping->it, &vm->va);
+ amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
@@ -1823,13 +1838,13 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- mapping->it.start = saddr;
- mapping->it.last = eaddr;
+ mapping->start = saddr;
+ mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
- interval_tree_insert(&mapping->it, &vm->va);
+ amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
@@ -1860,7 +1875,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
list_for_each_entry(mapping, &bo_va->valids, list) {
- if (mapping->it.start == saddr)
+ if (mapping->start == saddr)
break;
}
@@ -1868,7 +1883,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
valid = false;
list_for_each_entry(mapping, &bo_va->invalids, list) {
- if (mapping->it.start == saddr)
+ if (mapping->start == saddr)
break;
}
@@ -1877,7 +1892,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
}
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
@@ -1905,7 +1920,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size)
{
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
- struct interval_tree_node *it;
LIST_HEAD(removed);
uint64_t eaddr;
@@ -1927,43 +1941,42 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
INIT_LIST_HEAD(&after->list);
/* Now gather all removed mappings */
- it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- while (it) {
- tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
- it = interval_tree_iter_next(it, saddr, eaddr);
-
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ while (tmp) {
/* Remember mapping split at the start */
- if (tmp->it.start < saddr) {
- before->it.start = tmp->it.start;
- before->it.last = saddr - 1;
+ if (tmp->start < saddr) {
+ before->start = tmp->start;
+ before->last = saddr - 1;
before->offset = tmp->offset;
before->flags = tmp->flags;
list_add(&before->list, &tmp->list);
}
/* Remember mapping split at the end */
- if (tmp->it.last > eaddr) {
- after->it.start = eaddr + 1;
- after->it.last = tmp->it.last;
+ if (tmp->last > eaddr) {
+ after->start = eaddr + 1;
+ after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->it.start - tmp->it.start;
+ after->offset += after->start - tmp->start;
after->flags = tmp->flags;
list_add(&after->list, &tmp->list);
}
list_del(&tmp->list);
list_add(&tmp->list, &removed);
+
+ tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
}
/* And free them up */
list_for_each_entry_safe(tmp, next, &removed, list) {
- interval_tree_remove(&tmp->it, &vm->va);
+ amdgpu_vm_it_remove(tmp, &vm->va);
list_del(&tmp->list);
- if (tmp->it.start < saddr)
- tmp->it.start = saddr;
- if (tmp->it.last > eaddr)
- tmp->it.last = eaddr;
+ if (tmp->start < saddr)
+ tmp->start = saddr;
+ if (tmp->last > eaddr)
+ tmp->last = eaddr;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
@@ -1971,7 +1984,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
- interval_tree_insert(&before->it, &vm->va);
+ amdgpu_vm_it_insert(before, &vm->va);
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
@@ -1980,7 +1993,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
- interval_tree_insert(&after->it, &vm->va);
+ amdgpu_vm_it_insert(after, &vm->va);
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
@@ -2014,13 +2027,13 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
amdgpu_vm_free_mapping(adev, vm, mapping,
bo_va->last_pt_update);
}
@@ -2051,6 +2064,44 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
}
+static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
+{
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(vm_size) + 18;
+
+ /* Make sure the PD is 4K in size up to 8GB of address space.
+ Above that, split equally between PD and PTs. */
+ if (vm_size <= 8)
+ return (bits - 9);
+ else
+ return ((bits + 3) / 2);
+}
+
+/**
+ * amdgpu_vm_adjust_size - adjust vm size and block size
+ *
+ * @adev: amdgpu_device pointer
+ * @vm_size: the default vm size to use if the module parameter is set to auto
+ */
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+{
+ /* adjust vm size first */
+ if (amdgpu_vm_size == -1)
+ adev->vm_manager.vm_size = vm_size;
+ else
+ adev->vm_manager.vm_size = amdgpu_vm_size;
+
+ /* block size depends on vm size */
+ if (amdgpu_vm_block_size == -1)
+ adev->vm_manager.block_size =
+ amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
+ else
+ adev->vm_manager.block_size = amdgpu_vm_block_size;
+
+ DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size);
+}
+
/**
* amdgpu_vm_init - initialize a vm instance
*
@@ -2062,7 +2113,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
- AMDGPU_VM_PTE_COUNT * 8);
+ AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
struct amdgpu_ring *ring;
struct amd_sched_rq *rq;
@@ -2162,9 +2213,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
- rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
+ rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
}
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
@@ -2227,7 +2278,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush);
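
The headline change in amdgpu_vm.c is the switch from the generic struct interval_tree_node (mapping->it.start/it.last plus container_of) to a typed interval tree generated by INTERVAL_TREE_DEFINE directly on the mapping's embedded rb node. The generated helpers follow the stock interval_tree_generic.h pattern, so scanning the mappings overlapping [saddr, eaddr] now looks like this sketch (kernel context, not standalone; start/last are page numbers and inclusive):

	struct amdgpu_bo_va_mapping *m;

	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr)) {
		/* m overlaps [saddr, eaddr]; a one-page mapping has
		 * m->start == m->last */
	}

Note that in the clear_mappings loop the iterator advance moved to the bottom of the loop body, because the iteration variable is now the mapping itself rather than a separate interval_tree_node.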
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index fbe17bf73a00..d9e57290dc71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -45,7 +45,7 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
@@ -76,6 +76,14 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* max number of VMHUB */
+#define AMDGPU_MAX_VMHUBS 2
+#define AMDGPU_GFXHUB 0
+#define AMDGPU_MMHUB 1
+
+/* hardcode that limit for now */
+#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
@@ -123,7 +131,6 @@ struct amdgpu_vm {
struct amdgpu_vm_id {
struct list_head list;
- struct dma_fence *first;
struct amdgpu_sync active;
struct dma_fence *last_flush;
atomic64_t owner;
@@ -155,6 +162,8 @@ struct amdgpu_vm_manager {
uint64_t max_pfn;
uint32_t num_level;
+ uint64_t vm_size;
+ uint32_t block_size;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
@@ -225,5 +234,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
#endif
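
AMDGPU_VM_PTE_COUNT now takes the device so the page-table block size can differ per ASIC instead of coming from the global module parameter. A standalone worked example of the sizing this implies, assuming the historical 9-bit default block size:

	#include <stdio.h>

	int main(void)
	{
		unsigned block_size = 9;		/* adev->vm_manager.block_size */
		unsigned pte_count = 1u << block_size;	/* AMDGPU_VM_PTE_COUNT(adev) */
		unsigned ptb_align = 32768;		/* AMDGPU_VM_PTB_ALIGN_SIZE */
		/* the align computed in amdgpu_vm_init(): min(32K, PTE count * 8) */
		unsigned align = pte_count * 8 < ptb_align ? pte_count * 8 : ptb_align;

		printf("%u PTEs -> %u-byte table, %u-byte align\n",
		       pte_count, pte_count * 8, align);	/* 512 -> 4096, 4096 */
		return 0;
	}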
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9e577e3d3147..a4831fe0223b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -93,7 +93,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
- struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
@@ -106,8 +105,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
- place->lpfn || amdgpu_vram_page_split == -1) {
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
+ amdgpu_vram_page_split == -1) {
pages_per_node = ~0ul;
num_nodes = 1;
} else {
@@ -124,12 +123,14 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
+ mem->start = 0;
pages_left = mem->num_pages;
spin_lock(&mgr->lock);
for (i = 0; i < num_nodes; ++i) {
unsigned long pages = min(pages_left, pages_per_node);
uint32_t alignment = mem->page_alignment;
+ unsigned long start;
if (pages == pages_per_node)
alignment = pages_per_node;
@@ -141,11 +142,19 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r))
goto error;
+ /* Calculate a virtual BO start address to easily check if
+ * everything is CPU accessible.
+ */
+ start = nodes[i].start + nodes[i].size;
+ if (start > mem->num_pages)
+ start -= mem->num_pages;
+ else
+ start = 0;
+ mem->start = max(mem->start, start);
pages_left -= pages;
}
spin_unlock(&mgr->lock);
- mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
mem->mm_node = nodes;
return 0;
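
The vram manager change replaces the all-or-nothing AMDGPU_BO_INVALID_OFFSET with a conservative virtual start address: for every drm_mm node, start = max(0, node_end - num_pages), and mem->start is the maximum over all nodes, so mem->start + num_pages always bounds the highest page any node touches. That makes a single comparison against the CPU-visible VRAM size sufficient. A standalone worked example with made-up node placements:

	#include <stdio.h>

	int main(void)
	{
		unsigned long num_pages = 100;	/* size of the BO in pages */
		struct { unsigned long start, size; } nodes[] = {
			{ 0, 50 },	/* ends at page 50  -> contributes 0   */
			{ 200, 50 },	/* ends at page 250 -> contributes 150 */
		};
		unsigned long mem_start = 0;

		for (int i = 0; i < 2; i++) {
			unsigned long end = nodes[i].start + nodes[i].size;
			unsigned long s = end > num_pages ? end - num_pages : 0;
			if (s > mem_start)
				mem_start = s;
		}
		/* mem_start + num_pages = 250 >= every node end, so checking
		 * mem_start + num_pages against the CPU-visible limit is enough */
		printf("virtual start: page %lu\n", mem_start);	/* 150 */
		return 0;
	}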
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index daf003dd2351..ba98d35340a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1090,23 +1090,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1214,14 +1201,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1236,7 +1223,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1275,7 +1262,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
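
The dce_v10_0 watermark hunks (repeated verbatim for dce_v11_0, dce_v6_0 and dce_v8_0 below) fix an integer-precision problem: the old code truncated pixel_period = 1000000 / clock to whole nanoseconds before multiplying. A standalone example with an illustrative 1080p mode shows the size of the error:

	#include <stdio.h>

	int main(void)
	{
		unsigned int clock = 148500;		/* mode->clock, in kHz */
		unsigned int hdisplay = 1920, htotal = 2200;

		/* new: multiply first, divide last */
		unsigned int active_time = 1000000UL * hdisplay / clock;	/* 12929 ns */
		unsigned int line_time   = 1000000UL * htotal / clock;		/* 14814 ns */
		if (line_time > 65535)
			line_time = 65535;

		/* old: 1000000 / 148500 truncates to 6 ns per pixel */
		unsigned int old_line_time = (1000000 / clock) * htotal;	/* 13200 ns */

		printf("%u %u vs old %u\n", active_time, line_time, old_line_time);
		return 0;
	}

The dfixed_* fixed-point chain for lb_fill_bw collapses the same way: dmif_size * disp_clk / (mc_latency + 512) is done as one 64-bit div_u64, then min()'d against disp_clk * bytes_per_pixel / 1000.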
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 3a7296724457..e59bc42df18c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1059,23 +1059,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1183,14 +1170,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1205,7 +1192,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1244,7 +1231,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 8ccada5d6f39..307269bda4fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -861,23 +861,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -986,7 +973,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce6_wm_params wm_low, wm_high;
u32 dram_channels;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
@@ -996,8 +983,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
@@ -1016,7 +1003,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1043,7 +1030,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 6943f2641c90..6df7a28e8aac 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -974,23 +974,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1098,14 +1085,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1120,7 +1107,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1159,7 +1146,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e0fa0d30e162..dad8a4cd1b37 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4565,6 +4565,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.kiq.ring.ready = false;
}
udelay(50);
}
@@ -4721,14 +4722,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
-
- if (ring->use_doorbell)
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 0);
+ tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
+ CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN,
+ ring->use_doorbell ? 1 : 0);
mqd->cp_hqd_pq_doorbell_control = tmp;
@@ -4816,13 +4813,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
- uint32_t tmp;
int j;
/* disable wptr polling */
- tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+ WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo);
WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi);
@@ -4834,10 +4828,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
- if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+ if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
- if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+ if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
@@ -4894,11 +4888,8 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
/* activate the queue */
WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
- if (ring->use_doorbell) {
- tmp = RREG32(mmCP_PQ_STATUS);
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(mmCP_PQ_STATUS, tmp);
- }
+ if (ring->use_doorbell)
+ WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
@@ -5471,19 +5462,18 @@ static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
{
int i;
+ mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
- u32 tmp;
- tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
- tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
- DEQUEUE_REQ, 2);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+ WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, 2);
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
}
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v8_0_pre_soft_reset(void *handle)
@@ -5589,11 +5579,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
+ mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
WREG32(mmCP_HQD_PQ_RPTR, 0);
WREG32(mmCP_HQD_PQ_WPTR, 0);
vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v8_0_post_soft_reset(void *handle)
@@ -6986,40 +6978,24 @@ static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
unsigned int type,
enum amdgpu_interrupt_state state)
{
- uint32_t tmp, target;
struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
BUG_ON(ring->funcs->type != AMDGPU_RING_TYPE_KIQ);
- if (ring->me == 1)
- target = mmCP_ME1_PIPE0_INT_CNTL;
- else
- target = mmCP_ME2_PIPE0_INT_CNTL;
- target += ring->pipe;
-
switch (type) {
case AMDGPU_CP_KIQ_IRQ_DRIVER0:
- if (state == AMDGPU_IRQ_STATE_DISABLE) {
- tmp = RREG32(mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32(mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32(target, tmp);
- } else {
- tmp = RREG32(mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32(mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32(target, tmp);
- }
+ WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+ if (ring->me == 1)
+ WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
+ ring->pipe,
+ GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+ else
+ WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
+ ring->pipe,
+ GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
break;
default:
BUG(); /* kiq only supports GENERIC2_INT for now */
@@ -7159,8 +7135,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
- .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
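
Most of the gfx_v8_0 churn replaces open-coded RREG32/REG_SET_FIELD/WREG32 triples with the WREG32_FIELD and WREG32_FIELD_OFFSET helpers. A minimal standalone rendering of the read-modify-write those macros wrap; the mask/shift constants and the fake register file here are made up, whereas the real macros derive them from the generated <REG>__<FIELD>_MASK/__SHIFT defines:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t regs[1];	/* stand-in for MMIO space */

	#define FIELD_MASK  0x00000030u
	#define FIELD_SHIFT 4

	static void wreg32_field(unsigned r, uint32_t val)
	{
		uint32_t tmp = regs[r];				/* RREG32 */
		tmp &= ~FIELD_MASK;				/* clear the field */
		tmp |= (val << FIELD_SHIFT) & FIELD_MASK;	/* REG_SET_FIELD */
		regs[r] = tmp;					/* WREG32 */
	}

	int main(void)
	{
		regs[0] = 0xdeadbeef;
		wreg32_field(0, 1);	/* one line replaces the three-call dance */
		printf("0x%08x\n", regs[0]);	/* 0xdeadbedf */
		return 0;
	}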
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 669bb98fc45d..a447b70841c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1288,9 +1288,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL));
- tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v9_0_tiling_mode_table_init(adev);
@@ -1395,13 +1393,9 @@ void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET));
-
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
@@ -1410,10 +1404,8 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
/* carrizo do enable cp interrupt after cp inited */
if (!(adev->flags & AMD_IS_APU))
@@ -1497,14 +1489,10 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
int i;
u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL));
- if (enable) {
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
+ if (!enable) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].ready = false;
}
@@ -2020,13 +2008,10 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
- uint32_t tmp;
int j;
/* disable wptr polling */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
mqd->cp_hqd_eop_base_addr_lo);
@@ -2118,11 +2103,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE),
mqd->cp_hqd_active);
- if (ring->use_doorbell) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
- }
+ if (ring->use_doorbell)
+ WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
@@ -2366,177 +2348,6 @@ static int gfx_v9_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void gfx_v9_0_print_status(void *handle)
-{
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "GFX 9.x registers\n");
- dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)));
- dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)));
- dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)));
- dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)));
- dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)));
- dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)));
- dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STAT)));
- dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)));
- dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)));
- dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)));
- dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)));
- dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)));
- dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)));
- dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT)));
- dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)));
- dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)));
-
- for (i = 0; i < 32; i++) {
- dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
- i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_TILE_MODE0 ) + i*4));
- }
- for (i = 0; i < 16; i++) {
- dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
- i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_MACROTILE_MODE0) + i*4));
- }
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- dev_info(adev->dev, " se: %d\n", i);
- gfx_v9_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG)));
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG_1)));
- }
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-
- dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)));
-
- dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEQ_THRESHOLDS)));
- dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSX_DEBUG_1)));
- dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX)));
- dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL)));
- dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG)));
- dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG)));
- dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)));
- dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG3)));
- dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL)));
- dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1)));
- dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FIFO_SIZE)));
- dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_NUM_INSTANCES)));
- dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PERFMON_CNTL)));
- dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FORCE_EOV_MAX_CNTS)));
- dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION)));
- dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_GS_VERTEX_REUSE)));
- dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE)));
- dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_CL_ENHANCE)));
- dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE)));
-
- dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)));
- dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MAX_CONTEXT)));
- dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_DEVICE_ID)));
-
- dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_SEM_WAIT_TIMER)));
-
- dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_DELAY)));
- dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_VMID)));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
- dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR)));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR)));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR_HI)));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
- dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE)));
- dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE_HI)));
- dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL)));
-
- dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_ADDR)));
- dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_UMSK)));
-
- dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
- dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL)));
- dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)));
- dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_INIT)));
- dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_MAX)));
- dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_INIT_CU_MASK)));
- dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_PARAMS)));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
- dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_UCODE_CNTL)));
-
- dev_info(adev->dev, " RLC_GPM_GENERAL_6=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_6)));
- dev_info(adev->dev, " RLC_GPM_GENERAL_12=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_12)));
- dev_info(adev->dev, " RLC_GPM_TIMER_INT_3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_TIMER_INT_3)));
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < 16; i++) {
- soc15_grbm_select(adev, 0, 0, 0, i);
- dev_info(adev->dev, " VM %d:\n", i);
- dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)));
- dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES)));
- }
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
static int gfx_v9_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0;
@@ -2569,8 +2380,7 @@ static int gfx_v9_0_soft_reset(void *handle)
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- if (grbm_soft_reset ) {
- gfx_v9_0_print_status((void *)adev);
+ if (grbm_soft_reset) {
/* stop the rlc */
gfx_v9_0_rlc_stop(adev);
@@ -2596,7 +2406,6 @@ static int gfx_v9_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- gfx_v9_0_print_status((void *)adev);
}
return 0;
}
@@ -3148,6 +2957,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -3157,7 +2967,6 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
hub->ctx0_ptb_addr_lo32
@@ -3376,21 +3185,12 @@ static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl =
- REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
@@ -3446,20 +3246,12 @@ static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
@@ -3473,21 +3265,12 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
default:
break;
}
@@ -3759,8 +3542,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
- .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -3975,9 +3756,7 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
ring->pipe,
ring->queue, 0);
/* disable wptr polling */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
/* write the EOP addr */
BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */
@@ -4121,11 +3900,8 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
amdgpu_bo_kunmap(ring->mqd_obj);
amdgpu_bo_unreserve(ring->mqd_obj);
- if (use_doorbell) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
- }
+ if (use_doorbell)
+ WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
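
The interrupt-state setters in gfx_v9_0 collapse the DISABLE/ENABLE branches into one write, relying on deliberate case fallthrough plus a ternary on the state (the priv_inst hunk even falls through into default:, which is harmless since default only breaks). The shape, as a standalone sketch:

	#include <stdio.h>

	enum irq_state { IRQ_STATE_DISABLE, IRQ_STATE_ENABLE, IRQ_STATE_OTHER };

	static void set_int_enable(int on) { printf("enable bit = %d\n", on); }

	static void set_state(enum irq_state state)
	{
		switch (state) {
		case IRQ_STATE_DISABLE:		/* fallthrough: same write,  */
		case IRQ_STATE_ENABLE:		/* value picked by the ternary */
			set_int_enable(state == IRQ_STATE_ENABLE ? 1 : 0);
			break;
		default:
			break;
		}
	}

	int main(void)
	{
		set_state(IRQ_STATE_DISABLE);
		set_state(IRQ_STATE_ENABLE);
		return 0;
	}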
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 30ef3126c8a9..005075ff00f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -222,7 +222,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@@ -299,36 +299,6 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
-static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vm_id*/
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
-}
-
-static uint32_t gfxhub_v1_0_get_vm_protection_bits(void)
-{
- return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
-}
-
static int gfxhub_v1_0_early_init(void *handle)
{
return 0;
@@ -361,9 +331,6 @@ static int gfxhub_v1_0_sw_init(void *handle)
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
- hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
- hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d9586601a437..631aef38126d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -543,7 +543,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
- ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+ ((adev->vm_manager.block_size - 9)
+ << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v6_0_set_fault_enable_default(adev, false);
else
@@ -848,7 +849,8 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
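
gmc_v6_0 (and gmc_v7_0 below) now route through amdgpu_vm_adjust_size() with a 64 GB default instead of reading amdgpu_vm_size directly, and max_pfn is derived from the adjusted size (vm_size is in GB, and 1 GB is 2^18 4K pages, hence the << 18). A standalone re-run of the arithmetic, assuming both module parameters are left at -1 (auto):

	#include <stdio.h>
	#include <stdint.h>

	static unsigned ilog2_u64(uint64_t v)	/* stand-in for the kernel's ilog2() */
	{
		unsigned r = 0;
		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		uint64_t vm_size = 64;				/* GB, the new default */
		unsigned bits = ilog2_u64(vm_size) + 18;	/* bits covered by PD + PTs */
		unsigned block_size = vm_size <= 8 ? bits - 9	/* keep the PD at 4K */
						   : (bits + 3) / 2;
		uint64_t max_pfn = vm_size << 18;		/* GB -> 4K pages */

		printf("vm size %llu GB, block size %u-bit, max_pfn %llu\n",
		       (unsigned long long)vm_size, block_size,
		       (unsigned long long)max_pfn);	/* 64 GB, 13-bit, 16777216 */
		return 0;
	}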
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0c0a6015cca5..92abe12d92bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -37,6 +37,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_atombios.h"
+
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
@@ -325,48 +327,51 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
- int chansize, numchan;
-
- /* Get VRAM informations */
- tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
- chansize = 64;
- } else {
- chansize = 32;
- }
- tmp = RREG32(mmMC_SHARED_CHMAP);
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 4;
- break;
- case 3:
- numchan = 8;
- break;
- case 4:
- numchan = 3;
- break;
- case 5:
- numchan = 6;
- break;
- case 6:
- numchan = 10;
- break;
- case 7:
- numchan = 12;
- break;
- case 8:
- numchan = 16;
- break;
+ adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@@ -639,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v7_0_set_fault_enable_default(adev, false);
@@ -998,7 +1003,8 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
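gmc_v7_0_mc_init() now asks the VBIOS for the memory width first and only falls back to decoding MC_ARB_RAMCFG/MC_SHARED_CHMAP when amdgpu_atombios_get_vram_width() returns 0. A sketch of that preferred-source-with-fallback shape, with hypothetical helper names standing in for the BIOS query and the register probe:

#include <stdio.h>

/* hypothetical stand-ins: a BIOS query that returns 0 when the table
 * is silent, and the MC register probe it falls back to */
static unsigned int vram_width_from_bios(void)
{
        return 0;                     /* simulate "not provided" */
}

static unsigned int vram_width_from_regs(void)
{
        unsigned int chansize = 64;   /* MC_ARB_RAMCFG.CHANSIZE set */
        unsigned int numchan = 8;     /* MC_SHARED_CHMAP.NOOFCHAN == 3 */

        return numchan * chansize;
}

int main(void)
{
        unsigned int width = vram_width_from_bios();

        if (!width)                   /* BIOS silent: probe the MC */
                width = vram_width_from_regs();
        printf("vram width: %u bits\n", width);
        return 0;
}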
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d19d1c5e2847..f2ccefc66fd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -38,6 +38,8 @@
#include "vid.h"
#include "vi.h"
+#include "amdgpu_atombios.h"
+
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -487,48 +489,51 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
- int chansize, numchan;
-
- /* Get VRAM informations */
- tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
- chansize = 64;
- } else {
- chansize = 32;
- }
- tmp = RREG32(mmMC_SHARED_CHMAP);
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 4;
- break;
- case 3:
- numchan = 8;
- break;
- case 4:
- numchan = 3;
- break;
- case 5:
- numchan = 6;
- break;
- case 6:
- numchan = 10;
- break;
- case 7:
- numchan = 12;
- break;
- case 8:
- numchan = 16;
- break;
+ adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@@ -848,7 +853,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v8_0_set_fault_enable_default(adev, false);
@@ -1082,7 +1087,8 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
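As in gmc_v7_0, the gart-enable hunk programs PAGE_TABLE_BLOCK_SIZE from the per-device adev->vm_manager.block_size instead of the global module parameter. The field appears to encode the block size relative to the 9-bit minimum (2^9 pages x 4 KiB = 2 MiB per page-table block), which is why 9 is subtracted before the write; a quick check:

#include <stdio.h>

int main(void)
{
        unsigned int block_size = 9;             /* log2(pages per page-table block) */
        unsigned int reg_field = block_size - 9; /* PAGE_TABLE_BLOCK_SIZE encoding */
        unsigned long bytes = (1UL << block_size) * 4096;

        /* 9 bits -> 512 pages -> 2 MiB per block, encoded as 0 */
        printf("block: %lu KiB, register field: %u\n", bytes >> 10, reg_field);
        return 0;
}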
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index df69aae99df4..3b045e0b114e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -75,11 +75,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub;
u32 tmp, reg, bits, i;
+ bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -89,7 +96,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -100,7 +106,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -110,7 +115,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -129,8 +133,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct amdgpu_vmhub *gfxhub = &adev->vmhub[AMDGPU_GFXHUB];
- struct amdgpu_vmhub *mmhub = &adev->vmhub[AMDGPU_MMHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
uint32_t status = 0;
u64 addr;
@@ -138,13 +141,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (!amdgpu_sriov_vf(adev)) {
- if (entry->vm_id_src) {
- status = RREG32(mmhub->vm_l2_pro_fault_status);
- WREG32_P(mmhub->vm_l2_pro_fault_cntl, 1, ~1);
- } else {
- status = RREG32(gfxhub->vm_l2_pro_fault_status);
- WREG32_P(gfxhub->vm_l2_pro_fault_cntl, 1, ~1);
- }
+ status = RREG32(hub->vm_l2_pro_fault_status);
+ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
if (printk_ratelimit()) {
@@ -175,6 +173,25 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vm_id */
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -204,7 +221,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
- u32 tmp = hub->get_invalidate_req(vmid);
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
@@ -337,30 +354,23 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
- .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
- .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags
-};
-
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
-{
- if (adev->gart.gart_funcs == NULL)
- adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
-}
-
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}
-static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = {
+static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
+ .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
+ .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
+ .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
+ .get_invalidate_req = gmc_v9_0_get_invalidate_req,
};
-static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
- adev->mc.mc_funcs = &gmc_v9_0_mc_funcs;
+ if (adev->gart.gart_funcs == NULL)
+ adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}
static int gmc_v9_0_early_init(void *handle)
@@ -368,7 +378,6 @@ static int gmc_v9_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_set_gart_funcs(adev);
- gmc_v9_0_set_mc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
return 0;
@@ -511,7 +520,12 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.num_level = 3;
+
+ /* TODO: fix num_level for APU when updating vm size and block size */
+ if (adev->flags & AMD_IS_APU)
+ adev->vm_manager.num_level = 1;
+ else
+ adev->vm_manager.num_level = 3;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
@@ -543,9 +557,20 @@ static int gmc_v9_0_sw_init(void *handle)
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ amdgpu_vm_adjust_size(adev, 64);
} else {
/* XXX Don't know how to get VRAM type yet. */
adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ /*
+ * To support 4-level page tables, use a VM size of 256TB
+ * (48 bits, the Vega10 maximum) and a block size of 512
+ * (9 bits).
+ */
+ adev->vm_manager.vm_size = 1U << 18;
+ adev->vm_manager.block_size = 9;
+ DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ adev->vm_manager.vm_size,
+ adev->vm_manager.block_size);
}
/* This interrupt is VMC page fault.*/
@@ -557,14 +582,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- /* Because of four level VMPTs, vm size is at least 512GB.
- * The maximum size is 256TB (48bit).
- */
- if (amdgpu_vm_size < 512) {
- DRM_WARN("VM size is at least 512GB!\n");
- amdgpu_vm_size = 512;
- }
- adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18;
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
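With the per-hub callback gone, gmc_v9_0_get_invalidate_req() becomes the single builder of the TLB-invalidate request word, exposed through gart_funcs so the ring backends later in this diff can reach it. The word is assembled with REG_SET_FIELD(), which is shift-and-mask packing; a self-contained model with made-up field positions (the real VM_INVALIDATE_ENG0_REQ layout lives in the register headers):

#include <stdint.h>
#include <stdio.h>

/* minimal stand-in for the REG_SET_FIELD() shift/mask plumbing */
#define SET_FIELD(val, shift, mask, v) \
        (((val) & ~((uint32_t)(mask) << (shift))) | \
         (((uint32_t)(v) & (mask)) << (shift)))

/* illustrative positions only, not the real register layout */
#define PER_VMID_REQ_SHIFT   0
#define PER_VMID_REQ_MASK    0xffff
#define FLUSH_TYPE_SHIFT     16
#define FLUSH_TYPE_MASK      0x7

static uint32_t get_invalidate_req(unsigned int vm_id)
{
        uint32_t req = 0;

        /* legacy flush of a single VMID: one bit per VMID, flush type 0 */
        req = SET_FIELD(req, PER_VMID_REQ_SHIFT, PER_VMID_REQ_MASK, 1u << vm_id);
        req = SET_FIELD(req, FLUSH_TYPE_SHIFT, FLUSH_TYPE_MASK, 0);
        return req;
}

int main(void)
{
        printf("req(vmid=3) = 0x%08x\n", get_invalidate_req(3));
        return 0;
}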
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 266a0f47a908..62684510ddcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -242,7 +242,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@@ -317,36 +317,6 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
-static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vm_id*/
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
-}
-
-static uint32_t mmhub_v1_0_get_vm_protection_bits(void)
-{
- return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
-}
-
static int mmhub_v1_0_early_init(void *handle)
{
return 0;
@@ -379,9 +349,6 @@ static int mmhub_v1_0_sw_init(void *handle)
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
- hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
- hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index cfd5e54777bb..1493301b6a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -28,6 +28,7 @@
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
+#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"
@@ -133,7 +134,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
return r;
}
-static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
@@ -172,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
if (req == IDH_REQ_GPU_INIT_ACCESS ||
req == IDH_REQ_GPU_FINI_ACCESS ||
req == IDH_REQ_GPU_RESET_ACCESS) {
- r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
if (r)
return r;
}
@@ -180,6 +181,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return 0;
}
+static int xgpu_ai_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
@@ -201,7 +207,134 @@ static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
return r;
}
+static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ /* wait until RCV_MSG become 3 */
+ if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
+ pr_err("failed to recieve FLR_CMPL\n");
+ return;
+ }
+
+ /* Trigger recovery due to world switch failure */
+ amdgpu_sriov_gpu_reset(adev, false);
+}
+
+static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int r;
+
+ /* see what event we get */
+ r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+ /* only handle FLR_NOTIFY now */
+ if (!r)
+ schedule_work(&adev->virt.flr_work);
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
+ .set = xgpu_ai_set_mailbox_ack_irq,
+ .process = xgpu_ai_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
+ .set = xgpu_ai_set_mailbox_rcv_irq,
+ .process = xgpu_ai_mailbox_rcv_irq,
+};
+
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
+ .reset_gpu = xgpu_ai_request_reset,
};
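The new mailbox plumbing splits FLR (function-level reset) handling in two: the receive interrupt handler only checks for IDH_FLR_NOTIFICATION and schedules flr_work, and the worker then polls the mailbox for IDH_FLR_NOTIFICATION_CMPL before triggering amdgpu_sriov_gpu_reset(). A toy single-threaded model of that flow (the real code defers via schedule_work() and reads a hardware mailbox, not an array):

#include <stdio.h>

enum idh_event { EV_NONE, EV_FLR_NOTIFY, EV_FLR_CMPL };

static enum idh_event mailbox[] = { EV_FLR_NOTIFY, EV_NONE, EV_FLR_CMPL };
static unsigned int cursor;

static enum idh_event rcv_msg(void) { return mailbox[cursor++]; }

static void flr_work(void)
{
        int tries;

        /* poll until the host signals FLR completion, then recover */
        for (tries = 0; tries < 2; tries++) {
                if (rcv_msg() == EV_FLR_CMPL) {
                        printf("FLR complete, triggering GPU reset\n");
                        return;
                }
        }
        printf("timed out waiting for FLR_CMPL\n");
}

int main(void)
{
        if (rcv_msg() == EV_FLR_NOTIFY)  /* the rcv IRQ handler's only job */
                flr_work();              /* normally deferred via schedule_work() */
        return 0;
}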
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index bf8ab8fd4367..9aefc44d2c34 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,7 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 150000
+#define AI_MAILBOX_TIMEDOUT 5000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -44,4 +44,9 @@ enum idh_event {
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 5191c45ffdf3..c3588d1c7cb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -491,7 +491,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
ucode_size = ucode->ucode_size;
ucode_mem = (uint32_t *)ucode->kaddr;
- while (!ucode_size) {
+ while (ucode_size) {
fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
if (*ucode_mem != fw_sram_reg_val)
@@ -508,14 +508,10 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- uint32_t reg, reg_val;
+ uint32_t reg;
- reg_val = (smnMP1_FIRMWARE_FLAGS & 0xffffffff) | 0x03b00000;
- WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg_val);
+ reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
+ WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg);
reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2));
- if ((reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return true;
-
- return false;
+ return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
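The psp_v3_1_compare_sram_data() fix inverts a broken loop condition: `while (!ucode_size)` never iterated for any non-empty firmware image, so the SRAM comparison silently passed. The corrected shape, as a standalone word-compare loop:

#include <stdint.h>
#include <stdio.h>

/* compare a firmware image word-by-word against a readback source;
 * the loop must run while bytes REMAIN, not while the count is zero */
static int compare_words(const uint32_t *ucode, const uint32_t *sram,
                         uint32_t size_bytes)
{
        while (size_bytes) {          /* was "while (!size_bytes)": never ran */
                if (*ucode != *sram)
                        return 0;     /* mismatch */
                ucode++;
                sram++;
                size_bytes -= 4;
        }
        return 1;                     /* all words matched */
}

int main(void)
{
        uint32_t a[] = { 1, 2, 3 }, b[] = { 1, 2, 3 };

        printf("match: %d\n", compare_words(a, b, sizeof(a)));
        return 0;
}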
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2dd2b20d727e..21f38d882335 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1039,6 +1039,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -1048,7 +1049,6 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
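The request word depends only on vm_id, never on the hub being walked, so the sdma hunk hoists it out of the per-hub loop; the same hoist is applied to uvd_v7_0 and vce_v4_0 later in this diff. A minimal model:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_invalidate_req(unsigned int vm_id) { return 1u << vm_id; }

int main(void)
{
        enum { MAX_VMHUBS = 2 };
        unsigned int vm_id = 5;
        int i;

        /* hoisted: computed once, identical for every hub */
        uint32_t req = get_invalidate_req(vm_id);

        for (i = 0; i < MAX_VMHUBS; i++)
                printf("hub %d: emit req 0x%08x\n", i, req);
        return 0;
}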
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bb14a45997b5..385de8617075 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -106,6 +106,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+ else
+ BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
@@ -125,6 +127,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+ else
+ BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
@@ -493,7 +497,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
- amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
+ if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
+ amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -558,6 +563,7 @@ static int soc15_common_early_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_setting(adev);
+ xgpu_ai_mailbox_set_irq_funcs(adev);
}
/*
@@ -610,8 +616,23 @@ static int soc15_common_early_init(void *handle)
return 0;
}
+static int soc15_common_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_get_irq(adev);
+
+ return 0;
+}
+
static int soc15_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -642,6 +663,8 @@ static int soc15_common_hw_fini(void *handle)
/* disable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, false);
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_put_irq(adev);
return 0;
}
@@ -855,7 +878,7 @@ static int soc15_common_set_powergating_state(void *handle,
const struct amd_ip_funcs soc15_common_ip_funcs = {
.name = "soc15_common",
.early_init = soc15_common_early_init,
- .late_init = NULL,
+ .late_init = soc15_common_late_init,
.sw_init = soc15_common_sw_init,
.sw_fini = soc15_common_sw_fini,
.hw_init = soc15_common_hw_init,
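soc15_common_ip_funcs previously left late_init as NULL; it now points at soc15_common_late_init, which acquires the SR-IOV mailbox IRQs once interrupt handling is up. A sketch of the ops-table pattern, with a flag standing in for amdgpu_sriov_vf(adev):

#include <stdio.h>

struct ip_funcs {
        int (*early_init)(void *handle);
        int (*late_init)(void *handle);
};

static int is_vf = 1;                 /* stand-in for amdgpu_sriov_vf(adev) */

static int common_early_init(void *handle) { (void)handle; return 0; }

static int common_late_init(void *handle)
{
        (void)handle;
        if (is_vf)
                printf("VF: acquiring mailbox IRQs\n");
        return 0;
}

static const struct ip_funcs common_funcs = {
        .early_init = common_early_init,
        .late_init  = common_late_init,  /* was NULL before this change */
};

int main(void)
{
        common_funcs.early_init(NULL);
        if (common_funcs.late_init)
                common_funcs.late_init(NULL);
        return 0;
}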
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 9a4129d881aa..8ab0f78794a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -135,12 +135,9 @@ static int uvd_v4_2_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
+
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
@@ -230,11 +227,7 @@ static int uvd_v4_2_suspend(void *handle)
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v4_2_resume(void *handle)
@@ -246,11 +239,7 @@ static int uvd_v4_2_resume(void *handle)
if (r)
return r;
- r = uvd_v4_2_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v4_2_hw_init(adev);
}
/**
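The uvd_v4_2 hunks above, and the uvd_v5/v6/v7 and vce_v2/v3/v4 hunks that follow, all apply one cleanup: a trailing `r = f(); if (r) return r; return r;` collapses to `return f();` because both paths return the same value. Before and after, as a compilable toy:

#include <stdio.h>

static int hw_init(void) { return 0; }

/* before: the branch is dead weight, both paths return r */
static int resume_before(void)
{
        int r = hw_init();

        if (r)
                return r;
        return r;
}

/* after: forward the callee's result directly */
static int resume_after(void)
{
        return hw_init();
}

int main(void)
{
        printf("%d %d\n", resume_before(), resume_after());
        return 0;
}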
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e448f7d86bc0..bb6d46e168a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -131,11 +131,7 @@ static int uvd_v5_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
@@ -228,11 +224,7 @@ static int uvd_v5_0_suspend(void *handle)
return r;
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v5_0_resume(void *handle)
@@ -244,11 +236,7 @@ static int uvd_v5_0_resume(void *handle)
if (r)
return r;
- r = uvd_v5_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v5_0_hw_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 5679a4249bd9..31db356476f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -134,11 +134,7 @@ static int uvd_v6_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
@@ -230,11 +226,8 @@ static int uvd_v6_0_suspend(void *handle)
return r;
/* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
- }
return r;
}
@@ -250,11 +243,7 @@ static int uvd_v6_0_resume(void *handle)
if (r)
return r;
}
- r = uvd_v6_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v6_0_hw_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 13f52e0af9b8..9bcf01469282 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -438,11 +438,7 @@ static int uvd_v7_0_sw_fini(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
@@ -547,11 +543,8 @@ static int uvd_v7_0_suspend(void *handle)
return r;
/* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
- }
return r;
}
@@ -567,11 +560,7 @@ static int uvd_v7_0_resume(void *handle)
if (r)
return r;
}
- r = uvd_v7_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v7_0_hw_init(adev);
}
/**
@@ -1045,6 +1034,7 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
unsigned eng = ring->idx;
unsigned i;
@@ -1055,7 +1045,6 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
@@ -1091,6 +1080,7 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -1100,7 +1090,6 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 49a6c45e65be..47f70827195b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -451,11 +451,7 @@ static int vce_v2_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v2_0_hw_init(void *handle)
@@ -495,11 +491,7 @@ static int vce_v2_0_suspend(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v2_0_resume(void *handle)
@@ -511,11 +503,7 @@ static int vce_v2_0_resume(void *handle)
if (r)
return r;
- r = vce_v2_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v2_0_hw_init(adev);
}
static int vce_v2_0_soft_reset(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index db0adac073c6..fb0819359909 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -417,11 +417,7 @@ static int vce_v3_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v3_0_hw_init(void *handle)
@@ -471,11 +467,7 @@ static int vce_v3_0_suspend(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v3_0_resume(void *handle)
@@ -487,11 +479,7 @@ static int vce_v3_0_resume(void *handle)
if (r)
return r;
- r = vce_v3_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v3_0_hw_init(adev);
}
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index becc5f744a98..edde5fe938d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -527,11 +527,7 @@ static int vce_v4_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v4_0_hw_init(void *handle)
@@ -584,11 +580,7 @@ static int vce_v4_0_suspend(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v4_0_resume(void *handle)
@@ -600,11 +592,7 @@ static int vce_v4_0_resume(void *handle)
if (r)
return r;
- r = vce_v4_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v4_0_hw_init(adev);
}
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
@@ -985,6 +973,7 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -994,7 +983,6 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index b3a86e0e96e6..5f2ab9c1609a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -362,7 +362,89 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
#define PACKET3_SET_RESOURCES 0xA0
+/* 1. header
+ * 2. CONTROL
+ * 3. QUEUE_MASK_LO [31:0]
+ * 4. QUEUE_MASK_HI [31:0]
+ * 5. GWS_MASK_LO [31:0]
+ * 6. GWS_MASK_HI [31:0]
+ * 7. OAC_MASK [15:0]
+ * 8. GDS_HEAP_SIZE [16:11] | GDS_HEAP_BASE [5:0]
+ */
+# define PACKET3_SET_RESOURCES_VMID_MASK(x) ((x) << 0)
+# define PACKET3_SET_RESOURCES_UNMAP_LATENTY(x) ((x) << 16)
+# define PACKET3_SET_RESOURCES_QUEUE_TYPE(x) ((x) << 29)
#define PACKET3_MAP_QUEUES 0xA2
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. MQD_ADDR_LO [31:0]
+ * 5. MQD_ADDR_HI [31:0]
+ * 6. WPTR_ADDR_LO [31:0]
+ * 7. WPTR_ADDR_HI [31:0]
+ */
+/* CONTROL */
+# define PACKET3_MAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
+# define PACKET3_MAP_QUEUES_VMID(x) ((x) << 8)
+# define PACKET3_MAP_QUEUES_QUEUE_TYPE(x) ((x) << 21)
+# define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24)
+# define PACKET3_MAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
+# define PACKET3_MAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
+/* CONTROL2 */
+# define PACKET3_MAP_QUEUES_CHECK_DISABLE(x) ((x) << 1)
+# define PACKET3_MAP_QUEUES_DOORBELL_OFFSET(x) ((x) << 2)
+# define PACKET3_MAP_QUEUES_QUEUE(x) ((x) << 26)
+# define PACKET3_MAP_QUEUES_PIPE(x) ((x) << 29)
+# define PACKET3_MAP_QUEUES_ME(x) ((x) << 31)
+#define PACKET3_UNMAP_QUEUES 0xA3
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. CONTROL3
+ * 5. CONTROL4
+ * 6. CONTROL5
+ */
+/* CONTROL */
+# define PACKET3_UNMAP_QUEUES_ACTION(x) ((x) << 0)
+ /* 0 - PREEMPT_QUEUES
+ * 1 - RESET_QUEUES
+ * 2 - DISABLE_PROCESS_QUEUES
+ * 3 - PREEMPT_QUEUES_NO_UNMAP
+ */
+# define PACKET3_UNMAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
+# define PACKET3_UNMAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
+# define PACKET3_UNMAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
+/* CONTROL2a */
+# define PACKET3_UNMAP_QUEUES_PASID(x) ((x) << 0)
+/* CONTROL2b */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(x) ((x) << 2)
+/* CONTROL3a */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET1(x) ((x) << 2)
+/* CONTROL3b */
+# define PACKET3_UNMAP_QUEUES_RB_WPTR(x) ((x) << 0)
+/* CONTROL4 */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET2(x) ((x) << 2)
+/* CONTROL5 */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET3(x) ((x) << 2)
+#define PACKET3_QUERY_STATUS 0xA4
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. ADDR_LO [31:0]
+ * 5. ADDR_HI [31:0]
+ * 6. DATA_LO [31:0]
+ * 7. DATA_HI [31:0]
+ */
+/* CONTROL */
+# define PACKET3_QUERY_STATUS_CONTEXT_ID(x) ((x) << 0)
+# define PACKET3_QUERY_STATUS_INTERRUPT_SEL(x) ((x) << 28)
+# define PACKET3_QUERY_STATUS_COMMAND(x) ((x) << 30)
+/* CONTROL2a */
+# define PACKET3_QUERY_STATUS_PASID(x) ((x) << 0)
+/* CONTROL2b */
+# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
+# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index dfd4fe6f0578..9da5b0bb66d8 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -493,8 +493,10 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
{
enum amd_pm_state_type ps;
- if (input == NULL)
- return -EINVAL;
+ if (input == NULL) {
+ ret = -EINVAL;
+ break;
+ }
ps = *(unsigned long *)input;
data.requested_ui_label = power_state_convert(ps);
@@ -539,15 +541,19 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
switch (state->classification.ui_label) {
case PP_StateUILabel_Battery:
pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
case PP_StateUILabel_Balanced:
pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
case PP_StateUILabel_Performance:
pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
default:
if (state->classification.flags & PP_StateClassificationFlag_Boot)
pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
else
pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
}
mutex_unlock(&pp_handle->pp_lock);
@@ -894,7 +900,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
mutex_lock(&pp_handle->pp_lock);
ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_lock(&pp_handle->pp_lock);
+ mutex_unlock(&pp_handle->pp_lock);
return ret;
}
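Two genuine bugs are fixed here: pp_dpm_get_current_power_state() was missing break statements, so every ui_label fell through and the default classification always won, and pp_dpm_set_sclk_od() called mutex_lock() where mutex_unlock() was intended, which would deadlock the next caller. A standalone demonstration of the fallthrough fix:

#include <stdio.h>

enum ui_label { BATTERY, BALANCED, PERFORMANCE };

/* without the breaks, every case falls through and the last
 * assignment wins regardless of the input label */
static const char *pm_type(enum ui_label l)
{
        const char *t = "default";

        switch (l) {
        case BATTERY:
                t = "battery";
                break;          /* previously missing: fell into BALANCED */
        case BALANCED:
                t = "balanced";
                break;
        case PERFORMANCE:
                t = "performance";
                break;
        }
        return t;
}

int main(void)
{
        printf("%s\n", pm_type(BATTERY));  /* "battery", not "performance" */
        return 0;
}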
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
index 8e53d3a5e725..6a907c93fd9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
@@ -250,6 +250,29 @@ typedef struct _ATOM_Vega10_Fan_Table {
USHORT usFanStartTemperature;
} ATOM_Vega10_Fan_Table;
+typedef struct _ATOM_Vega10_Fan_Table_V2 {
+ UCHAR ucRevId;
+ USHORT usFanOutputSensitivity;
+ USHORT usFanAcousticLimitRpm;
+ USHORT usThrottlingRPM;
+ USHORT usTargetTemperature;
+ USHORT usMinimumPWMLimit;
+ USHORT usTargetGfxClk;
+ USHORT usFanGainEdge;
+ USHORT usFanGainHotspot;
+ USHORT usFanGainLiquid;
+ USHORT usFanGainVrVddc;
+ USHORT usFanGainVrMvdd;
+ USHORT usFanGainPlx;
+ USHORT usFanGainHbm;
+ UCHAR ucEnableZeroRPM;
+ USHORT usFanStopTemperature;
+ USHORT usFanStartTemperature;
+ UCHAR ucFanParameters;
+ UCHAR ucFanMinRPM;
+ UCHAR ucFanMaxRPM;
+} ATOM_Vega10_Fan_Table_V2;
+
typedef struct _ATOM_Vega10_Thermal_Controller {
UCHAR ucRevId;
UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_* */
@@ -305,6 +328,33 @@ typedef struct _ATOM_Vega10_PowerTune_Table {
USHORT usTemperatureLimitTedge;
} ATOM_Vega10_PowerTune_Table;
+typedef struct _ATOM_Vega10_PowerTune_Table_V2
+{
+ UCHAR ucRevId;
+ USHORT usSocketPowerLimit;
+ USHORT usBatteryPowerLimit;
+ USHORT usSmallPowerLimit;
+ USHORT usTdcLimit;
+ USHORT usEdcLimit;
+ USHORT usSoftwareShutdownTemp;
+ USHORT usTemperatureLimitHotSpot;
+ USHORT usTemperatureLimitLiquid1;
+ USHORT usTemperatureLimitLiquid2;
+ USHORT usTemperatureLimitHBM;
+ USHORT usTemperatureLimitVrSoc;
+ USHORT usTemperatureLimitVrMem;
+ USHORT usTemperatureLimitPlx;
+ USHORT usLoadLineResistance;
+ UCHAR ucLiquid1_I2C_address;
+ UCHAR ucLiquid2_I2C_address;
+ UCHAR ucLiquid_I2C_Line;
+ UCHAR ucVr_I2C_address;
+ UCHAR ucVr_I2C_Line;
+ UCHAR ucPlx_I2C_address;
+ UCHAR ucPlx_I2C_Line;
+ USHORT usTemperatureLimitTedge;
+} ATOM_Vega10_PowerTune_Table_V2;
+
typedef struct _ATOM_Vega10_Hard_Limit_Record {
ULONG ulSOCCLKLimit;
ULONG ulGFXCLKLimit;
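ATOM_Vega10_Fan_Table_V2 and ATOM_Vega10_PowerTune_Table_V2 both begin with ucRevId, so the parser in vega10_processpptables.c can read a generic one-byte header first and cast to the right layout afterwards (rev 10 selects the v1 fan table, anything newer the v2 one). A compact sketch of that dispatch; the real code also byte-swaps fields with le16_to_cpu():

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct generic_header { uint8_t ucRevId; };
struct fan_v1 { uint8_t ucRevId; uint16_t usFanRPMMax; };
struct fan_v2 { uint8_t ucRevId; uint16_t usFanAcousticLimitRpm; };
#pragma pack(pop)

static void parse(const void *blob)
{
        const struct generic_header *h = blob;

        if (h->ucRevId == 10)
                printf("v1 table, max rpm %u\n",
                       ((const struct fan_v1 *)h)->usFanRPMMax);
        else if (h->ucRevId > 10)
                printf("v2 table, acoustic limit %u\n",
                       ((const struct fan_v2 *)h)->usFanAcousticLimitRpm);
}

int main(void)
{
        struct fan_v2 t = { .ucRevId = 11, .usFanAcousticLimitRpm = 2900 };

        parse(&t);
        return 0;
}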
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 518634f995e7..8b55ae01132d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -116,14 +116,16 @@ static int init_thermal_controller(
const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
{
const ATOM_Vega10_Thermal_Controller *thermal_controller;
- const ATOM_Vega10_Fan_Table *fan_table;
+ const Vega10_PPTable_Generic_SubTable_Header *header;
+ const ATOM_Vega10_Fan_Table *fan_table_v1;
+ const ATOM_Vega10_Fan_Table_V2 *fan_table_v2;
thermal_controller = (ATOM_Vega10_Thermal_Controller *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usThermalControllerOffset));
PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0),
- "Thermal controller table not set!", return -1);
+ "Thermal controller table not set!", return -EINVAL);
hwmgr->thermal_controller.ucType = thermal_controller->ucType;
hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
@@ -142,6 +144,9 @@ static int init_thermal_controller(
hwmgr->thermal_controller.fanInfo.ulMaxRPM =
thermal_controller->ucFanMaxRPM * 100UL;
+ hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
+ = 100000;
+
set_hw_cap(
hwmgr,
ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
@@ -150,54 +155,101 @@ static int init_thermal_controller(
if (!powerplay_table->usFanTableOffset)
return 0;
- fan_table = (const ATOM_Vega10_Fan_Table *)
+ header = (const Vega10_PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usFanTableOffset));
- PP_ASSERT_WITH_CODE((fan_table->ucRevId >= 8),
- "Invalid Input Fan Table!", return -1);
+ if (header->ucRevId == 10) {
+ fan_table_v1 = (ATOM_Vega10_Fan_Table *)header;
- hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
- = 100000;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- le16_to_cpu(fan_table->usFanOutputSensitivity);
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- le16_to_cpu(fan_table->usFanRPMMax);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
- le16_to_cpu(fan_table->usThrottlingRPM);
- hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
- le32_to_cpu((uint32_t)(fan_table->usFanAcousticLimit));
- hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
- le16_to_cpu(fan_table->usTargetTemperature);
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
- le16_to_cpu(fan_table->usMinimumPWMLimit);
- hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
- le32_to_cpu((uint32_t)(fan_table->usTargetGfxClk));
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
- le16_to_cpu(fan_table->usFanGainEdge);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
- le16_to_cpu(fan_table->usFanGainHotspot);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
- le16_to_cpu(fan_table->usFanGainLiquid);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
- le16_to_cpu(fan_table->usFanGainVrVddc);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
- le16_to_cpu(fan_table->usFanGainVrMvdd);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
- le16_to_cpu(fan_table->usFanGainPlx);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
- le16_to_cpu(fan_table->usFanGainHbm);
-
- hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
- fan_table->ucEnableZeroRPM;
- hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
- le16_to_cpu(fan_table->usFanStopTemperature);
- hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
- le16_to_cpu(fan_table->usFanStartTemperature);
+ PP_ASSERT_WITH_CODE((fan_table_v1->ucRevId >= 8),
+ "Invalid Input Fan Table!", return -EINVAL);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table_v1->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ le16_to_cpu(fan_table_v1->usFanRPMMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
+ le16_to_cpu(fan_table_v1->usThrottlingRPM);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
+ le16_to_cpu(fan_table_v1->usFanAcousticLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table_v1->usTargetTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table_v1->usMinimumPWMLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
+ le16_to_cpu(fan_table_v1->usTargetGfxClk);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
+ le16_to_cpu(fan_table_v1->usFanGainEdge);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
+ le16_to_cpu(fan_table_v1->usFanGainHotspot);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
+ le16_to_cpu(fan_table_v1->usFanGainLiquid);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
+ le16_to_cpu(fan_table_v1->usFanGainVrVddc);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
+ le16_to_cpu(fan_table_v1->usFanGainVrMvdd);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
+ le16_to_cpu(fan_table_v1->usFanGainPlx);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
+ le16_to_cpu(fan_table_v1->usFanGainHbm);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
+ fan_table_v1->ucEnableZeroRPM;
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
+ le16_to_cpu(fan_table_v1->usFanStopTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
+ le16_to_cpu(fan_table_v1->usFanStartTemperature);
+ } else if (header->ucRevId > 10) {
+ fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header;
+
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+ fan_table_v2->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+ hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v2->ucFanMinRPM * 100UL;
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v2->ucFanMaxRPM * 100UL;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table_v2->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ fan_table_v2->ucFanMaxRPM * 100UL;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
+ le16_to_cpu(fan_table_v2->usThrottlingRPM);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
+ le16_to_cpu(fan_table_v2->usFanAcousticLimitRpm);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table_v2->usTargetTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table_v2->usMinimumPWMLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
+ le16_to_cpu(fan_table_v2->usTargetGfxClk);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
+ le16_to_cpu(fan_table_v2->usFanGainEdge);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
+ le16_to_cpu(fan_table_v2->usFanGainHotspot);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
+ le16_to_cpu(fan_table_v2->usFanGainLiquid);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
+ le16_to_cpu(fan_table_v2->usFanGainVrVddc);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
+ le16_to_cpu(fan_table_v2->usFanGainVrMvdd);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
+ le16_to_cpu(fan_table_v2->usFanGainPlx);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
+ le16_to_cpu(fan_table_v2->usFanGainHbm);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
+ fan_table_v2->ucEnableZeroRPM;
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
+ le16_to_cpu(fan_table_v2->usFanStopTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
+ le16_to_cpu(fan_table_v2->usFanStartTemperature);
+ }
return 0;
}
@@ -261,6 +313,48 @@ static int get_mm_clock_voltage_table(
return 0;
}
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case Vega10_I2CLineID_DDC1:
+ *scl = Vega10_I2C_DDC1CLK;
+ *sda = Vega10_I2C_DDC1DATA;
+ break;
+ case Vega10_I2CLineID_DDC2:
+ *scl = Vega10_I2C_DDC2CLK;
+ *sda = Vega10_I2C_DDC2DATA;
+ break;
+ case Vega10_I2CLineID_DDC3:
+ *scl = Vega10_I2C_DDC3CLK;
+ *sda = Vega10_I2C_DDC3DATA;
+ break;
+ case Vega10_I2CLineID_DDC4:
+ *scl = Vega10_I2C_DDC4CLK;
+ *sda = Vega10_I2C_DDC4DATA;
+ break;
+ case Vega10_I2CLineID_DDC5:
+ *scl = Vega10_I2C_DDC5CLK;
+ *sda = Vega10_I2C_DDC5DATA;
+ break;
+ case Vega10_I2CLineID_DDC6:
+ *scl = Vega10_I2C_DDC6CLK;
+ *sda = Vega10_I2C_DDC6DATA;
+ break;
+ case Vega10_I2CLineID_SCLSDA:
+ *scl = Vega10_I2C_SCL;
+ *sda = Vega10_I2C_SDA;
+ break;
+ case Vega10_I2CLineID_DDCVGA:
+ *scl = Vega10_I2C_DDCVGACLK;
+ *sda = Vega10_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
static int get_tdp_table(
struct pp_hwmgr *hwmgr,
struct phm_tdp_table **info_tdp_table,
@@ -268,59 +362,99 @@ static int get_tdp_table(
{
uint32_t table_size;
struct phm_tdp_table *tdp_table;
-
- const ATOM_Vega10_PowerTune_Table *power_tune_table =
- (ATOM_Vega10_PowerTune_Table *)table;
-
- table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
- hwmgr->dyn_state.cac_dtp_table = (struct phm_cac_tdp_table *)
- kzalloc(table_size, GFP_KERNEL);
-
- if (!hwmgr->dyn_state.cac_dtp_table)
- return -ENOMEM;
+ uint8_t scl;
+ uint8_t sda;
+ const ATOM_Vega10_PowerTune_Table *power_tune_table;
+ const ATOM_Vega10_PowerTune_Table_V2 *power_tune_table_v2;
table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table);
+
tdp_table = kzalloc(table_size, GFP_KERNEL);
- if (!tdp_table) {
- kfree(hwmgr->dyn_state.cac_dtp_table);
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ if (!tdp_table)
return -ENOMEM;
- }
- tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit);
- tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit);
- tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit);
- tdp_table->usSoftwareShutdownTemp =
- le16_to_cpu(power_tune_table->usSoftwareShutdownTemp);
- tdp_table->usTemperatureLimitTedge =
- le16_to_cpu(power_tune_table->usTemperatureLimitTedge);
- tdp_table->usTemperatureLimitHotspot =
- le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot);
- tdp_table->usTemperatureLimitLiquid1 =
- le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1);
- tdp_table->usTemperatureLimitLiquid2 =
- le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2);
- tdp_table->usTemperatureLimitHBM =
- le16_to_cpu(power_tune_table->usTemperatureLimitHBM);
- tdp_table->usTemperatureLimitVrVddc =
- le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc);
- tdp_table->usTemperatureLimitVrMvdd =
- le16_to_cpu(power_tune_table->usTemperatureLimitVrMem);
- tdp_table->usTemperatureLimitPlx =
- le16_to_cpu(power_tune_table->usTemperatureLimitPlx);
- tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address;
- tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address;
- tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL;
- tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA;
- tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address;
- tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL;
- tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA;
- tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
- tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
- tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
-
- hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
+ if (table->ucRevId == 5) {
+ power_tune_table = (ATOM_Vega10_PowerTune_Table *)table;
+ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit);
+ tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit);
+ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit);
+ tdp_table->usSoftwareShutdownTemp =
+ le16_to_cpu(power_tune_table->usSoftwareShutdownTemp);
+ tdp_table->usTemperatureLimitTedge =
+ le16_to_cpu(power_tune_table->usTemperatureLimitTedge);
+ tdp_table->usTemperatureLimitHotspot =
+ le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot);
+ tdp_table->usTemperatureLimitLiquid1 =
+ le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 =
+ le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitHBM =
+ le16_to_cpu(power_tune_table->usTemperatureLimitHBM);
+ tdp_table->usTemperatureLimitVrVddc =
+ le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc);
+ tdp_table->usTemperatureLimitVrMvdd =
+ le16_to_cpu(power_tune_table->usTemperatureLimitVrMem);
+ tdp_table->usTemperatureLimitPlx =
+ le16_to_cpu(power_tune_table->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address;
+ tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL;
+ tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA;
+ tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address;
+ tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL;
+ tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA;
+ tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
+ tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
+ tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
+ hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
+ } else {
+ power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table;
+ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit);
+ tdp_table->usTDC = le16_to_cpu(power_tune_table_v2->usTdcLimit);
+ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v2->usEdcLimit);
+ tdp_table->usSoftwareShutdownTemp =
+ le16_to_cpu(power_tune_table_v2->usSoftwareShutdownTemp);
+ tdp_table->usTemperatureLimitTedge =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitTedge);
+ tdp_table->usTemperatureLimitHotspot =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitHotSpot);
+ tdp_table->usTemperatureLimitLiquid1 =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitHBM =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitHBM);
+ tdp_table->usTemperatureLimitVrVddc =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrSoc);
+ tdp_table->usTemperatureLimitVrMvdd =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrMem);
+ tdp_table->usTemperatureLimitPlx =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address = power_tune_table_v2->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address = power_tune_table_v2->ucLiquid2_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucLiquid_I2C_Line, &scl, &sda);
+
+ tdp_table->ucLiquid_I2C_Line = scl;
+ tdp_table->ucLiquid_I2C_LineSDA = sda;
+
+ tdp_table->ucVr_I2C_address = power_tune_table_v2->ucVr_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucVr_I2C_Line, &scl, &sda);
+
+ tdp_table->ucVr_I2C_Line = scl;
+ tdp_table->ucVr_I2C_LineSDA = sda;
+ tdp_table->ucPlx_I2C_address = power_tune_table_v2->ucPlx_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucPlx_I2C_Line, &scl, &sda);
+
+ tdp_table->ucPlx_I2C_Line = scl;
+ tdp_table->ucPlx_I2C_LineSDA = sda;
+
+ hwmgr->platform_descriptor.LoadLineSlope =
+ power_tune_table_v2->usLoadLineResistance;
+ }
*info_tdp_table = tdp_table;
@@ -836,7 +970,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddc_lookup_table, vddc_table, 16);
+ &pp_table_info->vddc_lookup_table, vddc_table, 8);
}
if (powerplay_table->usVddmemLookupTableOffset) {
@@ -845,7 +979,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddmemLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddmem_lookup_table, vdd_mem_table, 16);
+ &pp_table_info->vddmem_lookup_table, vdd_mem_table, 4);
}
if (powerplay_table->usVddciLookupTableOffset) {
@@ -854,7 +988,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddciLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddci_lookup_table, vddci_table, 16);
+ &pp_table_info->vddci_lookup_table, vddci_table, 4);
}
return result;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
index 995d133ba6aa..d83ed2af7aa3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
@@ -26,6 +26,34 @@
#include "hwmgr.h"
+enum Vega10_I2CLineID {
+ Vega10_I2CLineID_DDC1 = 0x90,
+ Vega10_I2CLineID_DDC2 = 0x91,
+ Vega10_I2CLineID_DDC3 = 0x92,
+ Vega10_I2CLineID_DDC4 = 0x93,
+ Vega10_I2CLineID_DDC5 = 0x94,
+ Vega10_I2CLineID_DDC6 = 0x95,
+ Vega10_I2CLineID_SCLSDA = 0x96,
+ Vega10_I2CLineID_DDCVGA = 0x97
+};
+
+#define Vega10_I2C_DDC1DATA 0
+#define Vega10_I2C_DDC1CLK 1
+#define Vega10_I2C_DDC2DATA 2
+#define Vega10_I2C_DDC2CLK 3
+#define Vega10_I2C_DDC3DATA 4
+#define Vega10_I2C_DDC3CLK 5
+#define Vega10_I2C_SDA 40
+#define Vega10_I2C_SCL 41
+#define Vega10_I2C_DDC4DATA 65
+#define Vega10_I2C_DDC4CLK 66
+#define Vega10_I2C_DDC5DATA 0x48
+#define Vega10_I2C_DDC5CLK 0x49
+#define Vega10_I2C_DDC6DATA 0x4a
+#define Vega10_I2C_DDC6CLK 0x4b
+#define Vega10_I2C_DDCVGADATA 0x4c
+#define Vega10_I2C_DDCVGACLK 0x4d
+
extern const struct pp_table_func vega10_pptable_funcs;
extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
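The get_scl_sda_value() helper used by the V2 power-tune parsing above decodes one of these Vega10_I2CLineID values into the matching SCL/SDA pin pair. A minimal sketch of that mapping, assuming a plain switch over the enum (the actual helper lives in vega10_processpptables.c and may differ in detail):

    static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
    {
    	switch (line) {
    	case Vega10_I2CLineID_DDC1:
    		*scl = Vega10_I2C_DDC1CLK;
    		*sda = Vega10_I2C_DDC1DATA;
    		break;
    	case Vega10_I2CLineID_SCLSDA:
    		*scl = Vega10_I2C_SCL;
    		*sda = Vega10_I2C_SDA;
    		break;
    	/* DDC2..DDC6 and DDCVGA follow the same CLK/DATA pattern */
    	default:
    		*scl = 0;
    		*sda = 0;
    	}
    }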
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
index aee021451d35..2037910adcb1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
@@ -30,7 +30,9 @@
* SMU TEAM: Always increment the interface version if
* any structure is changed in this file
*/
-#define SMU9_DRIVER_IF_VERSION 0xa
+#define SMU9_DRIVER_IF_VERSION 0xB
+
+#define PPTABLE_V10_SMU_VERSION 1
#define NUM_GFXCLK_DPM_LEVELS 8
#define NUM_UVD_DPM_LEVELS 8
@@ -87,6 +89,11 @@ typedef struct {
int32_t a0;
int32_t a1;
int32_t a2;
+
+ uint8_t a0_shift;
+ uint8_t a1_shift;
+ uint8_t a2_shift;
+ uint8_t padding;
} GbVdroopTable_t;
typedef struct {
@@ -293,7 +300,9 @@ typedef struct {
uint16_t Platform_sigma;
uint16_t PSM_Age_CompFactor;
- uint32_t Reserved[20];
+ uint32_t DpmLevelPowerDelta;
+
+ uint32_t Reserved[19];
/* Padding - ignore */
uint32_t MmHubPadding[7]; /* SMU internal use */
@@ -350,8 +359,8 @@ typedef struct {
typedef struct {
uint16_t avgPsmCount[30];
uint16_t minPsmCount[30];
- uint16_t avgPsmVoltage[30]; /* in mV with 2 fractional bits */
- uint16_t minPsmVoltage[30]; /* in mV with 2 fractional bits */
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
uint32_t MmHubPadding[7]; /* SMU internal use */
} AvfsDebugTable_t;
@@ -414,5 +423,45 @@ typedef struct {
#define UCLK_SWITCH_SLOW 0
#define UCLK_SWITCH_FAST 1
+/* GFX DIDT Configuration */
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
#endif
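Each DIDT block (SQ, TCP, TD, DB) occupies its own byte of one packed configuration word, with the Enable/IR/PCC/EDC bits at matching offsets within the byte. Illustrative only (not part of the patch), extracting one feature bit with the masks and shifts above:

    static inline unsigned int didt_sq_edc_enabled(uint32_t didt_cfg)
    {
    	return (didt_cfg & SQ_EDC_MASK) >> SQ_EDC_SHIFT;	/* 0 or 1 */
    }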
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 50c910efa13d..e879496b8a42 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver ast_bo_driver = {
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int ast_mm_init(struct ast_private *ast)
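ast here, and bochs and cirrus below, all point the new io_mem_pfn hook at the common TTM default. That default simply translates a page offset within the buffer object into a host PFN; at the time of this series it was roughly the following one-liner in ttm_bo_vm.c:

    unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
    					    unsigned long page_offset)
    {
    	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
    		+ page_offset;
    }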
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 857755ac2d70..c4cadb638460 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -205,6 +205,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int bochs_mm_init(struct bochs_device *bochs)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index f53aa8f4a143..93dbcd38355d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index cc1731c5289c..71cee4e9fefb 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -5,6 +5,7 @@ config DRM_ETNAVIV
depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
depends on MMU
select SHMEM
+ select SYNC_FILE
select TMPFS
select IOMMU_API
select IOMMU_SUPPORT
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 587e45043542..5255278dde56 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -111,7 +111,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx = file->driver_priv;
@@ -488,7 +488,7 @@ static struct drm_driver etnaviv_drm_driver = {
DRIVER_PRIME |
DRIVER_RENDER,
.open = etnaviv_open,
- .preclose = etnaviv_preclose,
+ .postclose = etnaviv_postclose,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 0,
+ .minor = 1,
};
/*
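The minor version bump to 1.1 is what lets userspace discover the new fence-fd submit support. A hypothetical libdrm-based check (not from the patch; assumes fd is an open etnaviv render node):

    drmVersionPtr ver = drmGetVersion(fd);
    int has_fence_fd = ver && (ver->version_major > 1 ||
    			   (ver->version_major == 1 && ver->version_minor >= 1));
    drmFreeVersion(ver);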
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index e63ff116a3b3..c4a091e87426 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -20,6 +20,7 @@
#include <linux/reservation.h>
#include "etnaviv_drv.h"
+struct dma_fence;
struct etnaviv_gem_ops;
struct etnaviv_gem_object;
@@ -104,9 +105,10 @@ struct etnaviv_gem_submit {
struct drm_device *dev;
struct etnaviv_gpu *gpu;
struct ww_acquire_ctx ticket;
- u32 fence;
+ struct dma_fence *fence;
+ u32 flags;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
};
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 726090d7a6ac..e1909429837e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -14,7 +14,9 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
+#include <linux/sync_file.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+ bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
- ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+ ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
+ explicit);
if (ret)
break;
}
@@ -290,6 +294,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
}
ww_acquire_fini(&submit->ticket);
+ dma_fence_put(submit->fence);
kfree(submit);
}
@@ -303,6 +308,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct etnaviv_gem_submit *submit;
struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gpu *gpu;
+ struct dma_fence *in_fence = NULL;
+ struct sync_file *sync_file = NULL;
+ int out_fence_fd = -1;
void *stream;
int ret;
@@ -326,6 +334,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+ DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
@@ -365,12 +378,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_cmds;
}
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto err_submit_cmds;
+ }
+ }
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
goto err_submit_cmds;
}
+ submit->flags = args->flags;
+
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
goto err_submit_objects;
@@ -385,6 +408,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
}
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+ in_fence = sync_file_get_fence(args->fence_fd);
+ if (!in_fence) {
+ ret = -EINVAL;
+ goto err_submit_objects;
+ }
+
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
+ */
+ if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
+ ret = dma_fence_wait(in_fence, true);
+ if (ret)
+ goto err_submit_objects;
+ }
+ }
+
ret = submit_fence_sync(submit);
if (ret)
goto err_submit_objects;
@@ -405,7 +446,23 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret == 0)
cmdbuf = NULL;
- args->fence = submit->fence;
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ /*
+ * This can be improved: ideally we want to allocate the sync
+ * file before kicking off the GPU job and just attach the
+ * fence to the sync file here, eliminating the ENOMEM
+ * possibility at this stage.
+ */
+ sync_file = sync_file_create(submit->fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fd_install(out_fence_fd, sync_file->file);
+ }
+
+ args->fence_fd = out_fence_fd;
+ args->fence = submit->fence->seqno;
out:
submit_unpin_objects(submit);
@@ -419,9 +476,13 @@ out:
flush_workqueue(priv->wq);
err_submit_objects:
+ if (in_fence)
+ dma_fence_put(in_fence);
submit_cleanup(submit);
err_submit_cmds:
+ if (ret && (out_fence_fd >= 0))
+ put_unused_fd(out_fence_fd);
/* if we still own the cmdbuf */
if (cmdbuf)
etnaviv_cmdbuf_free(cmdbuf);
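Putting the new uapi together, a rough userspace sketch of an explicitly fenced submit. The flags and fence_fd fields are the ones added by this series; bos, relocs and the command stream are filled in exactly as before:

    struct drm_etnaviv_gem_submit req = { 0 };

    /* ... set up req.bos, req.relocs, req.stream as usual ... */
    req.flags = ETNA_SUBMIT_FENCE_FD_OUT | ETNA_SUBMIT_NO_IMPLICIT;

    if (drmCommandWriteRead(fd, DRM_ETNAVIV_GEM_SUBMIT,
    			    &req, sizeof(req)) == 0) {
    	/* req.fence_fd is now a sync_file fd: hand it to another
    	 * driver, poll() on it, then close() it when done. */
    	close(req.fence_fd);
    }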
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index da48819ff2e6..4f587058a3aa 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
+#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ u32 clock;
+
+ clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+ etnaviv_gpu_load_clock(gpu, clock);
+}
+
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
timeout = jiffies + msecs_to_jiffies(1000);
while (time_is_after_jiffies(timeout)) {
- control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
/* enable clock */
- etnaviv_gpu_load_clock(gpu, control);
+ etnaviv_gpu_update_clock(gpu);
+
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
/* Wait for stable clock. Vivante's code waited for 1ms */
usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
}
/* We rely on the GPU running, so program the clock */
- control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
- /* enable clock */
- etnaviv_gpu_load_clock(gpu, control);
+ etnaviv_gpu_update_clock(gpu);
return 0;
}
@@ -1051,6 +1058,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_fence *f;
+ /*
+ * GPU lock must already be held, otherwise fence completion order might
+ * not match the seqno order assigned here.
+ */
+ lockdep_assert_held(&gpu->lock);
+
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
@@ -1064,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
}
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive)
+ unsigned int context, bool exclusive, bool explicit)
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
@@ -1077,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
return ret;
}
+ if (explicit)
+ return 0;
+
/*
* If we have any shared fences, then the exclusive fence
* should be ignored as it will already have been signalled.
@@ -1321,8 +1337,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
}
gpu->event[event].fence = fence;
- submit->fence = fence->seqno;
- gpu->active_fence = submit->fence;
+ submit->fence = dma_fence_get(fence);
+ gpu->active_fence = submit->fence->seqno;
if (gpu->lastctx != cmdbuf->ctx) {
gpu->mmu->need_flush = true;
@@ -1526,17 +1542,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
- u32 clock;
int ret;
ret = mutex_lock_killable(&gpu->lock);
if (ret)
return ret;
- clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
- etnaviv_gpu_load_clock(gpu, clock);
+ etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
gpu->switch_context = true;
@@ -1548,6 +1560,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
}
#endif
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 6;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
+ *state = gpu->freq_scale;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
+ mutex_lock(&gpu->lock);
+ gpu->freq_scale = state;
+ if (!pm_runtime_suspended(gpu->dev))
+ etnaviv_gpu_update_clock(gpu);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+ .get_max_state = etnaviv_gpu_cooling_get_max_state,
+ .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+ .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
void *data)
{
@@ -1556,13 +1609,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
+ gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+ (char *)dev_name(dev), gpu, &cooling_ops);
+ if (IS_ERR(gpu->cooling))
+ return PTR_ERR(gpu->cooling);
+
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
#else
ret = etnaviv_gpu_clk_enable(gpu);
#endif
- if (ret < 0)
+ if (ret < 0) {
+ thermal_cooling_device_unregister(gpu->cooling);
return ret;
+ }
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1676,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
}
gpu->drm = NULL;
+
+ thermal_cooling_device_unregister(gpu->cooling);
+ gpu->cooling = NULL;
}
static const struct component_ops gpu_ops = {
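The cooling device exposes seven throttle states (0..6); etnaviv_gpu_update_clock() above turns the current state into a core-clock divider via fscale = 1 << (6 - state), so state 0 runs at 64/64 of full speed and state 6 at 1/64. Illustrative arithmetic only, not part of the patch:

    static unsigned long etnaviv_scaled_clock(unsigned long base_hz,
    					      unsigned long state)
    {
    	unsigned int fscale = 1 << (6 - state);	/* 64, 32, ..., 1 */

    	return base_hz * fscale / 64;
    }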
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c0606ea7d5e..9227a9740447 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
struct etnaviv_gpu {
struct drm_device *drm;
+ struct thermal_cooling_device *cooling;
struct device *dev;
struct mutex lock;
struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
u32 hangcheck_fence;
u32 hangcheck_dma_addr;
struct work_struct recover_work;
+ unsigned int freq_scale;
};
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
@@ -181,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive);
+ unsigned int context, bool exclusive, bool implicit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6f972afbdbc3..94f2e701e4d4 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1215,7 +1215,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1243,7 +1243,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1267,7 +1267,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1278,7 +1278,9 @@ static int check_mi_display_flip(struct parser_exec_state *s,
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_check_mi_display_flip(s, info);
return -ENODEV;
}
@@ -1289,7 +1291,9 @@ static int update_plane_mmio_from_mi_display_flip(
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_update_plane_mmio_from_mi_display_flip(s, info);
return -ENODEV;
}
@@ -1569,7 +1573,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
return 0;
@@ -2604,6 +2609,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
struct parser_exec_state s;
int ret = 0;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
@@ -2618,14 +2626,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
- s.vgpu = wa_ctx->workload->vgpu;
- s.ring_id = wa_ctx->workload->ring_id;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
s.ring_tail = gma_tail;
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
- s.workload = wa_ctx->workload;
+ s.workload = workload;
ret = ip_gma_set(&s, gma_head);
if (ret)
@@ -2708,12 +2716,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
- obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -2733,8 +2744,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto unmap_src;
}
- ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
- wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ ret = copy_gma_to_hva(workload->vgpu,
+ workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
if (ret < 0) {
@@ -2772,7 +2783,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 5419ae6ec633..4cf2b29fbaa1 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -161,8 +161,9 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
#define DPCD_HEADER_SIZE 0xb
+/* let the virtual display support DP 1.2 */
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
- 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
@@ -172,9 +173,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
+ vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+ SKL_FUSE_DOWNLOAD_STATUS |
+ SKL_FUSE_PG0_DIST_STATUS |
+ SKL_FUSE_PG1_DIST_STATUS |
+ SKL_FUSE_PG2_DIST_STATUS;
+ vgpu_vreg(vgpu, LCPLL1_CTL) |=
+ LCPLL_PLL_ENABLE |
+ LCPLL_PLL_LOCK;
+ vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+
+ }
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -191,7 +203,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
- if (IS_SKYLAKE(dev_priv) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
@@ -353,7 +365,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -375,7 +387,7 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
intel_vgpu_init_i2c_edid(vgpu);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f1f426a97aa9..ce4276a7cf9c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- int ring_id = wa_ctx->workload->ring_id;
- struct i915_gem_context *shadow_ctx =
- wa_ctx->workload->vgpu->shadow_ctx;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -680,7 +682,6 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
- workload->wa_ctx.workload = workload;
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b832bea64e03..6da4e444e572 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2224,7 +2224,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_dbg_core("init gtt\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3b9d59e457ba..0f3a98865a58 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -106,7 +106,8 @@ static void init_device_info(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
@@ -143,6 +144,11 @@ static int gvt_service_thread(void *data)
intel_gvt_emulate_vblank(gvt);
mutex_unlock(&gvt->lock);
}
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+ (void *)&gvt->service_request)) {
+ intel_gvt_schedule(gvt);
+ }
}
return 0;
@@ -196,6 +202,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
idr_destroy(&gvt->vgpu_idr);
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -214,6 +222,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
struct intel_gvt *gvt;
+ struct intel_vgpu *vgpu;
int ret;
/*
@@ -286,6 +295,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
goto out_clean_types;
}
+ vgpu = intel_gvt_create_idle_vgpu(gvt);
+ if (IS_ERR(vgpu)) {
+ ret = PTR_ERR(vgpu);
+ gvt_err("failed to create idle vgpu\n");
+ goto out_clean_types;
+ }
+ gvt->idle_vgpu = vgpu;
+
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 6dfc48b63b71..806da96b6a92 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -138,6 +138,10 @@ struct intel_vgpu_display {
struct intel_vgpu_sbi sbi;
};
+struct vgpu_sched_ctl {
+ int weight;
+};
+
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
@@ -147,6 +151,7 @@ struct intel_vgpu {
bool failsafe;
bool resetting;
void *sched_data;
+ struct vgpu_sched_ctl sched_ctl;
struct intel_vgpu_fence fence;
struct intel_vgpu_gm gm;
@@ -160,6 +165,7 @@ struct intel_vgpu {
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
+ ktime_t last_ctx_submit_time;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
@@ -215,6 +221,7 @@ struct intel_vgpu_type {
unsigned int low_gm_size;
unsigned int high_gm_size;
unsigned int fence;
+ unsigned int weight;
enum intel_vgpu_edid resolution;
};
@@ -236,6 +243,7 @@ struct intel_gvt {
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
+ struct intel_vgpu *idle_vgpu;
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
@@ -249,6 +257,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
enum {
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+ INTEL_GVT_REQUEST_SCHED = 1,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -322,6 +331,8 @@ struct intel_vgpu_creation_params {
__u64 resolution;
__s32 primary;
__u64 vgpu_id;
+
+ __u32 weight;
};
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
@@ -376,6 +387,8 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6da9ae1618e3..0ad1a508e2af 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -68,6 +68,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_BDW;
else if (IS_SKYLAKE(gvt->dev_priv))
return D_SKL;
+ else if (IS_KABYLAKE(gvt->dev_priv))
+ return D_KBL;
return 0;
}
@@ -234,7 +236,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -823,8 +826,9 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
- offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+ if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
@@ -1311,7 +1315,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1324,7 +1329,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
}
break;
case SKL_PCODE_CDCLK_CONTROL:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1418,6 +1424,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
+ vgpu->last_ctx_submit_time = ktime_get();
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -2592,219 +2599,232 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
- MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
- MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+ MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
+ skl_power_well_ctl_write);
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
- MMIO_D(0x45504, D_SKL);
- MMIO_D(0x45520, D_SKL);
- MMIO_D(0x46000, D_SKL);
- MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
- MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
- MMIO_D(0x6C040, D_SKL);
- MMIO_D(0x6C048, D_SKL);
- MMIO_D(0x6C050, D_SKL);
- MMIO_D(0x6C044, D_SKL);
- MMIO_D(0x6C04C, D_SKL);
- MMIO_D(0x6C054, D_SKL);
- MMIO_D(0x6c058, D_SKL);
- MMIO_D(0x6c05c, D_SKL);
- MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
-
- MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
- MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
- MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
- MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
- MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
-
- MMIO_D(0x70380, D_SKL);
- MMIO_D(0x71380, D_SKL);
- MMIO_D(0x72380, D_SKL);
- MMIO_D(0x7039c, D_SKL);
-
- MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_D(0x8f074, D_SKL);
- MMIO_D(0x8f004, D_SKL);
- MMIO_D(0x8f034, D_SKL);
-
- MMIO_D(0xb11c, D_SKL);
-
- MMIO_D(0x51000, D_SKL);
- MMIO_D(0x6c00c, D_SKL);
-
- MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_D(0xd08, D_SKL);
- MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_D(0x45504, D_SKL_PLUS);
+ MMIO_D(0x45520, D_SKL_PLUS);
+ MMIO_D(0x46000, D_SKL_PLUS);
+ MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_D(0x6C040, D_SKL | D_KBL);
+ MMIO_D(0x6C048, D_SKL | D_KBL);
+ MMIO_D(0x6C050, D_SKL | D_KBL);
+ MMIO_D(0x6C044, D_SKL | D_KBL);
+ MMIO_D(0x6C04C, D_SKL | D_KBL);
+ MMIO_D(0x6C054, D_SKL | D_KBL);
+ MMIO_D(0x6c058, D_SKL | D_KBL);
+ MMIO_D(0x6c05c, D_SKL | D_KBL);
+ MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);
+
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_D(0x70380, D_SKL_PLUS);
+ MMIO_D(0x71380, D_SKL_PLUS);
+ MMIO_D(0x72380, D_SKL_PLUS);
+ MMIO_D(0x7039c, D_SKL_PLUS);
+
+ MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(0x8f074, D_SKL | D_KBL);
+ MMIO_D(0x8f004, D_SKL | D_KBL);
+ MMIO_D(0x8f034, D_SKL | D_KBL);
+
+ MMIO_D(0xb11c, D_SKL | D_KBL);
+
+ MMIO_D(0x51000, D_SKL | D_KBL);
+ MMIO_D(0x6c00c, D_SKL_PLUS);
+
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+
+ MMIO_D(0xd08, D_SKL_PLUS);
+ MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+ MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
- MMIO_D(0x45008, D_SKL);
+ MMIO_D(0x45008, D_SKL | D_KBL);
- MMIO_D(0x46430, D_SKL);
+ MMIO_D(0x46430, D_SKL | D_KBL);
- MMIO_D(0x46520, D_SKL);
+ MMIO_D(0x46520, D_SKL | D_KBL);
- MMIO_D(0xc403c, D_SKL);
- MMIO_D(0xb004, D_SKL);
+ MMIO_D(0xc403c, D_SKL | D_KBL);
+ MMIO_D(0xb004, D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(0x65900, D_SKL);
- MMIO_D(0x1082c0, D_SKL);
- MMIO_D(0x4068, D_SKL);
- MMIO_D(0x67054, D_SKL);
- MMIO_D(0x6e560, D_SKL);
- MMIO_D(0x6e554, D_SKL);
- MMIO_D(0x2b20, D_SKL);
- MMIO_D(0x65f00, D_SKL);
- MMIO_D(0x65f08, D_SKL);
- MMIO_D(0x320f0, D_SKL);
-
- MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x70034, D_SKL);
- MMIO_D(0x71034, D_SKL);
- MMIO_D(0x72034, D_SKL);
-
- MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
-
- MMIO_D(0x44500, D_SKL);
+ MMIO_D(0x65900, D_SKL_PLUS);
+ MMIO_D(0x1082c0, D_SKL | D_KBL);
+ MMIO_D(0x4068, D_SKL | D_KBL);
+ MMIO_D(0x67054, D_SKL | D_KBL);
+ MMIO_D(0x6e560, D_SKL | D_KBL);
+ MMIO_D(0x6e554, D_SKL | D_KBL);
+ MMIO_D(0x2b20, D_SKL | D_KBL);
+ MMIO_D(0x65f00, D_SKL | D_KBL);
+ MMIO_D(0x65f08, D_SKL | D_KBL);
+ MMIO_D(0x320f0, D_SKL | D_KBL);
+
+ MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0x70034, D_SKL_PLUS);
+ MMIO_D(0x71034, D_SKL_PLUS);
+ MMIO_D(0x72034, D_SKL_PLUS);
+
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);
+
+ MMIO_D(0x44500, D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
+
+ MMIO_D(0x4ab8, D_KBL);
+ MMIO_D(0x940c, D_SKL_PLUS);
+ MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+ MMIO_D(0x4ab0, D_SKL | D_KBL);
+ MMIO_D(0x20d4, D_SKL | D_KBL);
+
return 0;
}
@@ -2881,7 +2901,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_SKYLAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)) {
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 92bb247e3478..9d6812f0957f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -580,7 +580,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv)) {
+ } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,7 +690,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
irq->ops = &gen8_irq_ops;
irq->irq_map = gen8_irq_map;
} else {
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d641214578a7..42ff7ffb6066 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -295,10 +295,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n",
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
BYTES_TO_MB(type->low_gm_size),
BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution));
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
}
static MDEV_TYPE_ATTR_RO(available_instances);
@@ -1146,8 +1148,40 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return 0;
}
+static ssize_t
+vgpu_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+
+ if (mdev) {
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)
+ mdev_get_drvdata(mdev);
+ return sprintf(buf, "%d\n", vgpu->id);
+ }
+ return sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(vgpu_id);
+
+static struct attribute *intel_vgpu_attrs[] = {
+ &dev_attr_vgpu_id.attr,
+ NULL
+};
+
+static const struct attribute_group intel_vgpu_group = {
+ .name = "intel_vgpu",
+ .attrs = intel_vgpu_attrs,
+};
+
+static const struct attribute_group *intel_vgpu_groups[] = {
+ &intel_vgpu_group,
+ NULL,
+};
+
static const struct mdev_parent_ops intel_vgpu_ops = {
.supported_type_groups = intel_vgpu_type_groups,
+ .mdev_attr_groups = intel_vgpu_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
@@ -1340,13 +1374,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
- struct intel_vgpu *vgpu = info->vgpu;
-
- if (!info) {
- gvt_vgpu_err("kvmgt_guest_info invalid\n");
- return false;
- }
-
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm_put_kvm(info->kvm);
kvmgt_protect_table_destroy(info);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index a3a027025cd0..7edd66f38ef9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -44,20 +44,21 @@ struct intel_vgpu;
#define D_HSW (1 << 2)
#define D_BDW (1 << 3)
#define D_SKL (1 << 4)
+#define D_KBL (1 << 5)
-#define D_GEN9PLUS (D_SKL)
-#define D_GEN8PLUS (D_BDW | D_SKL)
-#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
-#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_GEN9PLUS (D_SKL | D_KBL)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_SKL_PLUS (D_SKL)
-#define D_BDW_PLUS (D_BDW | D_SKL)
-#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
-#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_SKL_PLUS (D_SKL | D_KBL)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
-#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
struct intel_gvt_mmio_info {
u32 offset;
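With D_KBL added, the D_*_PLUS groups stay plain bitmask unions, so any MMIO entry registered with D_SKL_PLUS now matches Kabylake as well. The membership test is a simple AND, roughly:

    /* device_type is the single D_* bit for the running GPU */
    static inline bool mmio_entry_matches(unsigned long entry_device_mask,
    				          unsigned long device_type)
    {
    	return (entry_device_mask & device_type) != 0;
    }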
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 0beb83563b08..a7b665e9bbad 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -126,6 +126,18 @@ static struct render_mmio gen9_render_mmio_list[] = {
{VCS2, _MMIO(0x1c028), 0xffff, false},
{VECS, _MMIO(0x1a028), 0xffff, false},
+
+ {RCS, _MMIO(0x7304), 0xffff, true},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x940c), 0x0, false},
+ {RCS, _MMIO(0x4ab8), 0x0, false},
+
+ {RCS, _MMIO(0x4ab0), 0x0, false},
+ {RCS, _MMIO(0x20d4), 0x0, false},
+
+ {RCS, _MMIO(0xb004), 0x0, false},
+ {RCS, _MMIO(0x20a0), 0x0, false},
+ {RCS, _MMIO(0x20e4), 0xffff, false},
};
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
@@ -159,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -192,7 +204,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (!IS_SKYLAKE(dev_priv))
+ if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
offset.reg = regs[ring_id];
@@ -230,7 +242,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (!IS_SKYLAKE(dev_priv))
+ if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
offset.reg = regs[ring_id];
@@ -265,7 +277,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
load_mocs(vgpu, ring_id);
@@ -312,7 +325,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
u32 v;
int i, array_size;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
restore_mocs(vgpu, ring_id);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 34b9acdf3479..f84959170674 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,87 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
return false;
}
+struct vgpu_sched_data {
+ struct list_head lru_list;
+ struct intel_vgpu *vgpu;
+
+ ktime_t sched_in_time;
+ ktime_t sched_out_time;
+ ktime_t sched_time;
+ ktime_t left_ts;
+ ktime_t allocated_ts;
+
+ struct vgpu_sched_ctl sched_ctl;
+};
+
+struct gvt_sched_data {
+ struct intel_gvt *gvt;
+ struct hrtimer timer;
+ unsigned long period;
+ struct list_head lru_runq_head;
+};
+
+static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+{
+ ktime_t delta_ts;
+ struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+
+ delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+
+ vgpu_data->sched_time += delta_ts;
+ vgpu_data->left_ts -= delta_ts;
+}
+
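
Each context switch charges the outgoing vGPU for exactly the wall time it held the hardware: the sched-in/sched-out delta is added to its cumulative sched_time and subtracted from its remaining budget. A minimal sketch of that accounting using the kernel's ktime helpers (the 10ms/13ms values are hypothetical, not taken from the patch):

    #include <linux/ktime.h>

    /* Hypothetical example: a vGPU scheduled in at t=10ms, out at t=13ms. */
    static void example_timeslice_update(ktime_t *sched_time, ktime_t *left_ts)
    {
    	ktime_t in = ms_to_ktime(10);
    	ktime_t out = ms_to_ktime(13);
    	ktime_t delta = ktime_sub(out, in);	/* 3ms on the hardware */

    	*sched_time = ktime_add(*sched_time, delta);	/* total time used */
    	*left_ts = ktime_sub(*left_ts, delta);	/* budget shrinks by 3ms */
    }
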
+#define GVT_TS_BALANCE_PERIOD_MS 100
+#define GVT_TS_BALANCE_STAGE_NUM 10
+
+static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
+{
+ struct vgpu_sched_data *vgpu_data;
+ struct list_head *pos;
+ static uint64_t stage_check;
+ int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
+
+ /* The timeslice accumulation is reset at stage 0, where the
+ * timeslice is allocated afresh without carrying over any
+ * previous debt.
+ */
+ if (stage == 0) {
+ int total_weight = 0;
+ ktime_t fair_timeslice;
+
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+ total_weight += vgpu_data->sched_ctl.weight;
+ }
+
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+ fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
+ vgpu_data->sched_ctl.weight /
+ total_weight;
+
+ vgpu_data->allocated_ts = fair_timeslice;
+ vgpu_data->left_ts = vgpu_data->allocated_ts;
+ }
+ } else {
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+
+ /* The timeslice for the next 100ms should include the
+ * leftover/debt slice carried over from previous stages.
+ */
+ vgpu_data->left_ts += vgpu_data->allocated_ts;
+ }
+ }
+}
+
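
The balancer runs once per 100ms period; every GVT_TS_BALANCE_STAGE_NUM-th run (stage 0) re-allocates each vGPU a fresh slice proportional to its weight, while the intervening runs only top the budget up by the same amount. A standalone sketch of the weight-proportional arithmetic (plain C; the three weights are hypothetical):

    #include <stdio.h>

    #define BALANCE_PERIOD_MS 100	/* mirrors GVT_TS_BALANCE_PERIOD_MS */

    int main(void)
    {
    	/* Hypothetical run queue: three vGPUs with weights 8, 4 and 4. */
    	int weight[] = { 8, 4, 4 };
    	int total = 0, i;

    	for (i = 0; i < 3; i++)
    		total += weight[i];

    	/* fair_timeslice = period * weight / total_weight */
    	for (i = 0; i < 3; i++)
    		printf("vGPU%d: %dms of every %dms\n", i,
    		       BALANCE_PERIOD_MS * weight[i] / total,
    		       BALANCE_PERIOD_MS);
    	return 0;	/* prints 50ms, 25ms, 25ms */
    }
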
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id i;
struct intel_engine_cs *engine;
+ struct vgpu_sched_data *vgpu_data;
+ ktime_t cur_time;
/* no target to schedule */
if (!scheduler->next_vgpu)
@@ -77,6 +153,15 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
gvt_dbg_sched("switch to next vgpu %d\n",
scheduler->next_vgpu->id);
+ cur_time = ktime_get();
+ if (scheduler->current_vgpu) {
+ vgpu_data = scheduler->current_vgpu->sched_data;
+ vgpu_data->sched_out_time = cur_time;
+ vgpu_update_timeslice(scheduler->current_vgpu);
+ }
+ vgpu_data = scheduler->next_vgpu->sched_data;
+ vgpu_data->sched_in_time = cur_time;
+
/* switch current vgpu */
scheduler->current_vgpu = scheduler->next_vgpu;
scheduler->next_vgpu = NULL;
@@ -88,62 +173,61 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
wake_up(&scheduler->waitq[i]);
}
-struct tbs_vgpu_data {
- struct list_head list;
- struct intel_vgpu *vgpu;
- /* put some per-vgpu sched stats here */
-};
-
-struct tbs_sched_data {
- struct intel_gvt *gvt;
- struct delayed_work work;
- unsigned long period;
- struct list_head runq_head;
-};
-
-#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
-
-static void tbs_sched_func(struct work_struct *work)
+static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
- struct tbs_sched_data *sched_data = container_of(work,
- struct tbs_sched_data, work.work);
- struct tbs_vgpu_data *vgpu_data;
-
- struct intel_gvt *gvt = sched_data->gvt;
- struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-
+ struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
- struct list_head *pos, *head;
-
- mutex_lock(&gvt->lock);
-
- /* no vgpu or has already had a target */
- if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
- goto out;
-
- if (scheduler->current_vgpu) {
- vgpu_data = scheduler->current_vgpu->sched_data;
- head = &vgpu_data->list;
- } else {
- head = &sched_data->runq_head;
- }
+ struct list_head *head = &sched_data->lru_runq_head;
+ struct list_head *pos;
/* search a vgpu with pending workload */
list_for_each(pos, head) {
- if (pos == &sched_data->runq_head)
- continue;
- vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
continue;
- vgpu = vgpu_data->vgpu;
- break;
+ /* Return the vGPU only if it has time slice left */
+ if (vgpu_data->left_ts > 0) {
+ vgpu = vgpu_data->vgpu;
+ break;
+ }
}
+ return vgpu;
+}
+
+/* in nanosecond */
+#define GVT_DEFAULT_TIME_SLICE 1000000
+
+static void tbs_sched_func(struct gvt_sched_data *sched_data)
+{
+ struct intel_gvt *gvt = sched_data->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct vgpu_sched_data *vgpu_data;
+ struct intel_vgpu *vgpu = NULL;
+ static uint64_t timer_check;
+
+ if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+ gvt_balance_timeslice(sched_data);
+
+ /* no active vgpu, or a target has already been chosen */
+ if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
+ goto out;
+
+ vgpu = find_busy_vgpu(sched_data);
if (vgpu) {
scheduler->next_vgpu = vgpu;
+
+ /* Move the last used vGPU to the tail of lru_list */
+ vgpu_data = vgpu->sched_data;
+ list_del_init(&vgpu_data->lru_list);
+ list_add_tail(&vgpu_data->lru_list,
+ &sched_data->lru_runq_head);
+
gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+ } else {
+ scheduler->next_vgpu = gvt->idle_vgpu;
}
out:
if (scheduler->next_vgpu) {
@@ -151,34 +235,49 @@ out:
scheduler->next_vgpu->id);
try_to_schedule_next_vgpu(gvt);
}
+}
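
lru_runq_head is kept ordered from least to most recently scheduled, so the list walk in find_busy_vgpu() naturally favours the vGPU that has waited longest; re-queueing the winner at the tail is what maintains that order. The list_del_init()/list_add_tail() pair above is equivalent to a single list_move_tail(), sketched here for clarity (lru_item is a stand-in type, not driver code):

    #include <linux/list.h>

    struct lru_item {
    	struct list_head lru_list;	/* stands in for vgpu_sched_data */
    };

    /* Mark @item most recently used by moving it to the tail of @head. */
    static void mark_most_recently_used(struct lru_item *item,
    				    struct list_head *head)
    {
    	list_move_tail(&item->lru_list, head);
    }
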
- /*
- * still have vgpu on runq
- * or last schedule haven't finished due to running workload
- */
- if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
- schedule_delayed_work(&sched_data->work, sched_data->period);
+void intel_gvt_schedule(struct intel_gvt *gvt)
+{
+ struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+ mutex_lock(&gvt->lock);
+ tbs_sched_func(sched_data);
mutex_unlock(&gvt->lock);
}
+static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
+{
+ struct gvt_sched_data *data;
+
+ data = container_of(timer_data, struct gvt_sched_data, timer);
+
+ intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
+
+ hrtimer_add_expires_ns(&data->timer, data->period);
+
+ return HRTIMER_RESTART;
+}
+
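
Note the division of labour: the hrtimer callback runs in interrupt context, so it only posts INTEL_GVT_REQUEST_SCHED and re-arms itself, leaving the mutex-protected scheduling to intel_gvt_schedule(). The general self-rearming pattern, as a minimal sketch (MY_PERIOD_NS and the deferred-work hook are placeholders):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define MY_PERIOD_NS 1000000	/* 1ms, like GVT_DEFAULT_TIME_SLICE */

    static struct hrtimer my_timer;

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
    	/* ...post a request for deferred work here... */
    	hrtimer_add_expires_ns(t, MY_PERIOD_NS);	/* re-arm */
    	return HRTIMER_RESTART;
    }

    static void my_timer_start(void)
    {
    	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    	my_timer.function = my_timer_fn;
    	hrtimer_start(&my_timer, ktime_add_ns(ktime_get(), MY_PERIOD_NS),
    		      HRTIMER_MODE_ABS);
    }
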
static int tbs_sched_init(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
- struct tbs_sched_data *data;
+ struct gvt_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- INIT_LIST_HEAD(&data->runq_head);
- INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ INIT_LIST_HEAD(&data->lru_runq_head);
+ hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ data->timer.function = tbs_timer_fn;
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
scheduler->sched_data = data;
+
return 0;
}
@@ -186,25 +285,28 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
- struct tbs_sched_data *data = scheduler->sched_data;
+ struct gvt_sched_data *data = scheduler->sched_data;
+
+ hrtimer_cancel(&data->timer);
- cancel_delayed_work(&data->work);
kfree(data);
scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
- struct tbs_vgpu_data *data;
+ struct vgpu_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->sched_ctl.weight = vgpu->sched_ctl.weight;
data->vgpu = vgpu;
- INIT_LIST_HEAD(&data->list);
+ INIT_LIST_HEAD(&data->lru_list);
vgpu->sched_data = data;
+
return 0;
}
@@ -216,21 +318,24 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
- struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
- struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+ struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- if (!list_empty(&vgpu_data->list))
+ if (!list_empty(&vgpu_data->lru_list))
return;
- list_add_tail(&vgpu_data->list, &sched_data->runq_head);
- schedule_delayed_work(&sched_data->work, 0);
+ list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+
+ if (!hrtimer_active(&sched_data->timer))
+ hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
+ sched_data->period), HRTIMER_MODE_ABS);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
- struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- list_del_init(&vgpu_data->list);
+ list_del_init(&vgpu_data->lru_list);
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index bb8b9097e41a..ba00a5f7455f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
void (*stop_schedule)(struct intel_vgpu *vgpu);
};
+void intel_gvt_schedule(struct intel_gvt *gvt);
+
int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d9a735310e75..a77db2332e68 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -453,7 +453,8 @@ static int workload_thread(void *priv)
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2833dfa8c9ae..2cd725c0573e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -67,7 +67,6 @@ struct shadow_per_ctx {
};
struct intel_shadow_wa_ctx {
- struct intel_vgpu_workload *workload;
struct shadow_indirect_ctx indirect_ctx;
struct shadow_per_ctx per_ctx;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..6ba02525e905 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,18 +64,28 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+#define VGPU_MAX_WEIGHT 16
+#define VGPU_WEIGHT(vgpu_num) \
+ (VGPU_MAX_WEIGHT / (vgpu_num))
+
static struct {
unsigned int low_mm;
unsigned int high_mm;
unsigned int fence;
+
+ /* A vGPU with a weight of 8 will get twice as much GPU time as a
+ * vGPU with a weight of 4 on a contended host; each vGPU type is
+ * assigned a different weight. Legal weights range from 1 to 16.
+ */
+ unsigned int weight;
enum intel_vgpu_edid edid;
char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
- { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
- { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
- { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
- { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+ { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
+ { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
+ { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
+ { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
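
With VGPU_MAX_WEIGHT at 16, the table yields weights of 2, 4, 8 and 16 for the _8, _4, _2 and _1 types, so a single _2 instance (weight 8) is entitled to four times the GPU time of an _8 instance (weight 2) on a contended host. A compile-time check of that arithmetic (illustrative only):

    #define VGPU_MAX_WEIGHT 16
    #define VGPU_WEIGHT(vgpu_num) (VGPU_MAX_WEIGHT / (vgpu_num))

    /* Sanity checks mirroring the fixed vGPU type table above. */
    _Static_assert(VGPU_WEIGHT(8) == 2,  "type _8 weight");
    _Static_assert(VGPU_WEIGHT(4) == 4,  "type _4 weight");
    _Static_assert(VGPU_WEIGHT(2) == 8,  "type _2 weight");
    _Static_assert(VGPU_WEIGHT(1) == 16, "type _1 weight");
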
/**
@@ -120,6 +130,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
gvt->types[i].fence = vgpu_types[i].fence;
+
+ if (vgpu_types[i].weight < 1 ||
+ vgpu_types[i].weight > VGPU_MAX_WEIGHT)
+ return -EINVAL;
+
+ gvt->types[i].weight = vgpu_types[i].weight;
gvt->types[i].resolution = vgpu_types[i].edid;
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
@@ -131,11 +147,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
- gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+ gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
gvt->types[i].avail_instance,
gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence,
+ gvt->types[i].weight,
vgpu_edid_str(gvt->types[i].resolution));
}
@@ -216,6 +233,59 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&gvt->lock);
}
+#define IDLE_VGPU_IDR 0
+
+/**
+ * intel_gvt_create_idle_vgpu - create an idle virtual GPU
+ * @gvt: GVT device
+ *
+ * This function is called when a user wants to create an idle virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ enum intel_engine_id i;
+ int ret;
+
+ vgpu = vzalloc(sizeof(*vgpu));
+ if (!vgpu)
+ return ERR_PTR(-ENOMEM);
+
+ vgpu->id = IDLE_VGPU_IDR;
+ vgpu->gvt = gvt;
+
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+ ret = intel_vgpu_init_sched_policy(vgpu);
+ if (ret)
+ goto out_free_vgpu;
+
+ vgpu->active = false;
+
+ return vgpu;
+
+out_free_vgpu:
+ vfree(vgpu);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to destroy an idle virtual GPU.
+ *
+ */
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_vgpu_clean_sched_policy(vgpu);
+ vfree(vgpu);
+}
+
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *param)
{
@@ -232,13 +302,15 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
mutex_lock(&gvt->lock);
- ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+ ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
+ GFP_KERNEL);
if (ret < 0)
goto out_free_vgpu;
vgpu->id = ret;
vgpu->handle = param->handle;
vgpu->gvt = gvt;
+ vgpu->sched_ctl.weight = param->weight;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
intel_vgpu_init_cfg_space(vgpu, param->primary);
@@ -325,6 +397,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
param.fence_sz = type->fence;
+ param.weight = type->weight;
param.resolution = type->resolution;
/* XXX current param based on MB */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47e707d83c4d..d689e511744e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1012,9 +1012,12 @@ static int gpu_state_release(struct inode *inode, struct file *file)
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
+ struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_state *gpu;
- gpu = i915_capture_gpu_state(inode->i_private);
+ intel_runtime_pm_get(i915);
+ gpu = i915_capture_gpu_state(i915);
+ intel_runtime_pm_put(i915);
if (!gpu)
return -ENOMEM;
@@ -1459,16 +1462,14 @@ static int ironlake_drpc_info(struct seq_file *m)
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_uncore_forcewake_domain *fw_domain;
+ unsigned int tmp;
- spin_lock_irq(&dev_priv->uncore.lock);
- for_each_fw_domain(fw_domain, dev_priv) {
+ for_each_fw_domain(fw_domain, i915, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
- fw_domain->wake_count);
- }
- spin_unlock_irq(&dev_priv->uncore.lock);
+ READ_ONCE(fw_domain->wake_count));
return 0;
}
@@ -1938,9 +1939,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
- ring->space, ring->head, ring->tail,
- ring->last_retired_head);
+ seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
+ ring->space, ring->head, ring->tail);
}
static int i915_context_status(struct seq_file *m, void *unused)
@@ -2474,9 +2474,9 @@ static void i915_guc_client_info(struct seq_file *m,
enum intel_engine_id id;
uint64_t tot = 0;
- seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
- client->priority, client->ctx_index, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+ seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
+ client->priority, client->stage_id, client->proc_desc_offset);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
@@ -2511,7 +2511,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
}
seq_printf(m, "Doorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+ seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
@@ -4129,7 +4129,9 @@ i915_wedged_get(void *data, u64 *val)
static int
i915_wedged_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
+ struct drm_i915_private *i915 = data;
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
/*
* There is no safeguard against this debugfs entry colliding
@@ -4139,13 +4141,17 @@ i915_wedged_set(void *data, u64 val)
* while it is writing to 'i915_wedged'
*/
- if (i915_reset_backoff(&dev_priv->gpu_error))
+ if (i915_reset_backoff(&i915->gpu_error))
return -EAGAIN;
- i915_handle_error(dev_priv, val,
- "Manually setting wedged to %llu", val);
+ for_each_engine_masked(engine, i915, val, tmp) {
+ engine->hangcheck.seqno = intel_engine_get_seqno(engine);
+ engine->hangcheck.stalled = true;
+ }
+
+ i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
- wait_on_bit(&dev_priv->gpu_error.flags,
+ wait_on_bit(&i915->gpu_error.flags,
I915_RESET_HANDOFF,
TASK_UNINTERRUPTIBLE);
@@ -4173,10 +4179,6 @@ fault_irq_set(struct drm_i915_private *i915,
if (err)
goto err_unlock;
- /* Retire to kick idle work */
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
-
*irq = val;
mutex_unlock(&i915->drm.struct_mutex);
@@ -4280,7 +4282,7 @@ i915_drop_caches_set(void *data, u64 val)
goto unlock;
}
- if (val & (DROP_RETIRE | DROP_ACTIVE))
+ if (val & DROP_RETIRE)
i915_gem_retire_requests(dev_priv);
lockdep_set_current_reclaim_state(GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98b17070a123..c616b4e755bc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -549,6 +549,7 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_context_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -609,7 +610,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_irq;
+ goto cleanup_uc;
intel_modeset_gem_init(dev);
@@ -631,9 +632,9 @@ cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
+cleanup_uc:
+ intel_uc_fini_fw(dev_priv);
cleanup_irq:
- intel_guc_fini(dev_priv);
- intel_huc_fini(dev_priv);
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
cleanup_csr:
@@ -1351,9 +1352,8 @@ void i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
- intel_guc_fini(dev_priv);
- intel_huc_fini(dev_priv);
i915_gem_fini(dev_priv);
+ intel_uc_fini_fw(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a5947a496d0a..c9b0949f6c1a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,26 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170320"
-#define DRIVER_TIMESTAMP 1489994464
-
-#undef WARN_ON
-/* Many gcc seem to no see through this and fall over :( */
-#if 0
-#define WARN_ON(x) ({ \
- bool __i915_warn_cond = (x); \
- if (__builtin_constant_p(__i915_warn_cond)) \
- BUILD_BUG_ON(__i915_warn_cond); \
- WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
-#else
-#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-#endif
-
-#undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
-
-#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
- (long) (x), __func__);
+#define DRIVER_DATE "20170403"
+#define DRIVER_TIMESTAMP 1491198738
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -703,9 +685,9 @@ enum forcewake_domain_id {
};
enum forcewake_domains {
- FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
- FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
- FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
FORCEWAKE_ALL = (FORCEWAKE_RENDER |
FORCEWAKE_BLITTER |
FORCEWAKE_MEDIA)
@@ -732,21 +714,25 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
+ enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-
- uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
-
- void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint8_t val, bool trace);
- void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint16_t val, bool trace);
- void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint32_t val, bool trace);
+ enum forcewake_domains domains);
+
+ uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+
+ void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint8_t val, bool trace);
+ void (*mmio_writew)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint16_t val, bool trace);
+ void (*mmio_writel)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint32_t val, bool trace);
};
struct intel_forcewake_range {
@@ -770,32 +756,35 @@ struct intel_uncore {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ u32 fw_set;
+ u32 fw_clear;
+ u32 fw_reset;
+
struct intel_uncore_forcewake_domain {
- struct drm_i915_private *i915;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned wake_count;
struct hrtimer timer;
i915_reg_t reg_set;
- u32 val_set;
- u32 val_clear;
i915_reg_t reg_ack;
- i915_reg_t reg_post;
- u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
int unclaimed_mmio_check;
};
+#define __mask_next_bit(mask) ({ \
+ int __idx = ffs(mask) - 1; \
+ mask &= ~BIT(__idx); \
+ __idx; \
+})
+
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
- for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
- (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
- (domain__)++) \
- for_each_if ((mask__) & (domain__)->mask)
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+ for (tmp__ = (mask__); \
+ tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
-#define for_each_fw_domain(domain__, dev_priv__) \
- for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
+#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
+ for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
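
__mask_next_bit() destructively peels the lowest set bit off a local copy of the mask, which is what lets the rewritten iterator visit only the initialised domains with no bounds check and no for_each_if filtering. The same bit-peeling loop, sketched standalone (plain C with GCC statement expressions; ffs() from <strings.h> plays the part of the kernel's ffs()):

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    #define MASK_NEXT_BIT(mask) ({		\
    	int __idx = ffs(mask) - 1;	\
    	(mask) &= ~(1u << __idx);	\
    	__idx;				\
    })

    int main(void)
    {
    	unsigned int tmp = 0x15;	/* bits 0, 2 and 4 set */

    	while (tmp)	/* must check before peeling: ffs(0) is 0 */
    		printf("domain %d\n", MASK_NEXT_BIT(tmp));
    	return 0;	/* prints 0, 2, 4 */
    }
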
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
@@ -846,6 +835,7 @@ struct intel_csr {
func(has_resource_streamer); \
func(has_runtime_pm); \
func(has_snoop); \
+ func(unfenced_needs_alignment); \
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
@@ -2578,12 +2568,6 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
(id__)++) \
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
-#define __mask_next_bit(mask) ({ \
- int __idx = ffs(mask) - 1; \
- mask &= ~BIT(__idx); \
- __idx; \
-})
-
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
@@ -3956,14 +3940,14 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
+static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
+static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58e1db77d70e..bbc6f1c9f175 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2321,7 +2321,7 @@ rebuild_st:
st->nents = 0;
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (IS_ERR(page)) {
+ if (unlikely(IS_ERR(page))) {
i915_gem_shrink(dev_priv,
page_count,
I915_SHRINK_BOUND |
@@ -2329,12 +2329,21 @@ rebuild_st:
I915_SHRINK_PURGEABLE);
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
}
- if (IS_ERR(page)) {
+ if (unlikely(IS_ERR(page))) {
+ gfp_t reclaim;
+
/* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
+ *
+ * However, since graphics tend to be disposable,
+ * defer the oom here by reporting the ENOMEM back
+ * to userspace.
*/
- page = shmem_read_mapping_page(mapping, i);
+ reclaim = mapping_gfp_constraint(mapping, 0);
+ reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
+
+ page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_sg;
@@ -2989,10 +2998,15 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
+ /* Retire completed requests first so the list of inflight/incomplete
+ * requests is accurate and we don't try to mark successful requests
+ * as in error during __i915_gem_set_wedged_BKL().
+ */
+ i915_gem_retire_requests(dev_priv);
+
stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
i915_gem_context_lost(dev_priv);
- i915_gem_retire_requests(dev_priv);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
@@ -3098,9 +3112,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Wait for last execlists context complete, but bail out in case a
* new request is submitted.
*/
- wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
- intel_engines_are_idle(dev_priv),
- 10);
+ wait_for(intel_engines_are_idle(dev_priv), 10);
if (READ_ONCE(dev_priv->gt.active_requests))
return;
@@ -3259,6 +3271,29 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
return 0;
}
+static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
+{
+ return wait_for(intel_engine_is_idle(engine), timeout_ms);
+}
+
+static int wait_for_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
+ i915_gem_set_wedged(i915);
+ return -EIO;
+ }
+
+ GEM_BUG_ON(intel_engine_get_seqno(engine) !=
+ intel_engine_last_submit(engine));
+ }
+
+ return 0;
+}
+
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
int ret;
@@ -3273,13 +3308,16 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
if (ret)
return ret;
}
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
+
+ ret = wait_for_engines(i915);
} else {
ret = wait_for_timeline(&i915->gt.global_timeline, flags);
- if (ret)
- return ret;
}
- return 0;
+ return ret;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3307,8 +3345,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
* system agents we cannot reproduce this behaviour).
*/
wmb();
- if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+ if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ spin_lock_irq(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
+ }
+ }
intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -4408,9 +4452,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
if (ret)
goto err_unlock;
- i915_gem_retire_requests(dev_priv);
- GEM_BUG_ON(dev_priv->gt.active_requests);
-
assert_kernel_context_is_current(dev_priv);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index d925fb582ba7..ffd01e02fe94 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -168,7 +168,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
i915_sw_fence_await_reservation(&clflush->wait,
obj->resv, NULL,
- false, I915_FENCE_TIMEOUT,
+ true, I915_FENCE_TIMEOUT,
GFP_KERNEL);
reservation_object_lock(obj->resv, NULL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 486051ed681d..8bd0c4966913 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -576,25 +576,25 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
}
static inline int
-mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 flags)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
- u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
- /* Use an extended w/a on ivb+ if signalling from other rings */
- i915.semaphores ?
+ /* Use an extended w/a on gen7 if signalling from other rings */
+ (i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len;
+ u32 *cs;
- /* These flags are for resource streamer on HSW+ */
+ flags |= MI_MM_SPACE_GTT;
if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
- flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_GEN(dev_priv) < 8)
- flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
-
+ /* These flags are for resource streamer on HSW+ */
+ flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
+ else
+ flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
if (INTEL_GEN(dev_priv) >= 7)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2da3a94fc9f3..51e365f70464 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -196,7 +196,6 @@ search_again:
if (ret)
return ret;
- i915_gem_retire_requests(dev_priv);
goto search_again;
found:
@@ -383,7 +382,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
if (ret)
return ret;
- i915_gem_retire_requests(dev_priv);
WARN_ON(!list_empty(&vm->active_list));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dd7181ed5eca..a3e59c8ef27b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -890,6 +890,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head ordered_vmas;
struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+ bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
int retry;
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -910,7 +911,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
if (!has_fenced_gpu_access)
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+ needs_unfenced_map) &&
i915_gem_object_is_tiled(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cee9c4fec52a..8bab4aea63e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2364,7 +2364,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+ if (i915_gem_wait_for_idle(dev_priv, 0)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0e8d1010cecb..6348353b91ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ /* The timeline struct (as part of the ppgtt underneath a context)
+ * may be freed when the request is no longer in use by the GPU.
+ * We could extend the life of a context to beyond that of all
+ * fences, possibly keeping the hw resource around indefinitely,
+ * or we just give them a false name. Since
+ * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+ * lie seems justifiable.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return "signaled";
+
return to_request(fence)->timeline->common->name;
}
@@ -180,7 +191,6 @@ i915_priotree_init(struct i915_priotree *pt)
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
- struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
@@ -192,15 +202,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
if (ret)
return ret;
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests > 1);
-
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
-
- if (wait_for(intel_engine_is_idle(engine), 50))
- return -EBUSY;
+ struct i915_gem_timeline *timeline;
+ struct intel_timeline *tl = engine->timeline;
if (!i915_seqno_passed(seqno, tl->seqno)) {
/* spin until threads are complete */
@@ -211,14 +216,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Finally reset hw state */
tl->seqno = seqno;
intel_engine_init_global_seqno(engine, seqno);
- }
- list_for_each_entry(timeline, &i915->gt.timelines, link) {
- for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
-
- memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
- }
+ list_for_each_entry(timeline, &i915->gt.timelines, link)
+ memset(timeline->engine[id].sync_seqno, 0,
+ sizeof(timeline->engine[id].sync_seqno));
}
return 0;
@@ -295,7 +296,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
* completion order.
*/
list_del(&request->ring_link);
- request->ring->last_retired_head = request->postfix;
+ request->ring->head = request->postfix;
if (!--request->i915->gt.active_requests) {
GEM_BUG_ON(!request->i915->gt.awake);
mod_delayed_work(request->i915->wq,
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 832ac9e45801..1642fff9cf13 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -30,16 +30,25 @@
/**
* DOC: GuC-based command submission
*
- * i915_guc_client:
- * We use the term client to avoid confusion with contexts. A i915_guc_client is
- * equivalent to GuC object guc_context_desc. This context descriptor is
- * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
- * and workqueue for it. Also the process descriptor (guc_process_desc), which
- * is mapped to client space. So the client can write Work Item then ring the
- * doorbell.
+ * GuC client:
+ * An i915_guc_client refers to a submission path through GuC. Currently, there
+ * is only one of these (the execbuf_client) and this one is charged with all
+ * submissions to the GuC. This struct is the owner of a doorbell, a process
+ * descriptor and a workqueue (all of them inside a single gem object that
+ * contains all required pages for these elements).
*
- * To simplify the implementation, we allocate one gem object that contains all
- * pages for doorbell, process descriptor and workqueue.
+ * GuC stage descriptor:
+ * During initialization, the driver allocates a static pool of 1024 such
+ * descriptors, and shares them with the GuC.
+ * Currently, there exists a 1:1 mapping between an i915_guc_client and a
+ * guc_stage_desc (via the client's stage_id), so effectively only one
+ * gets used. This stage descriptor lets the GuC know about the doorbell,
+ * workqueue and process descriptor. Theoretically, it also lets the GuC
+ * know about our HW contexts (context ID, etc...), but we actually
+ * employ a kind of submission where the GuC uses the LRCA sent via the work
+ * item instead (the single guc_stage_desc associated to execbuf client
+ * contains information about the default kernel context only, but this is
+ * essentially unused). This is called a "proxy" submission.
*
* The Scratch registers:
* There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
@@ -62,34 +71,91 @@
* ELSP context descriptor dword into Work Item.
* See guc_wq_item_append()
*
+ * ADS:
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
+ * scheduling policies (guc_policies), a structure describing a collection of
+ * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
+ * its internal state for sleep.
+ *
*/
+static inline bool is_high_priority(struct i915_guc_client *client)
+{
+ return client->priority <= GUC_CLIENT_PRIORITY_HIGH;
+}
+
+static int __reserve_doorbell(struct i915_guc_client *client)
+{
+ unsigned long offset;
+ unsigned long end;
+ u16 id;
+
+ GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
+
+ /*
+ * The bitmap tracks which doorbell registers are currently in use.
+ * It is split into two halves; the first half is used for normal
+ * priority contexts, the second half for high-priority ones.
+ */
+ offset = 0;
+ end = GUC_NUM_DOORBELLS/2;
+ if (is_high_priority(client)) {
+ offset = end;
+ end += offset;
+ }
+
+ id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
+ if (id == end)
+ return -ENOSPC;
+
+ __set_bit(id, client->guc->doorbell_bitmap);
+ client->doorbell_id = id;
+ DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
+ client->stage_id, yesno(is_high_priority(client)),
+ id);
+ return 0;
+}
+
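
With the bitmap halved this way, normal-priority clients draw doorbell ids from [0, GUC_NUM_DOORBELLS/2) and high-priority ones from [GUC_NUM_DOORBELLS/2, GUC_NUM_DOORBELLS). A tiny sketch of that range selection (256 is an assumed value for GUC_NUM_DOORBELLS, not taken from this patch):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_DOORBELLS 256	/* assumption standing in for GUC_NUM_DOORBELLS */

    static void doorbell_range(bool high_priority,
    			   unsigned int *start, unsigned int *end)
    {
    	/* First half for normal priority, second half for high. */
    	*start = high_priority ? NUM_DOORBELLS / 2 : 0;
    	*end = *start + NUM_DOORBELLS / 2;
    }

    int main(void)
    {
    	unsigned int start, end;

    	doorbell_range(true, &start, &end);
    	printf("high-priority ids: [%u, %u)\n", start, end);	/* [128, 256) */
    	return 0;
    }
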
+static void __unreserve_doorbell(struct i915_guc_client *client)
+{
+ GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
+
+ __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+ client->doorbell_id = GUC_DOORBELL_INVALID;
+}
+
/*
* Tell the GuC to allocate or deallocate a specific doorbell
*/
-static int guc_allocate_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
u32 action[] = {
INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
- client->ctx_index
+ stage_id
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int guc_release_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
u32 action[] = {
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
- client->ctx_index
+ stage_id
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
+static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
+{
+ struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
+
+ return &base[client->stage_id];
+}
+
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
@@ -97,107 +163,129 @@ static int guc_release_doorbell(struct intel_guc *guc,
* client object which contains the page being used for the doorbell
*/
-static int guc_update_doorbell_id(struct intel_guc *guc,
- struct i915_guc_client *client,
- u16 new_id)
+static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
{
- struct sg_table *sg = guc->ctx_pool_vma->pages;
- void *doorbell_bitmap = guc->doorbell_bitmap;
- struct guc_doorbell_info *doorbell;
- struct guc_context_desc desc;
- size_t len;
+ struct guc_stage_desc *desc;
- doorbell = client->vaddr + client->doorbell_offset;
+ /* Update the GuC's idea of the doorbell ID */
+ desc = __get_stage_desc(client);
+ desc->db_id = new_id;
+}
- if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
- test_bit(client->doorbell_id, doorbell_bitmap)) {
- /* Deactivate the old doorbell */
- doorbell->db_status = GUC_DOORBELL_DISABLED;
- (void)guc_release_doorbell(guc, client);
- __clear_bit(client->doorbell_id, doorbell_bitmap);
- }
+static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client)
+{
+ return client->vaddr + client->doorbell_offset;
+}
- /* Update the GuC's idea of the doorbell ID */
- len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
- if (len != sizeof(desc))
- return -EFAULT;
- desc.db_id = new_id;
- len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
- if (len != sizeof(desc))
- return -EFAULT;
-
- client->doorbell_id = new_id;
- if (new_id == GUC_INVALID_DOORBELL_ID)
- return 0;
+static bool has_doorbell(struct i915_guc_client *client)
+{
+ if (client->doorbell_id == GUC_DOORBELL_INVALID)
+ return false;
- /* Activate the new doorbell */
- __set_bit(new_id, doorbell_bitmap);
+ return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+}
+
+static int __create_doorbell(struct i915_guc_client *client)
+{
+ struct guc_doorbell_info *doorbell;
+ int err;
+
+ doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = client->doorbell_cookie;
- return guc_allocate_doorbell(guc, client);
+
+ err = __guc_allocate_doorbell(client->guc, client->stage_id);
+ if (err) {
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ doorbell->cookie = 0;
+ }
+ return err;
}
-static void guc_disable_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __destroy_doorbell(struct i915_guc_client *client)
{
- (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
+ struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
+ struct guc_doorbell_info *doorbell;
+ u16 db_id = client->doorbell_id;
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
+ GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+
+ doorbell = __get_doorbell(client);
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ doorbell->cookie = 0;
+
+ /* The doorbell release flow requires that we wait for the
+ * GEN8_DRB_VALID bit to go to zero after updating db_status,
+ * before we call the GuC to release the doorbell.
+ */
+ if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+ WARN_ONCE(true, "Doorbell never became invalid after disable\n");
+
+ return __guc_deallocate_doorbell(client->guc, client->stage_id);
}
-static uint16_t
-select_doorbell_register(struct intel_guc *guc, uint32_t priority)
+static int create_doorbell(struct i915_guc_client *client)
{
- /*
- * The bitmap tracks which doorbell registers are currently in use.
- * It is split into two halves; the first half is used for normal
- * priority contexts, the second half for high-priority ones.
- * Note that logically higher priorities are numerically less than
- * normal ones, so the test below means "is it high-priority?"
- */
- const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
- const uint16_t half = GUC_MAX_DOORBELLS / 2;
- const uint16_t start = hi_pri ? half : 0;
- const uint16_t end = start + half;
- uint16_t id;
+ int ret;
- id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
- if (id == end)
- id = GUC_INVALID_DOORBELL_ID;
+ ret = __reserve_doorbell(client);
+ if (ret)
+ return ret;
- DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
- hi_pri ? "high" : "normal", id);
+ __update_doorbell_desc(client, client->doorbell_id);
+
+ ret = __create_doorbell(client);
+ if (ret)
+ goto err;
+
+ return 0;
- return id;
+err:
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+ __unreserve_doorbell(client);
+ return ret;
}
-/*
- * Select, assign and relase doorbell cachelines
- *
- * These functions track which doorbell cachelines are in use.
- * The data they manipulate is protected by the intel_guc_send lock.
- */
+static int destroy_doorbell(struct i915_guc_client *client)
+{
+ int err;
+
+ GEM_BUG_ON(!has_doorbell(client));
+
+ /* XXX: wait for any interrupts */
+ /* XXX: wait for workqueue to drain */
+
+ err = __destroy_doorbell(client);
+ if (err)
+ return err;
+
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+
+ __unreserve_doorbell(client);
-static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
+ return 0;
+}
+
+static unsigned long __select_cacheline(struct intel_guc *guc)
{
- const uint32_t cacheline_size = cache_line_size();
- uint32_t offset;
+ unsigned long offset;
/* Doorbell uses a single cache line within a page */
offset = offset_in_page(guc->db_cacheline);
/* Moving to next cache line to reduce contention */
- guc->db_cacheline += cacheline_size;
-
- DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cacheline_size);
+ guc->db_cacheline += cache_line_size();
+ DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
+ offset, guc->db_cacheline, cache_line_size());
return offset;
}
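
Each client's doorbell thus lands on its own cache line within the page: the allocator returns the page offset of a monotonically advancing counter and bumps the counter by one line, wrapping implicitly through offset_in_page(). A userspace-flavoured sketch of the scheme (the 64-byte line size is an assumption in place of cache_line_size()):

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define CACHELINE 64	/* assumed cache_line_size() */

    static unsigned int db_cacheline;	/* mirrors guc->db_cacheline */

    static unsigned long select_cacheline(void)
    {
    	unsigned long offset = db_cacheline % PAGE_SIZE; /* offset_in_page() */

    	db_cacheline += CACHELINE;	/* next caller gets the next line */
    	return offset;
    }

    int main(void)
    {
    	unsigned long a = select_cacheline();
    	unsigned long b = select_cacheline();
    	unsigned long c = select_cacheline();

    	printf("%lu %lu %lu\n", a, b, c);	/* 0 64 128 */
    	return 0;
    }
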
+static inline struct guc_process_desc *
+__get_process_desc(struct i915_guc_client *client)
+{
+ return client->vaddr + client->proc_desc_offset;
+}
+
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
@@ -206,9 +294,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
{
struct guc_process_desc *desc;
- desc = client->vaddr + client->proc_desc_offset;
-
- memset(desc, 0, sizeof(*desc));
+ desc = memset(__get_process_desc(client), 0, sizeof(*desc));
/*
* XXX: pDoorbell and WQVBaseAddress are pointers in process address
@@ -219,42 +305,41 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->wq_base_addr = 0;
desc->db_base_addr = 0;
- desc->context_id = client->ctx_index;
+ desc->stage_id = client->stage_id;
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
}
/*
- * Initialise/clear the context descriptor shared with the GuC firmware.
+ * Initialise/clear the stage descriptor shared with the GuC firmware.
*
* This descriptor tells the GuC where (in GGTT space) to find the important
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
-
-static void guc_ctx_desc_init(struct intel_guc *guc,
- struct i915_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
- struct guc_context_desc desc;
- struct sg_table *sg;
+ struct guc_stage_desc *desc;
unsigned int tmp;
u32 gfx_addr;
- memset(&desc, 0, sizeof(desc));
+ desc = __get_stage_desc(client);
+ memset(desc, 0, sizeof(*desc));
- desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
- desc.context_id = client->ctx_index;
- desc.priority = client->priority;
- desc.db_id = client->doorbell_id;
+ desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
+ desc->stage_id = client->stage_id;
+ desc->priority = client->priority;
+ desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = &ctx->engine[engine->id];
uint32_t guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
+ struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -266,12 +351,22 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
if (!ce->state)
break; /* XXX: continue? */
+ /*
+ * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
+ * submission or, in other words, not using a direct submission
+ * model) the KMD's LRCA is not used for any work submission.
+ * Instead, the GuC uses the LRCA of the user mode context (see
+ * guc_wq_item_append below).
+ */
lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- lrc->ring_lcra =
+ lrc->ring_lrca =
guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
- lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
+
+ /* XXX: In direct submission, the GuC wants the HW context id
+ * here. In proxy submission, it wants the stage id */
+ lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
@@ -279,50 +374,36 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << guc_engine_id);
+ desc->engines_used |= (1 << guc_engine_id);
}
DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc.engines_used);
- WARN_ON(desc.engines_used == 0);
+ client->engines, desc->engines_used);
+ WARN_ON(desc->engines_used == 0);
/*
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
gfx_addr = guc_ggtt_offset(client->vma);
- desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
+ desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc.db_trigger_cpu =
- (uintptr_t)client->vaddr + client->doorbell_offset;
- desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
- desc.process_desc = gfx_addr + client->proc_desc_offset;
- desc.wq_addr = gfx_addr + client->wq_offset;
- desc.wq_size = client->wq_size;
-
- /*
- * XXX: Take LRCs from an existing context if this is not an
- * IsKMDCreatedContext client
- */
- desc.desc_private = (uintptr_t)client;
+ desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
+ desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
+ desc->process_desc = gfx_addr + client->proc_desc_offset;
+ desc->wq_addr = gfx_addr + client->wq_offset;
+ desc->wq_size = client->wq_size;
- /* Pool context is pinned already */
- sg = guc->ctx_pool_vma->pages;
- sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
+ desc->desc_private = (uintptr_t)client;
}
-static void guc_ctx_desc_fini(struct intel_guc *guc,
- struct i915_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
- struct guc_context_desc desc;
- struct sg_table *sg;
-
- memset(&desc, 0, sizeof(desc));
+ struct guc_stage_desc *desc;
- sg = guc->ctx_pool_vma->pages;
- sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
+ desc = __get_stage_desc(client);
+ memset(desc, 0, sizeof(*desc));
}
/**
@@ -345,8 +426,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *client = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = client->vaddr +
- client->proc_desc_offset;
+ struct guc_process_desc *desc = __get_process_desc(client);
u32 freespace;
int ret;
@@ -391,19 +471,17 @@ static void guc_wq_item_append(struct i915_guc_client *client,
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size/sizeof(u32) - 1;
struct intel_engine_cs *engine = rq->engine;
- struct guc_process_desc *desc;
+ struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
u32 freespace, tail, wq_off;
- desc = client->vaddr + client->proc_desc_offset;
-
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->tail;
- GEM_BUG_ON(tail & 7);
+ assert_ring_tail_valid(rq->ring, rq->tail);
tail >>= 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
@@ -436,19 +514,27 @@ static void guc_wq_item_append(struct i915_guc_client *client,
/* The GuC wants only the low-order word of the context descriptor */
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
- wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
+ wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = rq->global_seqno;
}
+static void guc_reset_wq(struct i915_guc_client *client)
+{
+ struct guc_process_desc *desc = __get_process_desc(client);
+
+ desc->head = 0;
+ desc->tail = 0;
+
+ client->wq_tail = 0;
+}
+
static int guc_ring_doorbell(struct i915_guc_client *client)
{
- struct guc_process_desc *desc;
+ struct guc_process_desc *desc = __get_process_desc(client);
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = client->vaddr + client->proc_desc_offset;
-
/* Update the tail so it is visible to GuC */
desc->tail = client->wq_tail;
@@ -463,7 +549,7 @@ static int guc_ring_doorbell(struct i915_guc_client *client)
db_exc.cookie = 1;
/* pointer to the current doorbell cacheline */
- db = client->vaddr + client->doorbell_offset;
+ db = (union guc_doorbell_qw *)__get_doorbell(client);
while (attempt--) {
/* lets ring the doorbell */
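
The body of the retry loop (unchanged, so elided by the hunk) rings the doorbell with a 64-bit compare-and-exchange on that cacheline. Roughly, assuming guc_doorbell_qw packs the doorbell status and a cookie into a single value_qw:

	db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
					   db_cmp.value_qw, db_exc.value_qw);
	if (db_ret.value_qw == db_cmp.value_qw) {
		ret = 0;	/* GuC saw the expected status/cookie pair */
		break;
	}
	/* otherwise reload the cookie and try again */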
@@ -573,23 +659,10 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
{
struct execlist_port *port = engine->execlist_port;
struct drm_i915_gem_request *last = port[0].request;
- unsigned long flags;
struct rb_node *rb;
bool submit = false;
- /* After execlist_first is updated, the tasklet will be rescheduled.
- *
- * If we are currently running (inside the tasklet) and a third
- * party queues a request and so updates engine->execlist_first under
- * the spinlock (which we have elided), it will atomically set the
- * TASKLET_SCHED flag causing the us to be re-executed and pick up
- * the change in state (the update to TASKLET_SCHED incurs a memory
- * barrier making this cross-cpu checking safe).
- */
- if (!READ_ONCE(engine->execlist_first))
- return false;
-
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
while (rb) {
struct drm_i915_gem_request *rq =
@@ -609,8 +682,8 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
RB_CLEAR_NODE(&rq->priotree.node);
rq->priotree.priority = INT_MAX;
- trace_i915_gem_request_in(rq, port - engine->execlist_port);
i915_guc_submit(rq);
+ trace_i915_gem_request_in(rq, port - engine->execlist_port);
last = rq;
submit = true;
}
@@ -619,7 +692,7 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
nested_enable_signaling(last);
engine->execlist_first = rb;
}
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irq(&engine->timeline->lock);
return submit;
}
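
The switch from spin_lock_irqsave() to spin_lock_irq() encodes a context assumption: the dequeue now runs only from the submission tasklet, i.e. softirq context with interrupts known to be enabled, so saving and restoring the flags word is redundant. Schematically:

	spin_lock_irq(&engine->timeline->lock);   /* unconditionally disables irqs */
	/* ... dequeue critical section ... */
	spin_unlock_irq(&engine->timeline->lock); /* unconditionally re-enables irqs */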
@@ -695,93 +768,100 @@ err:
return vma;
}
-static void
-guc_client_free(struct drm_i915_private *dev_priv,
- struct i915_guc_client *client)
+/* Check that a doorbell register is in the expected state */
+static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
- struct intel_guc *guc = &dev_priv->guc;
-
- if (!client)
- return;
-
- /*
- * XXX: wait for any outstanding submissions before freeing memory.
- * Be sure to drop any locks
- */
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 drbregl;
+ bool valid;
- if (client->vaddr) {
- /*
- * If we got as far as setting up a doorbell, make sure we
- * shut it down before unmapping & deallocating the memory.
- */
- guc_disable_doorbell(guc, client);
+ GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
- i915_gem_object_unpin_map(client->vma->obj);
- }
+ drbregl = I915_READ(GEN8_DRBREGL(db_id));
+ valid = drbregl & GEN8_DRB_VALID;
- i915_vma_unpin_and_release(&client->vma);
+ if (test_bit(db_id, guc->doorbell_bitmap) == valid)
+ return true;
- if (client->ctx_index != GUC_INVALID_CTX_ID) {
- guc_ctx_desc_fini(guc, client);
- ida_simple_remove(&guc->ctx_ids, client->ctx_index);
- }
+ DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
+ db_id, drbregl, yesno(valid));
- kfree(client);
+ return false;
}
-/* Check that a doorbell register is in the expected state */
-static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
+/*
+ * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
+ * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
+ * doorbell to the rightful owner.
+ */
+static int __reset_doorbell(struct i915_guc_client *client, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- i915_reg_t drbreg = GEN8_DRBREGL(db_id);
- uint32_t value = I915_READ(drbreg);
- bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
- bool expected = test_bit(db_id, guc->doorbell_bitmap);
-
- if (enabled == expected)
- return true;
+ int err;
- DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
- db_id, drbreg.reg, value,
- expected ? "active" : "inactive");
+ __update_doorbell_desc(client, db_id);
+ err = __create_doorbell(client);
+ if (!err)
+ err = __destroy_doorbell(client);
- return false;
+ return err;
}
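
In other words, the reset is a create/destroy handshake performed purely for its side effect on the GuC's bookkeeping. A hypothetical trace for one stale doorbell id:

	/*
	 * __update_doorbell_desc(client, db_id);  point our descriptor at the id
	 * __create_doorbell(client);              GuC marks the id as active
	 * __destroy_doorbell(client);             GuC marks it inactive again
	 *
	 * leaving the GuC, the KMD bitmap and the doorbell HW in agreement.
	 */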
/*
- * Borrow the first client to set up & tear down each unused doorbell
- * in turn, to ensure that all doorbell h/w is (re)initialised.
+ * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
+ * HW is (re)initialised. To that end, we might have to borrow the first
+ * client. Also, tell GuC about all the doorbells in use by all clients.
+ * We do this because the KMD, the GuC and the doorbell HW can easily go
+ * out of sync (e.g. we can reset the GuC, but not the doorbell HW).
*/
-static void guc_init_doorbell_hw(struct intel_guc *guc)
+static int guc_init_doorbell_hw(struct intel_guc *guc)
{
struct i915_guc_client *client = guc->execbuf_client;
- uint16_t db_id;
- int i, err;
-
- guc_disable_doorbell(guc, client);
+ bool recreate_first_client = false;
+ u16 db_id;
+ int ret;
- for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
- /* Skip if doorbell is OK */
- if (guc_doorbell_check(guc, i))
+ /* For unused doorbells, make sure they are disabled */
+ for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
+ if (doorbell_ok(guc, db_id))
continue;
- err = guc_update_doorbell_id(guc, client, i);
- if (err)
- DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
- i, err);
+ if (has_doorbell(client)) {
+ /* Borrow execbuf_client (we will recreate it later) */
+ destroy_doorbell(client);
+ recreate_first_client = true;
+ }
+
+ ret = __reset_doorbell(client, db_id);
+ WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
}
- db_id = select_doorbell_register(guc, client->priority);
- WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);
+ if (recreate_first_client) {
+ ret = __reserve_doorbell(client);
+ if (unlikely(ret)) {
+ DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret);
+ return ret;
+ }
+
+ __update_doorbell_desc(client, client->doorbell_id);
+ }
- err = guc_update_doorbell_id(guc, client, db_id);
- if (err)
- DRM_WARN("Failed to restore doorbell to %d, err %d\n",
- db_id, err);
+ /* Now for every client (and not only execbuf_client) make sure its
+ * doorbell is known by the GuC. Only one client exists so far, so the
+ * commented loop is a placeholder for future multi-client support. */
+ /* for (client = client_list; client; client = client->next) */
+ {
+ ret = __create_doorbell(client);
+ if (ret) {
+ DRM_ERROR("Couldn't recreate client %u doorbell: %d\n",
+ client->stage_id, ret);
+ return ret;
+ }
+ }
- /* Read back & verify all doorbell registers */
- for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
- (void)guc_doorbell_check(guc, i);
+ /* Read back & verify all (used & unused) doorbell registers */
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
+ WARN_ON(!doorbell_ok(guc, db_id));
+
+ return 0;
}
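
Note the iteration primitive in the disable pass: for_each_clear_bit() visits the zero bits, i.e. exactly the doorbell ids not reserved by any client; the reserved ids are handled afterwards by the __create_doorbell() pass. A hypothetical standalone use:

	u16 db_id;

	for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS)
		pr_debug("doorbell %u is unreserved\n", db_id);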
/**
@@ -807,49 +887,46 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
- uint16_t db_id;
+ int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- client->owner = ctx;
client->guc = guc;
+ client->owner = ctx;
client->engines = engines;
client->priority = priority;
- client->doorbell_id = GUC_INVALID_DOORBELL_ID;
+ client->doorbell_id = GUC_DOORBELL_INVALID;
+ client->wq_offset = GUC_DB_SIZE;
+ client->wq_size = GUC_WQ_SIZE;
+ spin_lock_init(&client->wq_lock);
- client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
- GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
- if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
- client->ctx_index = GUC_INVALID_CTX_ID;
- goto err;
- }
+ ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto err_client;
+
+ client->stage_id = ret;
/* The first page is doorbell/proc_desc. The following two pages are wq. */
vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
- if (IS_ERR(vma))
- goto err;
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_id;
+ }
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr))
- goto err;
-
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_vma;
+ }
client->vaddr = vaddr;
- spin_lock_init(&client->wq_lock);
- client->wq_offset = GUC_DB_SIZE;
- client->wq_size = GUC_WQ_SIZE;
-
- db_id = select_doorbell_register(guc, client->priority);
- if (db_id == GUC_INVALID_DOORBELL_ID)
- /* XXX: evict a doorbell instead? */
- goto err;
-
- client->doorbell_offset = select_doorbell_cacheline(guc);
+ client->doorbell_offset = __select_cacheline(guc);
/*
* Since the doorbell only requires a single cacheline, we can save
@@ -862,28 +939,47 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
client->proc_desc_offset = (GUC_DB_SIZE / 2);
guc_proc_desc_init(guc, client);
- guc_ctx_desc_init(guc, client);
+ guc_stage_desc_init(guc, client);
- /* For runtime client allocation we need to enable the doorbell. Not
- * required yet for the static execbuf_client as this special kernel
- * client is enabled from i915_guc_submission_enable().
- *
- * guc_update_doorbell_id(guc, client, db_id);
- */
+ ret = create_doorbell(client);
+ if (ret)
+ goto err_vaddr;
- DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
- priority, client, client->engines, client->ctx_index);
- DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
- client->doorbell_id, client->doorbell_offset);
+ DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
+ priority, client, client->engines, client->stage_id);
+ DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
+ client->doorbell_id, client->doorbell_offset);
return client;
-err:
- guc_client_free(dev_priv, client);
- return NULL;
+err_vaddr:
+ i915_gem_object_unpin_map(client->vma->obj);
+err_vma:
+ i915_vma_unpin_and_release(&client->vma);
+err_id:
+ ida_simple_remove(&guc->stage_ids, client->stage_id);
+err_client:
+ kfree(client);
+ return ERR_PTR(ret);
}
+static void guc_client_free(struct i915_guc_client *client)
+{
+ /*
+ * XXX: wait for any outstanding submissions before freeing memory.
+ * Be sure to drop any locks
+ */
+ /* FIXME: in many cases, by the time we get here the GuC has been
+ * reset, so we cannot destroy the doorbell properly. Ignore the
+ * error message for now. */
+ destroy_doorbell(client);
+ guc_stage_desc_fini(client->guc, client);
+ i915_gem_object_unpin_map(client->vma->obj);
+ i915_vma_unpin_and_release(&client->vma);
+ ida_simple_remove(&client->guc->stage_ids, client->stage_id);
+ kfree(client);
+}
static void guc_policies_init(struct guc_policies *policies)
{
@@ -893,7 +989,7 @@ static void guc_policies_init(struct guc_policies *policies)
policies->dpc_promote_time = 500000;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
- for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+ for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
@@ -907,7 +1003,7 @@ static void guc_policies_init(struct guc_policies *policies)
policies->is_valid = 1;
}
-static void guc_addon_create(struct intel_guc *guc)
+static int guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct i915_vma *vma;
@@ -923,14 +1019,13 @@ static void guc_addon_create(struct intel_guc *guc)
enum intel_engine_id id;
u32 base;
- vma = guc->ads_vma;
- if (!vma) {
- vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
- if (IS_ERR(vma))
- return;
+ GEM_BUG_ON(guc->ads_vma);
- guc->ads_vma = vma;
- }
+ vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ guc->ads_vma = vma;
page = i915_vma_first_page(vma);
blob = kmap(page);
@@ -940,11 +1035,11 @@ static void guc_addon_create(struct intel_guc *guc)
/* MMIO reg state */
for_each_engine(engine, dev_priv, id) {
- blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
+ blob->reg_state.white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
+ blob->reg_state.white_list[engine->guc_id].count = 0;
}
/*
@@ -967,67 +1062,75 @@ static void guc_addon_create(struct intel_guc *guc)
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
kunmap(page);
+
+ return 0;
+}
+
+static void guc_ads_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->ads_vma);
}
/*
- * Set up the memory resources to be shared with the GuC. At this point,
- * we require just one object that can be mapped through the GGTT.
+ * Set up the memory resources to be shared with the GuC (via the GGTT)
+ * at firmware loading time.
*/
int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
- const size_t ctxsize = sizeof(struct guc_context_desc);
- const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
- const size_t gemsize = round_up(poolsize, PAGE_SIZE);
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ void *vaddr;
+ int ret;
- if (!HAS_GUC_SCHED(dev_priv))
+ if (guc->stage_desc_pool)
return 0;
- /* Wipe bitmap & delete client in case of reinitialisation */
- bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
- i915_guc_submission_disable(dev_priv);
-
- if (!i915.enable_guc_submission)
- return 0; /* not enabled */
-
- if (guc->ctx_pool_vma)
- return 0; /* already allocated */
-
- vma = intel_guc_allocate_vma(guc, gemsize);
+ vma = intel_guc_allocate_vma(guc,
+ PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS));
if (IS_ERR(vma))
return PTR_ERR(vma);
- guc->ctx_pool_vma = vma;
- ida_init(&guc->ctx_ids);
- intel_guc_log_create(guc);
- guc_addon_create(guc);
-
- guc->execbuf_client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CTX_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (!guc->execbuf_client) {
- DRM_ERROR("Failed to create GuC client for execbuf!\n");
- goto err;
+ guc->stage_desc_pool = vma;
+
+ vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_vma;
}
+ guc->stage_desc_pool_vaddr = vaddr;
+
+ ret = intel_guc_log_create(guc);
+ if (ret < 0)
+ goto err_vaddr;
+
+ ret = guc_ads_create(guc);
+ if (ret < 0)
+ goto err_log;
+
+ ida_init(&guc->stage_ids);
+
return 0;
-err:
- i915_guc_submission_fini(dev_priv);
- return -ENOMEM;
+err_log:
+ intel_guc_log_destroy(guc);
+err_vaddr:
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+err_vma:
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
+ return ret;
}
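
The error labels unwind strictly in reverse order of setup, so each failure path releases exactly what has been established so far; guc_client_alloc() above follows the same onion-peel shape. A generic sketch of the idiom, with hypothetical setup_*/teardown_* helpers:

	ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b();
	if (ret)
		goto err_a;

	ret = setup_c();
	if (ret)
		goto err_b;

	return 0;

	err_b:
		teardown_b();
	err_a:
		teardown_a();
		return ret;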
-static void guc_reset_wq(struct i915_guc_client *client)
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
- struct guc_process_desc *desc = client->vaddr +
- client->proc_desc_offset;
-
- desc->head = 0;
- desc->tail = 0;
+ struct intel_guc *guc = &dev_priv->guc;
- client->wq_tail = 0;
+ ida_destroy(&guc->stage_ids);
+ guc_ads_destroy(guc);
+ intel_guc_log_destroy(guc);
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1072,20 +1175,60 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int irqs;
+
+ /*
+ * tell all command streamers NOT to forward interrupts or vblank
+ * to GuC.
+ */
+ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
+ irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
+
+ /* route all GT interrupts to the host */
+ I915_WRITE(GUC_BCS_RCS_IER, 0);
+ I915_WRITE(GUC_VCS2_VCS1_IER, 0);
+ I915_WRITE(GUC_WD_VECS_IER, 0);
+
+ dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+}
+
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int err;
+
+ if (!client) {
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for execbuf!\n");
+ return PTR_ERR(client);
+ }
- if (!client)
- return -ENODEV;
+ guc->execbuf_client = client;
+ }
- intel_guc_sample_forcewake(guc);
+ err = intel_guc_sample_forcewake(guc);
+ if (err)
+ goto err_execbuf_client;
guc_reset_wq(client);
- guc_init_doorbell_hw(guc);
+
+ err = guc_init_doorbell_hw(guc);
+ if (err)
+ goto err_execbuf_client;
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(dev_priv);
@@ -1112,30 +1255,11 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
}
return 0;
-}
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
-
- /*
- * tell all command streamers NOT to forward interrupts or vblank
- * to GuC.
- */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
- irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
-
- /* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
-
- dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+err_execbuf_client:
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
+ return err;
}
void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
@@ -1144,30 +1268,11 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
guc_interrupts_release(dev_priv);
- if (!guc->execbuf_client)
- return;
-
/* Revert back to manual ELSP submission */
intel_engines_reset_default_submission(dev_priv);
-}
-
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_guc_client *client;
- client = fetch_and_zero(&guc->execbuf_client);
- if (!client)
- return;
-
- guc_client_free(dev_priv, client);
-
- i915_vma_unpin_and_release(&guc->ads_vma);
- i915_vma_unpin_and_release(&guc->log.vma);
-
- if (guc->ctx_pool_vma)
- ida_destroy(&guc->ctx_ids);
- i915_vma_unpin_and_release(&guc->ctx_pool_vma);
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
}
/**
@@ -1196,7 +1301,6 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
-
/**
* intel_guc_resume() - notify GuC resuming from suspend state
* @dev_priv: i915 device private
@@ -1222,5 +1326,3 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
-
-
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8163d5024ff8..d9d196977f4a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1742,8 +1742,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
/* Handle flush interrupt in bottom half */
- queue_work(dev_priv->guc.log.flush_wq,
- &dev_priv->guc.log.flush_work);
+ queue_work(dev_priv->guc.log.runtime.flush_wq,
+ &dev_priv->guc.log.runtime.flush_work);
dev_priv->guc.log.flush_interrupt_count++;
} else {
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 732101ed57fb..f87b0c4e564d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -61,6 +61,7 @@
.has_overlay = 1, .overlay_needs_physical = 1, \
.has_gmch_display = 1, \
.hws_needs_physical = 1, \
+ .unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
@@ -102,6 +103,7 @@ static const struct intel_device_info intel_i915g_info = {
.platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i915gm_info = {
@@ -113,6 +115,7 @@ static const struct intel_device_info intel_i915gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945g_info = {
@@ -121,6 +124,7 @@ static const struct intel_device_info intel_i945g_info = {
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945gm_info = {
@@ -131,6 +135,7 @@ static const struct intel_device_info intel_i945gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_g33_info = {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 8c121187ff39..060b171480d5 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
*/
if (WARN_ON(stream->sample_flags != props->sample_flags)) {
ret = -ENODEV;
- goto err_alloc;
+ goto err_flags;
}
list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
err_open:
list_del(&stream->link);
+err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
if (ret)
return ret;
+ if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+ DRM_DEBUG("Unknown i915 perf property ID\n");
+ return -EINVAL;
+ }
+
switch ((enum drm_i915_perf_property_id)id) {
case DRM_I915_PERF_PROP_CTX_HANDLE:
props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
- default:
+ case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
- DRM_DEBUG("Unknown i915 perf property ID\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 04c8f69fcc62..11b12f412492 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7829,7 +7829,14 @@ enum {
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
+#define TRANS_DDI_HDMI_SCRAMBLING (1<<0)
+#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
+ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
+ | TRANS_DDI_HDMI_SCRAMBLING)
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 94a3a3299910..c5455d36b617 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,6 +25,24 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
+#undef WARN_ON
+/* Many gcc versions seem to not see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+ bool __i915_warn_cond = (x); \
+ if (__builtin_constant_p(__i915_warn_cond)) \
+ BUILD_BUG_ON(__i915_warn_cond); \
+ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
+#endif
+
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+ (long)(x), __func__)
+
#if GCC_VERSION >= 70000
#define add_overflows(A, B) \
__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
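
A hypothetical use of the helper, rejecting a range whose end would wrap; a fallback definition for pre-7 compilers is presumably provided on the #else branch (not shown here):

	if (add_overflows(offset, size))
		return -EINVAL;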
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index ba986edee312..b6ea192ad550 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -47,11 +47,12 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
unsigned int result;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
result = __intel_breadcrumbs_wakeup(b);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
return result;
}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index c2cc33f3d888..dd3ad52b7dfe 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1442,16 +1442,33 @@ static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
- /* BSpec says "Do not use DisplayPort with CDCLK less than
- * 432 MHz, audio enabled, port width x4, and link rate
- * HBR2 (5.4 GHz), or else there may be audio corruption or
- * screen corruption."
+ /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+ * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+ * there may be audio corruption or screen corruption." The equivalent
+ * cdclk restriction on GLK is 316.8 MHz, and since GLK can output two
+ * pixels per clock, the pixel rate limit becomes 2 * 316.8 MHz.
*/
if (intel_crtc_has_dp_encoder(crtc_state) &&
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
- crtc_state->lane_count == 4)
- pixel_rate = max(432000, pixel_rate);
+ crtc_state->lane_count == 4) {
+ if (IS_GEMINILAKE(dev_priv))
+ pixel_rate = max(2 * 316800, pixel_rate);
+ else
+ pixel_rate = max(432000, pixel_rate);
+ }
+
+ /* According to BSpec, "The CD clock frequency must be at least twice
+ * the frequency of the Azalia BCLK", and BCLK is 96 MHz by default.
+ * The check for GLK has to be adjusted as the platform can output
+ * two pixels per clock.
+ */
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) {
+ if (IS_GEMINILAKE(dev_priv))
+ pixel_rate = max(2 * 2 * 96000, pixel_rate);
+ else
+ pixel_rate = max(2 * 96000, pixel_rate);
+ }
return pixel_rate;
}
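
Worked through with the default numbers: BCLK is 96 MHz, so audio requires CDCLK >= 2 * 96 = 192 MHz. Since cdclk is derived from the highest pixel rate, the code imposes a pixel-rate floor of 2 * 96000 = 192000 (in the kHz units used here), doubled to 2 * 2 * 96000 = 384000 on GLK because that platform outputs two pixels per clock and its cdclk may therefore be half the pixel rate.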
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 36832257cc9b..1575bde0cf90 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -49,7 +49,7 @@ MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d8214ba8da14..0914ad96a71b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -539,7 +539,7 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
@@ -806,7 +806,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
DP_TP_CTL_ENABLE);
}
-void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
@@ -837,7 +837,8 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-static struct intel_encoder *
+/* Finds the only possible encoder associated with the given CRTC. */
+struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -1127,72 +1128,6 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
}
-static bool
-hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- struct intel_shared_dpll *pll;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state,
- encoder);
- if (!pll)
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
-
- return pll;
-}
-
-static bool
-skl_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- struct intel_shared_dpll *pll;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
- return false;
- }
-
- return true;
-}
-
-static bool
-bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
-}
-
-/*
- * Tries to find a *shared* PLL for the CRTC and store it in
- * intel_crtc->ddi_pll_sel.
- *
- * For private DPLLs, compute_config() should do the selection for us. This
- * function should be folded into compute_config() eventually.
- */
-bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_encoder *encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
-
- if (IS_GEN9_BC(dev_priv))
- return skl_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
- else if (IS_GEN9_LP(dev_priv))
- return bxt_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
- else
- return hsw_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
-}
-
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -1309,6 +1244,11 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ if (crtc_state->hdmi_scrambling)
+ temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
+ if (crtc_state->hdmi_high_tmds_clock_ratio)
+ temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (crtc_state->fdi_lanes - 1) << 1;
@@ -1676,8 +1616,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
return DDI_BUF_TRANS_SELECT(level);
}
-void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll)
+static void intel_ddi_clk_select(struct intel_encoder *encoder,
+ struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
@@ -1881,6 +1821,12 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
+ bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio;
+ bool scrambling = pipe_config->hdmi_scrambling;
+
+ intel_hdmi_handle_sink_scrambling(intel_encoder,
+ conn_state->connector,
+ clock_ratio, scrambling);
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
@@ -1914,6 +1860,12 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
if (old_crtc_state->has_audio)
intel_audio_codec_disable(intel_encoder);
+ if (type == INTEL_OUTPUT_HDMI) {
+ intel_hdmi_handle_sink_scrambling(intel_encoder,
+ old_conn_state->connector,
+ false, false);
+ }
+
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2040,6 +1992,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
+
+ if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
+ TRANS_DDI_HDMI_SCRAMBLING_MASK)
+ pipe_config->hdmi_scrambling = true;
+ if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
/* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
pipe_config->lane_count = 4;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e217d04133e6..881dec88df6e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1997,7 +1997,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
unsigned int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
return cpp;
case I915_FORMAT_MOD_X_TILED:
if (IS_GEN2(dev_priv))
@@ -2033,7 +2033,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_NONE)
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 1;
else
return intel_tile_size(to_i915(fb->dev)) /
@@ -2107,7 +2107,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return 4096;
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (INTEL_GEN(dev_priv) >= 9)
@@ -2290,7 +2290,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2345,7 +2345,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
if (alignment)
alignment--;
- if (fb_modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2471,7 +2471,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
DRM_ROTATE_0, tile_size);
offset /= tile_size;
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
@@ -2803,7 +2803,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
switch (cpp) {
case 8:
@@ -2962,28 +2962,27 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_update_primary_plane(struct drm_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(primary->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int plane = intel_crtc->plane;
- u32 linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- unsigned long irqflags;
+ u32 dspcntr;
- dspcntr = DISPPLANE_GAMMA_ENABLE;
+ dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
+
+ if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
+ IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- dspcntr |= DISPLAY_PLANE_ENABLE;
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 4) {
- if (intel_crtc->pipe == PIPE_B)
+ if (crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
@@ -3010,7 +3009,8 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
if (INTEL_GEN(dev_priv) >= 4 &&
@@ -3023,25 +3023,66 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
if (rotation & DRM_REFLECT_X)
dspcntr |= DISPPLANE_MIRROR;
- if (IS_G4X(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ return dspcntr;
+}
- intel_add_fb_offsets(&x, &y, plane_state, 0);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ int src_x = plane_state->base.src.x1 >> 16;
+ int src_y = plane_state->base.src.y1 >> 16;
+ u32 offset;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, plane_state, 0);
+ offset = intel_compute_tile_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
+
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->base.rotation;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->base.src) >> 16;
- if (rotation & DRM_ROTATE_180) {
- x += crtc_state->pipe_src_w - 1;
- y += crtc_state->pipe_src_h - 1;
- } else if (rotation & DRM_REFLECT_X) {
- x += crtc_state->pipe_src_w - 1;
+ if (rotation & DRM_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ src_x += src_w - 1;
+ }
}
+ plane_state->main.offset = offset;
+ plane_state->main.x = src_x;
+ plane_state->main.y = src_y;
+
+ return 0;
+}
+
+static void i9xx_update_primary_plane(struct drm_plane *primary,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int plane = intel_crtc->plane;
+ u32 linear_offset;
+ u32 dspcntr = plane_state->ctl;
+ i915_reg_t reg = DSPCNTR(plane);
+ int x = plane_state->main.x;
+ int y = plane_state->main.y;
+ unsigned long irqflags;
+
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_GEN(dev_priv) < 4)
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_crtc->dspaddr_offset = plane_state->main.offset;
+ else
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
@@ -3068,7 +3109,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE_FW(reg, dspcntr);
I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ I915_WRITE_FW(DSPSURF(plane),
+ intel_plane_ggtt_offset(plane_state) +
+ intel_crtc->dspaddr_offset);
+ I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
@@ -3105,101 +3151,10 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void ironlake_update_primary_plane(struct drm_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int plane = intel_crtc->plane;
- u32 linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
- unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- unsigned long irqflags;
-
- dspcntr = DISPPLANE_GAMMA_ENABLE;
- dspcntr |= DISPLAY_PLANE_ENABLE;
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_XRGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
- break;
- default:
- BUG();
- }
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
-
- if (rotation & DRM_ROTATE_180)
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- intel_add_fb_offsets(&x, &y, plane_state, 0);
-
- intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- /* HSW+ does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
- rotation & DRM_ROTATE_180) {
- x += crtc_state->pipe_src_w - 1;
- y += crtc_state->pipe_src_h - 1;
- }
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- intel_crtc->adjusted_x = x;
- intel_crtc->adjusted_y = y;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE_FW(DSPSURF(plane),
- intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
- } else {
- I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
- }
- POSTING_READ_FW(reg);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_NONE)
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 64;
else
return intel_tile_width_bytes(fb, plane);
@@ -3254,7 +3209,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
return stride;
}
-u32 skl_plane_ctl_format(uint32_t pixel_format)
+static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_C8:
@@ -3295,10 +3250,10 @@ u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
-u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
break;
case I915_FORMAT_MOD_X_TILED:
return PLANE_CTL_TILED_X;
@@ -3313,7 +3268,7 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return 0;
}
-u32 skl_plane_ctl_rotation(unsigned int rotation)
+static u32 skl_plane_ctl_rotation(unsigned int rotation)
{
switch (rotation) {
case DRM_ROTATE_0:
@@ -3335,6 +3290,37 @@ u32 skl_plane_ctl_rotation(unsigned int rotation)
return 0;
}
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 plane_ctl;
+
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (!IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |=
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE |
+ PLANE_CTL_PLANE_GAMMA_DISABLE;
+ }
+
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
+ plane_ctl |= skl_plane_ctl_rotation(rotation);
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ return plane_ctl;
+}
+
static void skylake_update_primary_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -3345,7 +3331,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = to_intel_plane(plane)->id;
enum pipe pipe = to_intel_plane(plane)->pipe;
- u32 plane_ctl;
+ u32 plane_ctl = plane_state->ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
u32 surf_addr = plane_state->main.offset;
@@ -3360,19 +3346,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
int dst_h = drm_rect_height(&plane_state->base.dst);
unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (!IS_GEMINILAKE(dev_priv)) {
- plane_ctl |=
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE |
- PLANE_CTL_PLANE_GAMMA_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
-
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -6306,6 +6279,17 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
static void compute_m_n(unsigned int m, unsigned int n,
uint32_t *ret_m, uint32_t *ret_n)
{
+ /*
+ * Reduce M/N as much as possible without loss in precision. Several DP
+ * dongles in particular seem to be fussy about too large *link* M/N
+ * values. The passed in values are more likely to have the least
+ * significant bits zero than M after rounding below, so do this first.
+ */
+ while ((m & 1) == 0 && (n & 1) == 0) {
+ m >>= 1;
+ n >>= 1;
+ }
+
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
*ret_m = div_u64((uint64_t) m * *ret_n, n);
intel_reduce_m_n_ratio(ret_m, ret_n);
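
A worked example of the new pre-reduction: m = 148500 and n = 270000 (a 148.5 MHz pixel clock over a 2.7 GHz link) halve twice to 37125/67500, at which point m is odd and the loop stops, having stripped the common low zero bits before roundup_pow_of_two() and the division below can throw them away as rounding error.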
@@ -8395,7 +8379,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
case PLANE_CTL_TILED_LINEAR:
- fb->modifier = DRM_FORMAT_MOD_NONE;
+ fb->modifier = DRM_FORMAT_MOD_LINEAR;
break;
case PLANE_CTL_TILED_X:
plane_config->tiling = I915_TILING_X;
@@ -8851,8 +8835,14 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
- if (!intel_ddi_pll_select(crtc, crtc_state))
+ struct intel_encoder *encoder =
+ intel_ddi_get_crtc_new_encoder(crtc_state);
+
+ if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
+ }
}
crtc->lowfreq_avail = false;
@@ -9148,6 +9138,31 @@ out:
return active;
}
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int width = plane_state->base.crtc_w;
+ unsigned int stride = roundup_pow_of_two(width) * 4;
+
+ switch (stride) {
+ default:
+ WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+ width, stride);
+ stride = 256;
+ /* fallthrough */
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ }
+
+ return CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(stride);
+}
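
For instance, a 100-pixel-wide ARGB cursor rounds up to a 128-pixel row, giving stride = 128 * 4 = 512 bytes, one of the four strides the switch accepts; a width whose stride falls outside 256-2048 trips the WARN_ONCE() and is clamped to 256.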
+
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
@@ -9159,26 +9174,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
if (plane_state && plane_state->base.visible) {
unsigned int width = plane_state->base.crtc_w;
unsigned int height = plane_state->base.crtc_h;
- unsigned int stride = roundup_pow_of_two(width) * 4;
-
- switch (stride) {
- default:
- WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
- width, stride);
- stride = 256;
- /* fallthrough */
- case 256:
- case 512:
- case 1024:
- case 2048:
- break;
- }
-
- cntl |= CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(stride);
+ cntl = plane_state->ctl;
size = (height << 12) | width;
}
@@ -9211,6 +9208,43 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
}
}
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 cntl;
+
+ cntl = MCURSOR_GAMMA_ENABLE;
+
+ if (HAS_DDI(dev_priv))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+ cntl |= pipe << 28; /* Connect to correct pipe */
+
+ switch (plane_state->base.crtc_w) {
+ case 64:
+ cntl |= CURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= CURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= CURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(plane_state->base.crtc_w);
+ return 0;
+ }
+
+ if (plane_state->base.rotation & DRM_ROTATE_180)
+ cntl |= CURSOR_ROTATE_180;
+
+ return cntl;
+}
+
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
@@ -9220,30 +9254,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (plane_state && plane_state->base.visible) {
- cntl = MCURSOR_GAMMA_ENABLE;
- switch (plane_state->base.crtc_w) {
- case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
- break;
- default:
- MISSING_CASE(plane_state->base.crtc_w);
- return;
- }
- cntl |= pipe << 28; /* Connect to correct pipe */
-
- if (HAS_DDI(dev_priv))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
-
- if (plane_state->base.rotation & DRM_ROTATE_180)
- cntl |= CURSOR_ROTATE_180;
- }
+ if (plane_state && plane_state->base.visible)
+ cntl = plane_state->ctl;
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE_FW(CURCNTR(pipe), cntl);
@@ -10338,7 +10350,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
break;
case I915_FORMAT_MOD_X_TILED:
ctl |= PLANE_CTL_TILED_X;
@@ -11692,6 +11704,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
+
+ PIPE_CONF_CHECK_I(hdmi_scrambling);
+ PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(has_audio);
@@ -13285,6 +13300,14 @@ intel_check_primary_plane(struct drm_plane *plane,
ret = skl_check_plane_surface(state);
if (ret)
return ret;
+
+ state->ctl = skl_plane_ctl(crtc_state, state);
+ } else {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = i9xx_plane_ctl(crtc_state, state);
}
return 0;
@@ -13544,12 +13567,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- intel_primary_formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
-
- primary->update_plane = ironlake_update_primary_plane;
- primary->disable_plane = i9xx_disable_primary_plane;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13621,6 +13638,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = to_intel_plane(plane)->pipe;
@@ -13640,7 +13658,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+ if (!cursor_size_ok(dev_priv, state->base.crtc_w,
state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
@@ -13653,7 +13671,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
return -EINVAL;
}
@@ -13668,12 +13686,17 @@ intel_check_cursor_plane(struct drm_plane *plane,
* display power well must be turned off and on again.
* Refuse the put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
+ state->ctl = i845_cursor_ctl(crtc_state, state);
+ else
+ state->ctl = i9xx_cursor_ctl(crtc_state, state);
+
return 0;
}
@@ -14309,7 +14332,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0]);
goto err;
}
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
@@ -14332,7 +14355,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
goto err;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fb7afcf9e8be..aaee3949a422 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -398,6 +398,9 @@ struct intel_plane_state {
int x, y;
} aux;
+ /* plane control register */
+ u32 ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -729,6 +732,12 @@ struct intel_crtc_state {
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
+
+ /* HDMI scrambling status */
+ bool hdmi_scrambling;
+
+ /* HDMI High TMDS char rate ratio */
+ bool hdmi_high_tmds_clock_ratio;
};
struct intel_crtc {
@@ -1220,12 +1229,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
-void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll);
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state);
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
@@ -1236,8 +1242,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-bool intel_ddi_pll_select(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
+struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1246,7 +1252,6 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
@@ -1445,12 +1450,12 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
-u32 skl_plane_ctl_format(uint32_t pixel_format);
-u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
-u32 skl_plane_ctl_rotation(unsigned int rotation);
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
unsigned int rotation);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
@@ -1620,6 +1625,10 @@ struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4200faa520c7..854e8e0c836b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -36,45 +36,45 @@ static const struct engine_info {
int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
[RCS] = {
- .name = "render ring",
- .exec_id = I915_EXEC_RENDER,
+ .name = "rcs",
.hw_id = RCS_HW,
+ .exec_id = I915_EXEC_RENDER,
.mmio_base = RENDER_RING_BASE,
.irq_shift = GEN8_RCS_IRQ_SHIFT,
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
},
[BCS] = {
- .name = "blitter ring",
- .exec_id = I915_EXEC_BLT,
+ .name = "bcs",
.hw_id = BCS_HW,
+ .exec_id = I915_EXEC_BLT,
.mmio_base = BLT_RING_BASE,
.irq_shift = GEN8_BCS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
},
[VCS] = {
- .name = "bsd ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs",
.hw_id = VCS_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.irq_shift = GEN8_VCS1_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
},
[VCS2] = {
- .name = "bsd2 ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs2",
.hw_id = VCS2_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd2_ring_buffer,
},
[VECS] = {
- .name = "video enhancement ring",
- .exec_id = I915_EXEC_VEBOX,
+ .name = "vecs",
.hw_id = VECS_HW,
+ .exec_id = I915_EXEC_VEBOX,
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
@@ -242,12 +242,12 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
void *semaphores;
/* Semaphores are in noncoherent memory, flush to be safe */
- semaphores = kmap(page);
+ semaphores = kmap_atomic(page);
memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- kunmap(page);
+ kunmap_atomic(semaphores);
}
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
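
One detail worth calling out in the kmap change above: kunmap() takes the
struct page, while kunmap_atomic() takes the virtual address returned by
kmap_atomic(). A kernel-style fragment showing the pairing (scrub_page is an
illustrative name, not a real helper):

static void scrub_page(struct page *page, unsigned int len)
{
	void *vaddr = kmap_atomic(page);	/* may disable preemption */

	memset(vaddr, 0, len);
	drm_clflush_virt_range(vaddr, len);	/* flush, as the hunk does */
	kunmap_atomic(vaddr);			/* pass the address, not the page */
}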
@@ -1111,6 +1111,15 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return false;
+
+ /* If the driver is wedged, HW state may be very inconsistent and
+ * report that it is still busy, even though we have stopped using it.
+ */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return true;
+
for_each_engine(engine, dev_priv, id) {
if (!intel_engine_is_idle(engine))
return false;
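
The new early-outs above order the checks from cheapest to most intrusive: a
lockless counter read, then the terminal-wedge flag, then a per-engine poll.
A userspace stand-in for that shape (types and engine_is_idle() are
illustrative stubs):

#include <stdatomic.h>
#include <stdbool.h>

struct gt { atomic_uint active_requests; bool wedged; };

static bool engine_is_idle(int id) { (void)id; return true; }	/* stub */

static bool engines_are_idle(struct gt *gt, int nengines)
{
	int id;

	if (atomic_load_explicit(&gt->active_requests, memory_order_relaxed))
		return false;	/* requests still in flight */

	/* wedged: HW may report busy, but we have stopped using it */
	if (gt->wedged)
		return true;

	for (id = 0; id < nengines; id++)
		if (!engine_is_idle(id))
			return false;
	return true;
}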
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 25691f0e4c50..cb36cbf3818f 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -26,14 +26,14 @@
#define GFXCORE_FAMILY_GEN9 12
#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
-#define GUC_CTX_PRIORITY_KMD_HIGH 0
-#define GUC_CTX_PRIORITY_HIGH 1
-#define GUC_CTX_PRIORITY_KMD_NORMAL 2
-#define GUC_CTX_PRIORITY_NORMAL 3
-#define GUC_CTX_PRIORITY_NUM 4
+#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
+#define GUC_CLIENT_PRIORITY_HIGH 1
+#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
+#define GUC_CLIENT_PRIORITY_NORMAL 3
+#define GUC_CLIENT_PRIORITY_NUM 4
-#define GUC_MAX_GPU_CONTEXTS 1024
-#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
+#define GUC_MAX_STAGE_DESCRIPTORS 1024
+#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS
#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
@@ -68,14 +68,14 @@
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
-#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0)
-#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1)
-#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2)
-#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3)
-#define GUC_CTX_DESC_ATTR_RESET (1 << 4)
-#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5)
-#define GUC_CTX_DESC_ATTR_PCH (1 << 6)
-#define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7)
+#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
+#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
+#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
+#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3)
+#define GUC_STAGE_DESC_ATTR_RESET BIT(4)
+#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5)
+#define GUC_STAGE_DESC_ATTR_PCH BIT(6)
+#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
/* The guc control data is 10 DWORDs */
#define GUC_CTL_CTXINFO 0
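
The attribute flags above switch from open-coded (1 << n) to the kernel's
BIT() macro, which expands to (1UL << (n)). A userspace stand-in showing the
equivalence:

#include <assert.h>

#define BIT(nr) (1UL << (nr))	/* mirrors include/linux/bits.h */

int main(void)
{
	assert(BIT(0) == 0x1UL);
	assert(BIT(7) == 0x80UL);
	assert((BIT(1) | BIT(3)) == 0xaUL);
	return 0;
}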
@@ -241,8 +241,8 @@ union guc_doorbell_qw {
u64 value_qw;
} __packed;
-#define GUC_MAX_DOORBELLS 256
-#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS)
+#define GUC_NUM_DOORBELLS 256
+#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
@@ -251,12 +251,12 @@ union guc_doorbell_qw {
struct guc_wq_item {
u32 header;
u32 context_desc;
- u32 ring_tail;
+ u32 submit_element_info;
u32 fence_id;
} __packed;
struct guc_process_desc {
- u32 context_id;
+ u32 stage_id;
u64 db_base_addr;
u32 head;
u32 tail;
@@ -278,7 +278,7 @@ struct guc_execlist_context {
u32 context_desc;
u32 context_id;
u32 ring_status;
- u32 ring_lcra;
+ u32 ring_lrca;
u32 ring_begin;
u32 ring_end;
u32 ring_next_free_location;
@@ -289,10 +289,18 @@ struct guc_execlist_context {
u16 engine_submit_queue_count;
} __packed;
-/*Context descriptor for communicating between uKernel and Driver*/
-struct guc_context_desc {
+/*
+ * This structure describes a stage set arranged for a particular communication
+ * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
+ * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
+ * to avoid confusion with all the other things already named "context" in the
+ * driver. A static pool of these descriptors is stored inside a GEM object
+ * (stage_desc_pool) which is held for the entire lifetime of our interaction
+ * with the GuC, being allocated before the GuC is loaded with its firmware.
+ */
+struct guc_stage_desc {
u32 sched_common_area;
- u32 context_id;
+ u32 stage_id;
u32 pas_id;
u8 engines_used;
u64 db_trigger_cpu;
@@ -359,7 +367,7 @@ struct guc_policy {
} __packed;
struct guc_policies {
- struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
+ struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
@@ -401,16 +409,17 @@ struct guc_mmio_regset {
u32 number_of_registers;
} __packed;
+/* MMIO registers that are set as non privileged */
+struct mmio_white_list {
+ u32 mmio_start;
+ u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
+ u32 count;
+} __packed;
+
struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
-
- /* MMIO registers that are set as non privileged */
- struct __packed {
- u32 mmio_start;
- u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
- u32 count;
- } mmio_white_list[GUC_MAX_ENGINES_NUM];
+ struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM];
} __packed;
/* GuC Additional Data Struct */
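
Hoisting the anonymous white-list struct out of guc_mmio_reg_state gives the
type a name, so other code can declare pointers to it and pass it around. A
small illustrative sketch (not the driver's real types):

struct white_list {
	unsigned int mmio_start;
	unsigned int count;
} __attribute__((packed));

struct reg_state {
	struct white_list white_list[4];	/* previously anonymous */
};

static void white_list_reset(struct white_list *wl)	/* now expressible */
{
	wl->mmio_start = 0;
	wl->count = 0;
}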
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 2f270d02894c..8a1a023e48b2 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -73,22 +73,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
-/* User-friendly representation of an enum */
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
- switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
- default:
- return "UNKNOWN!";
- }
-};
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
@@ -148,16 +132,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
} else
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
- if (guc->ads_vma) {
+ /* If GuC submission is enabled, set up additional parameters here */
+ if (i915.enable_guc_submission) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+ u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+ u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
- }
-
- /* If GuC submission is enabled, set up additional parameters here */
- if (i915.enable_guc_submission) {
- u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
- u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
pgs >>= PAGE_SHIFT;
params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
@@ -430,24 +412,3 @@ int intel_guc_select_fw(struct intel_guc *guc)
return 0;
}
-
-/**
- * intel_guc_fini() - clean up all allocated resources
- * @dev_priv: i915 device private
- */
-void intel_guc_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct drm_i915_gem_object *obj;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_guc_submission_disable(dev_priv);
- i915_guc_submission_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- obj = fetch_and_zero(&guc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 5c0f9a49da0e..6fb63a3c65b0 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -66,7 +66,6 @@ static int guc_log_control(struct intel_guc *guc, u32 control_val)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -139,45 +138,15 @@ static struct rchan_callbacks relay_callbacks = {
.remove_buf_file = remove_buf_file_callback,
};
-static void guc_log_remove_relay_file(struct intel_guc *guc)
-{
- relay_close(guc->log.relay_chan);
-}
-
-static int guc_log_create_relay_channel(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct rchan *guc_log_relay_chan;
- size_t n_subbufs, subbuf_size;
-
- /* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = guc->log.vma->obj->base.size;
-
- /* Store up to 8 snapshots, which is large enough to buffer sufficient
- * boot time logs and provides enough leeway to User, in terms of
- * latency, for consuming the logs from relay. Also doesn't take
- * up too much memory.
- */
- n_subbufs = 8;
-
- guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
- n_subbufs, &relay_callbacks, dev_priv);
- if (!guc_log_relay_chan) {
- DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- return -ENOMEM;
- }
-
- GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
- guc->log.relay_chan = guc_log_relay_chan;
- return 0;
-}
-
-static int guc_log_create_relay_file(struct intel_guc *guc)
+static int guc_log_relay_file_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct dentry *log_dir;
int ret;
+ if (i915.guc_log_level < 0)
+ return 0;
+
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
log_dir = dev_priv->drm.primary->debugfs_root;
@@ -197,8 +166,8 @@ static int guc_log_create_relay_file(struct intel_guc *guc)
return -ENODEV;
}
- ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
- if (ret) {
+ ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
+ if (ret < 0 && ret != -EEXIST) {
DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
return ret;
}
@@ -214,15 +183,15 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+ relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
/* Switch to the next sub buffer */
- relay_flush(guc->log.relay_chan);
+ relay_flush(guc->log.runtime.relay_chan);
}
static void *guc_get_write_buffer(struct intel_guc *guc)
{
- if (!guc->log.relay_chan)
+ if (!guc->log.runtime.relay_chan)
return NULL;
/* Just get the base address of a new sub buffer and copy data into it
@@ -233,7 +202,7 @@ static void *guc_get_write_buffer(struct intel_guc *guc)
* done without using relay_reserve() along with relay_write(). So it's
* better to use relay_reserve() alone.
*/
- return relay_reserve(guc->log.relay_chan, 0);
+ return relay_reserve(guc->log.runtime.relay_chan, 0);
}
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
@@ -284,11 +253,11 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
void *src_data, *dst_data;
bool new_overflow;
- if (WARN_ON(!guc->log.buf_addr))
+ if (WARN_ON(!guc->log.runtime.buf_addr))
return;
/* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = guc->log.buf_addr;
+ log_buf_state = src_data = guc->log.runtime.buf_addr;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
@@ -371,153 +340,113 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
}
}
-static void guc_log_cleanup(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- /* First disable the flush interrupt */
- gen9_disable_guc_interrupts(dev_priv);
-
- if (guc->log.flush_wq)
- destroy_workqueue(guc->log.flush_wq);
-
- guc->log.flush_wq = NULL;
-
- if (guc->log.relay_chan)
- guc_log_remove_relay_file(guc);
-
- guc->log.relay_chan = NULL;
-
- if (guc->log.buf_addr)
- i915_gem_object_unpin_map(guc->log.vma->obj);
-
- guc->log.buf_addr = NULL;
-}
-
static void capture_logs_work(struct work_struct *work)
{
struct intel_guc *guc =
- container_of(work, struct intel_guc, log.flush_work);
+ container_of(work, struct intel_guc, log.runtime.flush_work);
guc_log_capture_logs(guc);
}
-static int guc_log_create_extras(struct intel_guc *guc)
+static bool guc_log_has_runtime(struct intel_guc *guc)
+{
+ return guc->log.runtime.buf_addr != NULL;
+}
+
+static int guc_log_runtime_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
- int ret;
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+ int ret = 0;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- /* Nothing to do */
- if (i915.guc_log_level < 0)
- return 0;
-
- if (!guc->log.buf_addr) {
- /* Create a WC (Uncached for read) vmalloc mapping of log
- * buffer pages, so that we can directly get the data
- * (up-to-date) from memory.
- */
- vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
- return ret;
- }
+ GEM_BUG_ON(guc_log_has_runtime(guc));
- guc->log.buf_addr = vaddr;
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return PTR_ERR(vaddr);
}
- if (!guc->log.relay_chan) {
- /* Create a relay channel, so that we have buffers for storing
- * the GuC firmware logs, the channel will be linked with a file
- * later on when debugfs is registered.
- */
- ret = guc_log_create_relay_channel(guc);
- if (ret)
- return ret;
- }
+ guc->log.runtime.buf_addr = vaddr;
- if (!guc->log.flush_wq) {
- INIT_WORK(&guc->log.flush_work, capture_logs_work);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (guc->log.flush_wq == NULL) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-void intel_guc_log_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- unsigned long offset;
- uint32_t size, flags;
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
- if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and provides enough leeway to the user, in terms of
+ * latency, for consuming the logs from relay. Also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
- /* The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap */
- size = (1 + GUC_LOG_DPC_PAGES + 1 +
- GUC_LOG_ISR_PAGES + 1 +
- GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs, the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- vma = guc->log.vma;
- if (!vma) {
- /* We require SSE 4.1 for fast reads from the GuC log buffer and
- * it should be present on the chipsets supporting GuC based
- * submisssions.
- */
- if (WARN_ON(!i915_has_memcpy_from_wc())) {
- /* logging will not be enabled */
- i915.guc_log_level = -1;
- return;
- }
+ ret = -ENOMEM;
+ goto err_vaddr;
+ }
- vma = intel_guc_allocate_vma(guc, size);
- if (IS_ERR(vma)) {
- /* logging will be off */
- i915.guc_log_level = -1;
- return;
- }
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.runtime.relay_chan = guc_log_relay_chan;
+
+ INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+
+ /*
+ * GuC log buffer flush work item has to do register access to
+ * send the ack to GuC and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother about
+ * flushing of this work item from the suspend hooks, the pending
+ * work item if any will be either executed before the suspend
+ * or scheduled later on resume. This way the handling of work
+ * item can be kept same between system suspend & rpm suspend.
+ */
+ guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.runtime.flush_wq) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ ret = -ENOMEM;
+ goto err_relaychan;
+ }
- guc->log.vma = vma;
+ return 0;
- if (guc_log_create_extras(guc)) {
- guc_log_cleanup(guc);
- i915_vma_unpin_and_release(&guc->log.vma);
- i915.guc_log_level = -1;
- return;
- }
- }
+err_relaychan:
+ relay_close(guc->log.runtime.relay_chan);
+err_vaddr:
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+ guc->log.runtime.buf_addr = NULL;
+ return ret;
+}
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+static void guc_log_runtime_destroy(struct intel_guc *guc)
+{
+ /*
+ * It's possible that the runtime stuff was never allocated because
+ * guc_log_level was < 0 at the time.
+ */
+ if (!guc_log_has_runtime(guc))
+ return;
- offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ destroy_workqueue(guc->log.runtime.flush_wq);
+ relay_close(guc->log.runtime.relay_chan);
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+ guc->log.runtime.buf_addr = NULL;
}
static int guc_log_late_setup(struct intel_guc *guc)
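
guc_log_runtime_create() above follows the kernel's goto-unwind convention:
each failure path releases exactly what was acquired so far, in reverse
order. A compact sketch of the pattern (all helper names are illustrative):

static int runtime_create(struct ctx *c)
{
	int ret;

	c->vaddr = map_buffer(c);		/* step 1 */
	if (!c->vaddr)
		return -ENOMEM;

	c->chan = open_channel(c);		/* step 2 */
	if (!c->chan) {
		ret = -ENOMEM;
		goto err_vaddr;
	}

	c->wq = alloc_workqueue_for(c);		/* step 3 */
	if (!c->wq) {
		ret = -ENOMEM;
		goto err_chan;
	}
	return 0;

err_chan:
	close_channel(c->chan);			/* undo step 2 */
err_vaddr:
	unmap_buffer(c->vaddr);			/* undo step 1 */
	return ret;
}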
@@ -527,24 +456,25 @@ static int guc_log_late_setup(struct intel_guc *guc)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (i915.guc_log_level < 0)
- return -EINVAL;
-
- /* If log_level was set as -1 at boot time, then setup needed to
- * handle log buffer flush interrupts would not have been done yet,
- * so do that now.
- */
- ret = guc_log_create_extras(guc);
- if (ret)
- goto err;
+ if (!guc_log_has_runtime(guc)) {
+ /* If log_level was set as -1 at boot time, then setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_runtime_create(guc);
+ if (ret)
+ goto err;
+ }
- ret = guc_log_create_relay_file(guc);
+ ret = guc_log_relay_file_create(guc);
if (ret)
- goto err;
+ goto err_runtime;
return 0;
+
+err_runtime:
+ guc_log_runtime_destroy(guc);
err:
- guc_log_cleanup(guc);
/* logging will remain off */
i915.guc_log_level = -1;
return ret;
@@ -577,7 +507,7 @@ static void guc_flush_logs(struct intel_guc *guc)
/* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete, otherwise the forceful flush may not actually happen.
*/
- flush_work(&guc->log.flush_work);
+ flush_work(&guc->log.runtime.flush_work);
/* Ask GuC to update the log buffer state */
guc_log_flush(guc);
@@ -586,6 +516,72 @@ static void guc_flush_logs(struct intel_guc *guc)
guc_log_capture_logs(guc);
}
+int intel_guc_log_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ unsigned long offset;
+ uint32_t size, flags;
+ int ret;
+
+ GEM_BUG_ON(guc->log.vma);
+
+ if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+ i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+ /* The first page is to save log buffer state. Allocate one
+ * extra page for others in case of overlap */
+ size = (1 + GUC_LOG_DPC_PAGES + 1 +
+ GUC_LOG_ISR_PAGES + 1 +
+ GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (WARN_ON(!i915_has_memcpy_from_wc())) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ guc->log.vma = vma;
+
+ if (i915.guc_log_level >= 0) {
+ ret = guc_log_runtime_create(guc);
+ if (ret < 0)
+ goto err_vma;
+ }
+
+ /* each allocated unit is a page */
+ flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+ (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+ (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+ (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+ offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+
+ return 0;
+
+err_vma:
+ i915_vma_unpin_and_release(&guc->log.vma);
+err:
+ /* logging will be off */
+ i915.guc_log_level = -1;
+ return ret;
+}
+
+void intel_guc_log_destroy(struct intel_guc *guc)
+{
+ guc_log_runtime_destroy(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+}
+
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
struct intel_guc *guc = &dev_priv->guc;
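
intel_guc_log_create() above packs the buffer section sizes and the GGTT
offset (in pages) into a single u32 control word. A userspace demo of that
kind of field packing; the shift values here are invented for illustration,
not the GuC's real layout:

#include <stdint.h>
#include <stdio.h>

#define LOG_VALID	(1u << 0)
#define DPC_SHIFT	3
#define ISR_SHIFT	6
#define ADDR_SHIFT	12	/* offset stored in 4 KiB pages */

int main(void)
{
	uint32_t dpc_pages = 7, isr_pages = 7, offset_pages = 0x1234;
	uint32_t word = LOG_VALID |
			(dpc_pages << DPC_SHIFT) |
			(isr_pages << ISR_SHIFT) |
			(offset_pages << ADDR_SHIFT);

	printf("control word: 0x%08x\n", word);
	return 0;
}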
@@ -609,17 +605,22 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
return ret;
}
- i915.guc_log_level = log_param.verbosity;
+ if (log_param.logging_enabled) {
+ i915.guc_log_level = log_param.verbosity;
- /* If log_level was set as -1 at boot time, then the relay channel file
- * wouldn't have been created by now and interrupts also would not have
- * been enabled.
- */
- if (!dev_priv->guc.log.relay_chan) {
+ /* If log_level was set as -1 at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would not have
+ * been enabled. Try again now, just in case.
+ */
ret = guc_log_late_setup(guc);
- if (!ret)
- gen9_enable_guc_interrupts(dev_priv);
- } else if (!log_param.logging_enabled) {
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
+ return ret;
+ }
+
+ /* GuC logging is currently the only user of Guc2Host interrupts */
+ gen9_enable_guc_interrupts(dev_priv);
+ } else {
/* Once logging is disabled, GuC won't generate logs & send an
* interrupt. But there could be some data in the log buffer
* which is yet to be captured. So request GuC to update the log
@@ -629,9 +630,6 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
/* As logging is disabled, update log level to reflect that */
i915.guc_log_level = -1;
- } else {
- /* In case interrupts were disabled, enable them now */
- gen9_enable_guc_interrupts(dev_priv);
}
return ret;
@@ -639,7 +637,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_guc_submission)
+ if (!i915.enable_guc_submission || i915.guc_log_level < 0)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -653,6 +651,8 @@ void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_cleanup(&dev_priv->guc);
+ /* GuC logging is currently the only user of Guc2Host interrupts */
+ gen9_disable_guc_interrupts(dev_priv);
+ guc_log_runtime_destroy(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 8c04eca84351..e1ab6432a914 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,6 +45,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_SKYLAKE(dev_priv))
return true;
+ if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3eec74ca5116..1d623b5e09d6 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_scdc_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
@@ -1208,6 +1209,8 @@ static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv))
return 165000;
+ else if (IS_GEMINILAKE(dev_priv))
+ return 594000;
else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
else
@@ -1334,6 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
@@ -1403,6 +1407,16 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
+ if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
+ if (scdc->scrambling.low_rates)
+ pipe_config->hdmi_scrambling = true;
+
+ if (pipe_config->port_clock > 340000) {
+ pipe_config->hdmi_scrambling = true;
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
+ }
+ }
+
return true;
}
@@ -1812,6 +1826,57 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
+/**
+ * intel_hdmi_handle_sink_scrambling: handle sink scrambling/clock ratio setup
+ * @encoder: intel_encoder
+ * @connector: drm_connector
+ * @high_tmds_clock_ratio: bool to indicate if the function needs to set
+ * or reset the high TMDS clock ratio for scrambling
+ * @scrambling: bool to indicate if the function needs to set or reset
+ * sink scrambling
+ *
+ * This function handles scrambling on HDMI 2.0 capable sinks.
+ * If the required clock rate is > 340 MHz and scrambling is supported by
+ * the sink, it enables scrambling. This should be called before enabling
+ * the HDMI 2.0 port, as the sink can choose to disable the scrambling if
+ * it doesn't detect a scrambled clock within 100 ms.
+ */
+void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_scrambling *sink_scrambling =
+ &connector->display_info.hdmi.scdc.scrambling;
+ struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus);
+ bool ret;
+
+ if (!sink_scrambling->supported)
+ return;
+
+ DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n",
+ encoder->base.name, connector->name);
+
+ /* Set TMDS bit clock ratio to 1/40 or 1/10 */
+ ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio);
+ if (!ret) {
+ DRM_ERROR("Set TMDS ratio failed\n");
+ return;
+ }
+
+ /* Enable/disable sink scrambling */
+ ret = drm_scdc_set_scrambling(adptr, scrambling);
+ if (!ret) {
+ DRM_ERROR("Set sink scrambling failed\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("sink scrambling handled\n");
+}
+
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
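
The compute_config and handle_sink_scrambling changes above encode the HDMI
2.0 rule: above a 340 MHz TMDS character rate, scrambling and the 1/40 clock
ratio are mandatory; below it, scrambling is used only if the sink opts in
for low rates. A self-contained sketch of that decision (struct and function
names are illustrative):

#include <stdbool.h>

struct scdc_caps { bool supported; bool low_rates; };

static void hdmi2_scrambling_needs(const struct scdc_caps *scdc,
				   int port_clock_khz,
				   bool *scrambling, bool *high_ratio)
{
	*scrambling = false;
	*high_ratio = false;

	if (!scdc->supported)
		return;

	if (port_clock_khz > 340000) {
		*scrambling = true;	/* mandatory above 340 MHz */
		*high_ratio = true;	/* TMDS bit/char ratio 1/40 */
	} else if (scdc->low_rates) {
		*scrambling = true;	/* sink explicitly supports it */
	}
}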
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 7af900bcdc05..9ee819666a4c 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -251,24 +251,6 @@ fail:
}
/**
- * intel_huc_fini() - clean up resources allocated for HuC
- * @dev_priv: the drm_i915_private device
- *
- * Cleans up by releasing the huc firmware GEM obj.
- */
-void intel_huc_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&huc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
-
-/**
* intel_guc_auth_huc() - authenticate ucode
* @dev_priv: the drm_i915_device
*
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 7a5b41b1c024..d8ca187ae001 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -331,6 +331,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* audio driver and i915
* @dev_priv: the i915 drm device private data
* @eld : ELD data
+ * @pipe: pipe id
* @port: port id
* @tmds_clk_speed: tmds clock frequency in Hz
*
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 77168e673e0a..c8f7c631fc1f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,7 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- GEM_BUG_ON(!IS_ALIGNED(rq->tail, 8));
+ assert_ring_tail_valid(rq->ring, rq->tail);
reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
@@ -399,22 +399,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
- unsigned long flags;
struct rb_node *rb;
bool submit = false;
- /* After execlist_first is updated, the tasklet will be rescheduled.
- *
- * If we are currently running (inside the tasklet) and a third
- * party queues a request and so updates engine->execlist_first under
- * the spinlock (which we have elided), it will atomically set the
- * TASKLET_SCHED flag causing the us to be re-executed and pick up
- * the change in state (the update to TASKLET_SCHED incurs a memory
- * barrier making this cross-cpu checking safe).
- */
- if (!READ_ONCE(engine->execlist_first))
- return;
-
last = port->request;
if (last)
/* WaIdleLiteRestore:bdw,skl
@@ -448,7 +435,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
while (rb) {
struct drm_i915_gem_request *cursor =
@@ -500,7 +487,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_gem_request_assign(&port->request, last);
engine->execlist_first = rb;
}
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irq(&engine->timeline->lock);
if (submit)
execlists_submit_ports(engine);
@@ -530,24 +517,36 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
- while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+ /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+ * imposing the cost of a locked atomic transaction when submitting a
+ * new request (outside of the context-switch interrupt).
+ */
+ while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
u32 __iomem *csb_mmio =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
u32 __iomem *buf =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
- unsigned int csb, head, tail;
-
- csb = readl(csb_mmio);
- head = GEN8_CSB_READ_PTR(csb);
- tail = GEN8_CSB_WRITE_PTR(csb);
- if (head == tail)
- break;
+ unsigned int head, tail;
+
+ /* The write will be ordered by the uncached read (itself
+ * a memory barrier), so we do not need another in the form
+ * of a locked instruction. The race between the interrupt
+ * handler and the split test/clear is harmless as we order
+ * our clear before the CSB read. If the interrupt arrived
+ * first between the test and the clear, we read the updated
+ * CSB and clear the bit. If the interrupt arrives as we read
+ * the CSB or later (i.e. after we had cleared the bit) the bit
+ * is set and we do a new loop.
+ */
+ __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ head = readl(csb_mmio);
+ tail = GEN8_CSB_WRITE_PTR(head);
+ head = GEN8_CSB_READ_PTR(head);
+ while (head != tail) {
+ unsigned int status;
- if (tail < head)
- tail += GEN8_CSB_ENTRIES;
- do {
- unsigned int idx = ++head % GEN8_CSB_ENTRIES;
- unsigned int status = readl(buf + 2 * idx);
+ if (++head == GEN8_CSB_ENTRIES)
+ head = 0;
/* We are flying near dragons again.
*
@@ -566,11 +565,12 @@ static void intel_lrc_irq_handler(unsigned long data)
* status notifier.
*/
+ status = readl(buf + 2 * head);
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
/* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) !=
+ GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) !=
port[0].context_id);
GEM_BUG_ON(port[0].count == 0);
@@ -588,10 +588,9 @@ static void intel_lrc_irq_handler(unsigned long data)
GEM_BUG_ON(port[0].count == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- } while (head < tail);
+ }
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
- GEN8_CSB_WRITE_PTR(csb) << 8),
+ writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
csb_mmio);
}
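
The rewritten loop above consumes context-status entries by advancing a head
index modulo the number of CSB slots until it meets the hardware's write
pointer. A userspace stand-in for that wrap-around consume loop:

#include <stdio.h>

#define CSB_ENTRIES 6

int main(void)
{
	unsigned int head = 4, tail = 1;	/* wraps: 5, 0, 1 */

	while (head != tail) {
		if (++head == CSB_ENTRIES)
			head = 0;
		printf("process CSB entry %u\n", head);
	}
	return 0;
}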
@@ -647,15 +646,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
static struct intel_engine_cs *
pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
{
- struct intel_engine_cs *engine;
+ struct intel_engine_cs *engine =
+ container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+ GEM_BUG_ON(!locked);
- engine = container_of(pt,
- struct drm_i915_gem_request,
- priotree)->engine;
if (engine != locked) {
- if (locked)
- spin_unlock_irq(&locked->timeline->lock);
- spin_lock_irq(&engine->timeline->lock);
+ spin_unlock(&locked->timeline->lock);
+ spin_lock(&engine->timeline->lock);
}
return engine;
@@ -663,7 +661,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
{
- struct intel_engine_cs *engine = NULL;
+ struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
LIST_HEAD(dfs);
@@ -697,26 +695,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
struct i915_priotree *pt = dep->signaler;
- list_for_each_entry(p, &pt->signalers_list, signal_link)
+ /* Within an engine, there can be no cycle, but we may
+ * refer to the same dependency chain multiple times
+ * (redundant dependencies are not eliminated) and across
+ * engines.
+ */
+ list_for_each_entry(p, &pt->signalers_list, signal_link) {
+ GEM_BUG_ON(p->signaler->priority < pt->priority);
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
+ }
list_safe_reset_next(dep, p, dfs_link);
- if (!RB_EMPTY_NODE(&pt->node))
- continue;
-
- engine = pt_lock_engine(pt, engine);
-
- /* If it is not already in the rbtree, we can update the
- * priority inplace and skip over it (and its dependencies)
- * if it is referenced *again* as we descend the dfs.
- */
- if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
- pt->priority = prio;
- list_del_init(&dep->dfs_link);
- }
}
+ engine = request->engine;
+ spin_lock_irq(&engine->timeline->lock);
+
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
struct i915_priotree *pt = dep->signaler;
@@ -728,16 +723,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (prio <= pt->priority)
continue;
- GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
pt->priority = prio;
- rb_erase(&pt->node, &engine->execlist_queue);
- if (insert_request(pt, &engine->execlist_queue))
- engine->execlist_first = &pt->node;
+ if (!RB_EMPTY_NODE(&pt->node)) {
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
}
- if (engine)
- spin_unlock_irq(&engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline->lock);
/* XXX Do we need to preempt to make room for us and our deps? */
}
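
execlists_schedule() above implements a priority-inheritance walk: starting
from the boosted request, it traverses the dependency DAG and raises every
signaler that is less urgent, re-inserting any that already sit in the
scheduler tree. A recursive sketch of the core idea (the kernel version is
iterative and list-based; these types are illustrative):

struct pt_node {
	int priority;
	struct pt_node *signalers[4];	/* requests this one waits on */
	int nsignalers;
};

static void bump_priority(struct pt_node *pt, int prio)
{
	int i;

	if (pt->priority >= prio)
		return;			/* already urgent enough */
	pt->priority = prio;
	for (i = 0; i < pt->nsignalers; i++)
		bump_priority(pt->signalers[i], prio);	/* deps run first */
}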
@@ -1255,7 +1249,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
request->ring->head = request->postfix;
- request->ring->last_retired_head = -1;
intel_ring_update_space(request->ring);
/* Catch up with any missed context-switch interrupts */
@@ -1268,8 +1261,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
GEM_BUG_ON(request->ctx != port[0].request->ctx);
/* Reset WaIdleLiteRestore:bdw,skl as well */
- request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ request->tail =
+ intel_ring_wrap(request->ring,
+ request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
+ assert_ring_tail_valid(request->ring, request->tail);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1480,7 +1475,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
gen8_emit_wa_tail(request, cs);
}
@@ -1508,7 +1503,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
gen8_emit_wa_tail(request, cs);
}
@@ -1575,6 +1570,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->schedule = execlists_schedule;
+ engine->irq_tasklet.func = intel_lrc_irq_handler;
}
static void
@@ -2041,7 +2037,6 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0;
- ce->ring->last_retired_head = -1;
intel_ring_update_space(ce->ring);
}
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 441c01466384..d44465190dc1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -920,6 +920,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
void *base;
+ const void *vbt;
+ u32 vbt_size;
BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
@@ -972,45 +974,46 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (mboxes & MBOX_ASLE_EXT)
DRM_DEBUG_DRIVER("ASLE extension supported\n");
- if (!dmi_check_system(intel_no_opregion_vbt)) {
- const void *vbt = NULL;
- u32 vbt_size = 0;
-
- if (opregion->header->opregion_ver >= 2 && opregion->asle &&
- opregion->asle->rvda && opregion->asle->rvds) {
- opregion->rvda = memremap(opregion->asle->rvda,
- opregion->asle->rvds,
- MEMREMAP_WB);
- vbt = opregion->rvda;
- vbt_size = opregion->asle->rvds;
- }
+ if (dmi_check_system(intel_no_opregion_vbt))
+ goto out;
+ if (opregion->header->opregion_ver >= 2 && opregion->asle &&
+ opregion->asle->rvda && opregion->asle->rvds) {
+ opregion->rvda = memremap(opregion->asle->rvda,
+ opregion->asle->rvds,
+ MEMREMAP_WB);
+ vbt = opregion->rvda;
+ vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
+ goto out;
} else {
- vbt = base + OPREGION_VBT_OFFSET;
- /*
- * The VBT specification says that if the ASLE ext
- * mailbox is not used its area is reserved, but
- * on some CHT boards the VBT extends into the
- * ASLE ext area. Allow this even though it is
- * against the spec, so we do not end up rejecting
- * the VBT on those boards (and end up not finding the
- * LCD panel because of this).
- */
- vbt_size = (mboxes & MBOX_ASLE_EXT) ?
- OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
- vbt_size -= OPREGION_VBT_OFFSET;
- if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
- opregion->vbt = vbt;
- opregion->vbt_size = vbt_size;
- }
+ DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
}
}
+ vbt = base + OPREGION_VBT_OFFSET;
+ /*
+ * The VBT specification says that if the ASLE ext mailbox is not used
+ * its area is reserved, but on some CHT boards the VBT extends into the
+ * ASLE ext area. Allow this even though it is against the spec, so we
+ * do not end up rejecting the VBT on those boards (and end up not
+ * finding the LCD panel because of this).
+ */
+ vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+ OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+ vbt_size -= OPREGION_VBT_OFFSET;
+ if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+ DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+ opregion->vbt = vbt;
+ opregion->vbt_size = vbt_size;
+ } else {
+ DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+ }
+
+out:
return 0;
err_out:
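
The OpRegion rework above flattens deeply nested VBT probing into a straight
line: bail out early on the DMI quirk, jump to "out" as soon as a valid VBT
is found in RVDA, otherwise fall through to the mailbox #4 area. A sketch of
that early-exit flattening (helpers are illustrative):

static int probe_vbt(struct ctx *c)
{
	if (quirked(c))
		goto out;		/* no VBT expected at all */

	if (try_rvda(c) == 0)
		goto out;		/* first source succeeded */

	try_mailbox4(c);		/* best effort, failure tolerated */
out:
	return 0;
}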
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aece0ff88a5d..570bd603f401 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -655,6 +655,29 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
return wm_size;
}
+static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+
+ /* FIXME check the 'enable' instead */
+ if (!crtc_state->base.active)
+ return false;
+
+ /*
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
+ */
+ if (plane->id == PLANE_CURSOR)
+ return plane_state->base.fb != NULL;
+ else
+ return plane_state->base.visible;
+}
+
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc, *enabled = NULL;
@@ -1961,7 +1984,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -1990,7 +2013,7 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -2013,15 +2036,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
{
int cpp;
- /*
- * Treat cursor with fb as always visible since cursor updates
- * can happen faster than the vrefresh rate, and the current
- * watermark code doesn't handle that correctly. Cursor updates
- * which set/clear the fb or change the cursor size are going
- * to get throttled by intel_legacy_cursor_update() to work
- * around this problem with the watermark code.
- */
- if (!cstate->base.active || !pstate->base.fb)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -2038,7 +2053,7 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
{
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -3346,19 +3361,29 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
* Caller should take care of dividing & rounding off the value.
*/
static uint32_t
-skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate)
{
+ struct intel_plane *plane = to_intel_plane(pstate->base.plane);
uint32_t downscale_h, downscale_w;
uint32_t src_w, src_h, dst_w, dst_h;
- if (WARN_ON(!pstate->base.visible))
+ if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
return DRM_PLANE_HELPER_NO_SCALING;
/* n.b., src is 16.16 fixed point, dst is whole integer */
- src_w = drm_rect_width(&pstate->base.src);
- src_h = drm_rect_height(&pstate->base.src);
- dst_w = drm_rect_width(&pstate->base.dst);
- dst_h = drm_rect_height(&pstate->base.dst);
+ if (plane->id == PLANE_CURSOR) {
+ src_w = pstate->base.src_w;
+ src_h = pstate->base.src_h;
+ dst_w = pstate->base.crtc_w;
+ dst_h = pstate->base.crtc_h;
+ } else {
+ src_w = drm_rect_width(&pstate->base.src);
+ src_h = drm_rect_height(&pstate->base.src);
+ dst_w = drm_rect_width(&pstate->base.dst);
+ dst_h = drm_rect_height(&pstate->base.dst);
+ }
+
if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
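
skl_plane_downscale_amount() works in 16.16 fixed point: each axis ratio is
(src << 16) / dst, the two are multiplied and shifted back by 16, and the
data rate is later scaled the same way. A userspace demo of the arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint32_t fp16_ratio(uint32_t src, uint32_t dst)
{
	return (uint32_t)(((uint64_t)src << 16) / dst);	/* 16.16 result */
}

int main(void)
{
	uint32_t down_w = fp16_ratio(3840, 1920);	/* 2.0 -> 0x20000 */
	uint32_t down_h = fp16_ratio(2160, 1080);	/* 2.0 -> 0x20000 */
	uint32_t amount = (uint32_t)(((uint64_t)down_w * down_h) >> 16);
	uint64_t data_rate = 1000000;

	/* a 4.0x combined downscale quadruples the fetched data rate */
	printf("scaled rate: %llu\n",
	       (unsigned long long)((data_rate * amount) >> 16));
	return 0;
}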
@@ -3374,6 +3399,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
+ struct intel_plane *plane = to_intel_plane(pstate->plane);
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
@@ -3386,7 +3412,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
fb = pstate->fb;
format = fb->format->format;
- if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (plane->id == PLANE_CURSOR)
return 0;
if (y && format != DRM_FORMAT_NV12)
return 0;
@@ -3410,7 +3436,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
data_rate = width * height * fb->format->cpp[0];
}
- down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+ down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
return (uint64_t)data_rate * down_scale_amount >> 16;
}
@@ -3702,7 +3728,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
uint64_t pixel_rate;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!pstate->base.visible))
+ if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
return 0;
/*
@@ -3710,7 +3736,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
* with additional adjustments for plane-specific scaling.
*/
adjusted_pixel_rate = cstate->pixel_rate;
- downscale_amount = skl_plane_downscale_amount(pstate);
+ downscale_amount = skl_plane_downscale_amount(cstate, pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
@@ -3727,6 +3753,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint8_t *out_lines, /* out */
bool *enabled /* out */)
{
+ struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
struct drm_plane_state *pstate = &intel_pstate->base;
struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
@@ -3746,7 +3773,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
bool y_tiled, x_tiled;
- if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
+ if (latency == 0 ||
+ !intel_wm_plane_visible(cstate, intel_pstate)) {
*enabled = false;
return 0;
}
@@ -3762,8 +3790,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (apply_memory_bw_wa && x_tiled)
latency += 15;
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ if (plane->id == PLANE_CURSOR) {
+ width = intel_pstate->base.crtc_w;
+ height = intel_pstate->base.crtc_h;
+ } else {
+ width = drm_rect_width(&intel_pstate->base.src) >> 16;
+ height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ }
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
@@ -8055,7 +8088,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
case GEN6_PCODE_TIMEOUT:
return -ETIMEDOUT;
default:
- MISSING_CASE(flags)
+ MISSING_CASE(flags);
return 0;
}
}
@@ -8355,6 +8388,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
u32 lower, upper, tmp;
+ int loop = 2;
/* The registers accessed do not need forcewake. We borrow
* uncore lock to prevent concurrent access to range reg.
@@ -8383,7 +8417,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
I915_WRITE_FW(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = I915_READ_FW(reg);
- } while (upper != tmp);
+ } while (upper != tmp && --loop);
/* Everywhere else we always use VLV_COUNTER_CONTROL with the
* VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
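
vlv_residency_raw() reads a 64-bit residency counter as two 32-bit halves,
so it samples the upper half before and after the lower read and retries if
they differ; the new loop bound keeps a stuck counter from spinning forever.
A sketch of the split-read pattern (read_high()/read_low() stand in for the
real register accessors):

#include <stdint.h>

extern uint32_t read_high(void);
extern uint32_t read_low(void);

static uint64_t read_counter64(void)
{
	uint32_t hi, lo, tmp;
	int loop = 2;		/* bounded retries, as in the hunk */

	tmp = read_high();
	do {
		hi = tmp;
		lo = read_low();
		tmp = read_high();	/* did the high half move? */
	} while (tmp != hi && --loop);

	return ((uint64_t)hi << 32) | lo;
}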
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f7c6383ecd06..66a2b8b83972 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,13 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
void intel_ring_update_space(struct intel_ring *ring)
{
- if (ring->last_retired_head != -1) {
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
- }
-
- ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
- ring->tail, ring->size);
+ ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
}
static int
@@ -618,12 +612,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
}
/* If the rq hung, jump to its breadcrumb and skip the batch */
- if (request->fence.error == -EIO) {
- struct intel_ring *ring = request->ring;
-
- ring->head = request->postfix;
- ring->last_retired_head = -1;
- }
+ if (request->fence.error == -EIO)
+ request->ring->head = request->postfix;
} else {
engine->legacy_active_context = NULL;
}
@@ -784,7 +774,7 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
I915_WRITE_TAIL(request->engine, request->tail);
}
@@ -796,7 +786,7 @@ static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
*cs++ = MI_USER_INTERRUPT;
req->tail = intel_ring_offset(req, cs);
- GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int i9xx_emit_breadcrumb_sz = 4;
@@ -835,7 +825,7 @@ static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
*cs++ = MI_NOOP;
req->tail = intel_ring_offset(req, cs);
- GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1392,7 +1382,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
if (IS_I830(engine->i915) || IS_I845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
- ring->last_retired_head = -1;
intel_ring_update_space(ring);
vma = intel_ring_create_vma(engine->i915, size);
@@ -1573,10 +1562,8 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id)
engine->buffer->head = engine->buffer->tail;
- engine->buffer->last_retired_head = -1;
- }
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -2130,7 +2117,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
num_rings =
hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
- engine->emit_breadcrumb_sz += num_rings * 6;
+ engine->emit_breadcrumb_sz += num_rings * 8;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 847aea554464..a82a0807f64d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -149,16 +149,6 @@ struct intel_ring {
int space;
int size;
int effective_size;
-
- /** We track the position of the requests in the ring buffer, and
- * when each is retired we increment last_retired_head as the GPU
- * must have finished processing the request and so we know we
- * can advance the ringbuffer up to that position.
- *
- * last_retired_head is set to -1 after the value is consumed so
- * we can detect new retirements.
- */
- u32 last_retired_head;
};
struct i915_gem_context;
@@ -442,18 +432,10 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline unsigned
+static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
- return 1 << engine->id;
-}
-
-static inline void
-intel_flush_status_page(struct intel_engine_cs *engine, int reg)
-{
- mb();
- clflush(&engine->status_page.page_addr[reg]);
- mb();
+ return BIT(engine->id);
}
static inline u32
@@ -464,14 +446,22 @@ intel_read_status_page(struct intel_engine_cs *engine, int reg)
}
static inline void
-intel_write_status_page(struct intel_engine_cs *engine,
- int reg, u32 value)
+intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
- mb();
- clflush(&engine->status_page.page_addr[reg]);
- engine->status_page.page_addr[reg] = value;
- clflush(&engine->status_page.page_addr[reg]);
- mb();
+ /* Writing into the status page should be done sparingly. Since
+ * we do so when we are uncertain of the device state, we take a bit
+ * of extra paranoia to try and ensure that the HWS takes the value
+ * we give and that it doesn't end up trapped inside the CPU!
+ */
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ mb();
+ clflush(&engine->status_page.page_addr[reg]);
+ engine->status_page.page_addr[reg] = value;
+ clflush(&engine->status_page.page_addr[reg]);
+ mb();
+ } else {
+ WRITE_ONCE(engine->status_page.page_addr[reg], value);
+ }
}
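
The rewritten intel_write_status_page() above only pays for the flush dance when the CPU actually has CLFLUSH; otherwise a single volatile store suffices. A rough userspace model of that branch, with stub functions standing in for mb()/clflush and the feature test (a sketch under those assumptions, not the kernel code):

#include <stdint.h>

static int have_clflush;        /* stand-in for static_cpu_has(X86_FEATURE_CLFLUSH) */

static void flush_line(volatile void *p) { (void)p; }      /* clflush stand-in */
static void full_barrier(void) { __sync_synchronize(); }   /* mb() stand-in */

static void write_status_slot(volatile uint32_t *slot, uint32_t value)
{
        if (have_clflush) {
                /* Flush any stale line, store, then flush the new value out. */
                full_barrier();
                flush_line(slot);
                *slot = value;
                flush_line(slot);
                full_barrier();
        } else {
                /* WRITE_ONCE() analogue: one untorn volatile store. */
                *slot = value;
        }
}

int main(void)
{
        uint32_t hws[4] = { 0 };

        write_status_slot(&hws[1], 0xdeadbeef);
        return hws[1] != 0xdeadbeef;
}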
/*
@@ -525,12 +515,29 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
}
static inline u32
-intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
+static inline u32
+intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - req->ring->vaddr;
GEM_BUG_ON(offset > req->ring->size);
- return offset & (req->ring->size - 1);
+ return intel_ring_wrap(req->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ /* We could combine these into a single tail operation, but keeping
+ * them as separate tests will help identify the cause should one
+ * ever fire.
+ */
+ GEM_BUG_ON(!IS_ALIGNED(tail, 8));
+ GEM_BUG_ON(tail >= ring->size);
}
void intel_ring_update_space(struct intel_ring *ring);
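
intel_ring_wrap() and assert_ring_tail_valid() above both lean on the ring size being a power of two: masking with (size - 1) is then a cheap modulo, and a tail equal to ring->size must wrap to 0 rather than be written back (the comment in intel_ring_offset() notes that writing ring->size hangs some GPUs). A minimal standalone sketch of the same checks (names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Wrap an offset into a power-of-two sized ring buffer. */
static uint32_t ring_wrap(uint32_t ring_size, uint32_t pos)
{
        assert((ring_size & (ring_size - 1)) == 0);  /* power of two */
        return pos & (ring_size - 1);
}

/* Tail must be qword aligned and strictly inside the ring. */
static void check_tail(uint32_t ring_size, uint32_t tail)
{
        assert((tail & 7) == 0);
        assert(tail < ring_size);
}

int main(void)
{
        check_tail(4096, ring_wrap(4096, 4096)); /* 4096 wraps to 0 */
        return 0;
}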
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 012bc358a33a..f8a375f8dde6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2840,8 +2840,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct device *kdev = &pdev->dev;
+ int ret;
- pm_runtime_get_sync(kdev);
+ ret = pm_runtime_get_sync(kdev);
+ WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
atomic_inc(&dev_priv->pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
@@ -2871,7 +2873,8 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- WARN_ON_ONCE(ret < 0);
+ WARN_ONCE(ret < 0,
+ "pm_runtime_get_if_in_use() failed: %d\n", ret);
if (ret <= 0)
return false;
}
@@ -2955,8 +2958,11 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* platforms without RPM support.
*/
if (!HAS_RUNTIME_PM(dev_priv)) {
+ int ret;
+
pm_runtime_dont_use_autosuspend(kdev);
- pm_runtime_get_sync(kdev);
+ ret = pm_runtime_get_sync(kdev);
+ WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
} else {
pm_runtime_use_autosuspend(kdev);
}
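
All three runtime-PM hunks above apply the same fix: stop discarding the pm_runtime_get_sync() return value and warn (once) when it reports failure, since a failed wakeref leaves later hardware access unsafe. A compact model of that warn-once wrapper (the fake getter and error value are illustrative):

#include <stdio.h>

/* Stand-in for pm_runtime_get_sync(); negative means failure. */
static int fake_runtime_get_sync(void)
{
        return -5;      /* pretend -EIO */
}

static int runtime_get_checked(void)
{
        static int warned;
        int ret = fake_runtime_get_sync();

        /* WARN_ONCE() analogue: report only the first failure. */
        if (ret < 0 && !warned) {
                warned = 1;
                fprintf(stderr, "pm_runtime_get_sync() failed: %d\n", ret);
        }
        return ret;
}

int main(void)
{
        runtime_get_checked();
        runtime_get_checked();  /* second failure stays quiet */
        return 0;
}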
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b931d0bd7a64..f7d431427115 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -217,7 +217,7 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = intel_plane->id;
enum pipe pipe = intel_plane->pipe;
- u32 plane_ctl;
+ u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
unsigned int rotation = plane_state->base.rotation;
@@ -232,24 +232,6 @@ skl_update_plane(struct drm_plane *drm_plane,
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (!IS_GEMINILAKE(dev_priv)) {
- plane_ctl |=
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE |
- PLANE_CTL_PLANE_GAMMA_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
-
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -361,32 +343,15 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
-static void
-vlv_update_plane(struct drm_plane *dplane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
- enum plane_id plane_id = intel_plane->id;
- u32 sprctl;
- u32 sprsurf_offset, linear_offset;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 sprctl;
- sprctl = SP_ENABLE;
+ sprctl = SP_ENABLE | SP_GAMMA_ENABLE;
switch (fb->format->format) {
case DRM_FORMAT_YUYV:
@@ -423,20 +388,10 @@ vlv_update_plane(struct drm_plane *dplane,
sprctl |= SP_FORMAT_RGBA8888;
break;
default:
- /*
- * If we get here one of the upper layers failed to filter
- * out the unsupported plane formats
- */
- BUG();
- break;
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- sprctl |= SP_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
@@ -449,22 +404,36 @@ vlv_update_plane(struct drm_plane *dplane,
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
+ return sprctl;
+}
+
+static void
+vlv_update_plane(struct drm_plane *dplane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane_id = intel_plane->id;
+ u32 sprctl = plane_state->ctl;
+ u32 sprsurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
- src_w--;
- src_h--;
crtc_w--;
crtc_h--;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- if (rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- } else if (rotation & DRM_REFLECT_X) {
- x += src_w;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -516,31 +485,23 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
-ivb_update_plane(struct drm_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
- u32 sprctl, sprscale = 0;
- u32 sprsurf_offset, linear_offset;
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 sprctl;
+
+ sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE;
+
+ if (IS_IVYBRIDGE(dev_priv))
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl = SPRITE_ENABLE;
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
@@ -562,34 +523,48 @@ ivb_update_plane(struct drm_plane *plane,
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- sprctl |= SPRITE_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
if (rotation & DRM_ROTATE_180)
sprctl |= SPRITE_ROTATE_180;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
- else
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- sprctl |= SPRITE_PIPE_CSC_ENABLE;
-
if (key->flags & I915_SET_COLORKEY_DESTINATION)
sprctl |= SPRITE_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SPRITE_SOURCE_KEY;
+ return sprctl;
+}
+
+static void
+ivb_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = intel_plane->pipe;
+ u32 sprctl = plane_state->ctl, sprscale = 0;
+ u32 sprsurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -599,16 +574,6 @@ ivb_update_plane(struct drm_plane *plane,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- /* HSW+ does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
- rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -664,31 +629,20 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
-ilk_update_plane(struct drm_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_plane->pipe;
- u32 dvscntr, dvsscale;
- u32 dvssurf_offset, linear_offset;
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 dvscntr;
- dvscntr = DVS_ENABLE;
+ dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;
+
+ if (IS_GEN6(dev_priv))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
@@ -710,47 +664,57 @@ ilk_update_plane(struct drm_plane *plane,
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- dvscntr |= DVS_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
if (rotation & DRM_ROTATE_180)
dvscntr |= DVS_ROTATE_180;
- if (IS_GEN6(dev_priv))
- dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
-
if (key->flags & I915_SET_COLORKEY_DESTINATION)
dvscntr |= DVS_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
dvscntr |= DVS_SOURCE_KEY;
+ return dvscntr;
+}
+
+static void
+ilk_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int pipe = intel_plane->pipe;
+ u32 dvscntr = plane_state->ctl, dvsscale = 0;
+ u32 dvssurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- dvsscale = 0;
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- if (rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -981,6 +945,26 @@ intel_check_sprite_plane(struct drm_plane *plane,
ret = skl_check_plane_surface(state);
if (ret)
return ret;
+
+ state->ctl = skl_plane_ctl(crtc_state, state);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = vlv_sprite_ctl(crtc_state, state);
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = ivb_sprite_ctl(crtc_state, state);
+ } else {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = ilk_sprite_ctl(crtc_state, state);
}
return 0;
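
The sprite changes above share one theme: the control-register word is now computed once during atomic check (vlv_sprite_ctl(), ivb_sprite_ctl(), ilk_sprite_ctl()) and cached in plane_state->ctl, so the update path under the uncore spinlock only writes precomputed values. A minimal sketch of that check/commit split (all names and bit values below are illustrative):

#include <stdint.h>
#include <stdio.h>

struct plane_state {
        uint32_t format;
        uint32_t ctl;               /* filled in by the check phase */
};

/* Check phase: validates and precomputes; the only phase that may fail. */
static int plane_check(struct plane_state *st)
{
        uint32_t ctl = 1u << 31;    /* illustrative ENABLE bit */

        switch (st->format) {
        case 0: ctl |= 2u << 12; break;   /* some RGB layout */
        case 1: ctl |= 3u << 12; break;   /* some ARGB layout */
        default:
                return -22;               /* -EINVAL: unsupported format */
        }

        st->ctl = ctl;
        return 0;
}

/* Commit phase: cannot fail; just latches the precomputed word. */
static void plane_commit(const struct plane_state *st, uint32_t *reg)
{
        *reg = st->ctl;
}

int main(void)
{
        struct plane_state st = { .format = 1 };
        uint32_t reg = 0;

        if (plane_check(&st) == 0)
                plane_commit(&st, &reg);
        printf("PLANE_CTL = 0x%08x\n", reg);
        return 0;
}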
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index d15a7d9d4eb0..c117424f1f50 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -26,6 +26,19 @@
#include "intel_uc.h"
#include <linux/firmware.h>
+/* Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = fetch_and_zero(&uc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
@@ -83,23 +96,166 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->guc.send_mutex);
+ struct intel_guc *guc = &dev_priv->guc;
+
+ mutex_init(&guc->send_mutex);
+ guc->send = intel_guc_send_mmio;
+}
+
+static void fetch_uc_fw(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ if (!uc_fw->path)
+ return;
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+ if (err)
+ goto fail;
+ if (!fw)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+ uc_fw->path, fw);
+
+ /* Check the size of the blob before examining buffer contents */
+ if (fw->size < sizeof(struct uc_css_header)) {
+ DRM_NOTE("Firmware header is missing\n");
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Firmware bits always start from header */
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
+
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+ DRM_NOTE("CSS header definition mismatch\n");
+ goto fail;
+ }
+
+ /* then, uCode */
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ DRM_NOTE("RSA key size is bad\n");
+ goto fail;
+ }
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At least, it should have header, uCode and RSA. Size of all three. */
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (fw->size < size) {
+ DRM_NOTE("Missing firmware components\n");
+ goto fail;
+ }
+
+ /*
+ * The GuC firmware image has the version number embedded at a
+ * well-known offset within the firmware blob; note that major / minor
+ * version are TWO bytes each (i.e. u16), although all pointers and
+ * offsets are defined in terms of bytes (u8).
+ */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ /* Header and uCode will be loaded to WOPCM. Size of the two. */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+
+ /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+ goto fail;
+ }
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+ DRM_NOTE("Skipping %s firmware version check\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+
+ DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+ uc_fw->obj);
+
+ release_firmware(fw);
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+ return;
+
+fail:
+ DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+ uc_fw->path, err);
+ DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+ err, fw, uc_fw->obj);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}
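
fetch_uc_fw() above walks the CSS header to derive byte offsets for the three firmware sections; all header fields are dword (u32) counts, so each is scaled by sizeof(u32). A simplified sketch of that layout arithmetic (the struct fields and sample numbers are illustrative, not the real uc_css_header):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct css_hdr {                 /* illustrative subset of uc_css_header */
        uint32_t header_size_dw; /* full header incl. key material, in dwords */
        uint32_t size_dw;        /* header + uCode, in dwords */
        uint32_t modulus_size_dw;
        uint32_t key_size_dw;
        uint32_t exponent_size_dw;
};

struct fw_layout {
        size_t header_off, header_size;
        size_t ucode_off, ucode_size;
        size_t rsa_off, rsa_size;
};

static void compute_layout(const struct css_hdr *css, struct fw_layout *l)
{
        /* Firmware bits always start at the header... */
        l->header_off = 0;
        l->header_size = (css->header_size_dw - css->modulus_size_dw -
                          css->key_size_dw - css->exponent_size_dw) * sizeof(uint32_t);

        /* ...uCode follows the header... */
        l->ucode_off = l->header_off + l->header_size;
        l->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(uint32_t);

        /* ...and the RSA signature follows the uCode. */
        l->rsa_off = l->ucode_off + l->ucode_size;
        l->rsa_size = css->key_size_dw * sizeof(uint32_t);
}

int main(void)
{
        struct css_hdr css = {
                .header_size_dw = 161, .size_dw = 16384,
                .modulus_size_dw = 64, .key_size_dw = 64, .exponent_size_dw = 1,
        };
        struct fw_layout l;

        compute_layout(&css, &l);
        printf("header %zu B, uCode %zu B, RSA %zu B\n",
               l.header_size, l.ucode_size, l.rsa_size);
        return 0;
}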
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- if (dev_priv->huc.fw.path)
- intel_uc_prepare_fw(dev_priv, &dev_priv->huc.fw);
+ fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
+ fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
+}
- if (dev_priv->guc.fw.path)
- intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw);
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
+{
+ __intel_uc_fw_fini(&dev_priv->guc.fw);
+ __intel_uc_fw_fini(&dev_priv->huc.fw);
}
int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
int ret, attempts;
- /* GuC not enabled, nothing to do */
if (!i915.enable_guc_loading)
return 0;
@@ -109,9 +265,13 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
i915_ggtt_enable_guc(dev_priv);
if (i915.enable_guc_submission) {
+ /*
+ * This is stuff we need to have available at fw load time
+ * if we are planning to enable submission later
+ */
ret = i915_guc_submission_init(dev_priv);
if (ret)
- goto err;
+ goto err_guc;
}
/* WaEnableuKernelHeaderValidFix:skl */
@@ -150,7 +310,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
ret = i915_guc_submission_enable(dev_priv);
if (ret)
- goto err_submission;
+ goto err_interrupts;
}
return 0;
@@ -164,11 +324,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
* nonfatal error (i.e. it doesn't prevent driver load, but
* marks the GPU as wedged until reset).
*/
+err_interrupts:
+ gen9_disable_guc_interrupts(dev_priv);
err_submission:
if (i915.enable_guc_submission)
i915_guc_submission_fini(dev_priv);
-
-err:
+err_guc:
i915_ggtt_disable_guc(dev_priv);
DRM_ERROR("GuC init failed\n");
@@ -185,11 +346,24 @@ err:
return ret;
}
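
The relabelled error path in intel_uc_init_hw() (err_guc, err_submission, err_interrupts) follows the usual kernel convention: each goto target names the newest resource still held when the jump is taken, and teardown runs in reverse order of setup. The skeleton of that idiom, reduced to stubs:

static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static int setup_c(void) { return 0; }
static void teardown_a(void) { }
static void teardown_b(void) { }

static int init_all(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                return ret;         /* nothing to unwind yet */

        ret = setup_b();
        if (ret)
                goto err_a;

        ret = setup_c();
        if (ret)
                goto err_b;

        return 0;

err_b:                              /* undo b, then fall through to a */
        teardown_b();
err_a:
        teardown_a();
        return ret;
}

int main(void)
{
        return init_all();
}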
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_loading)
+ return;
+
+ if (i915.enable_guc_submission) {
+ i915_guc_submission_disable(dev_priv);
+ gen9_disable_guc_interrupts(dev_priv);
+ i915_guc_submission_fini(dev_priv);
+ }
+ i915_ggtt_disable_guc(dev_priv);
+}
+
/*
* Read GuC command/status register (SOFT_SCRATCH_0)
* Return true if it contains a response rather than a command
*/
-static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
+static bool guc_recv(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -198,7 +372,10 @@ static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
return INTEL_GUC_RECV_IS_RESPONSE(val);
}
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+/*
+ * This function implements the MMIO based host to GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 status;
@@ -209,7 +386,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
return -EINVAL;
mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
dev_priv->guc.action_count += 1;
dev_priv->guc.action_cmd = action[0];
@@ -226,9 +403,9 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
* up to that length of time, then switch to a slower sleep-wait loop.
* No intel_guc_send command should ever take longer than 10ms.
*/
- ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+ ret = wait_for_us(guc_recv(guc, &status), 10);
if (ret)
- ret = wait_for(intel_guc_recv(guc, &status), 10);
+ ret = wait_for(guc_recv(guc, &status), 10);
if (status != INTEL_GUC_STATUS_SUCCESS) {
/*
* Either the GuC explicitly returned an error (which
@@ -247,7 +424,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
}
dev_priv->guc.action_status = status;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
mutex_unlock(&guc->send_mutex);
return ret;
@@ -268,136 +445,3 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-
-void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err)
- goto fail;
- if (!fw)
- goto fail;
-
- DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
- uc_fw->path, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_NOTE("Firmware header is missing\n");
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_NOTE("CSS header definition mismatch\n");
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
- DRM_NOTE("RSA key size is bad\n");
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* At least, it should have header, uCode and RSA. Size of all three. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_NOTE("Missing firmware components\n");
- goto fail;
- }
-
- /*
- * The GuC firmware image has the version number embedded at a
- * well-known offset within the firmware blob; note that major / minor
- * version are TWO bytes each (i.e. u16), although all pointers and
- * offsets are defined in terms of bytes (u8).
- */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = uc_fw->header_size + uc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > intel_guc_wopcm_size(dev_priv)) {
- DRM_ERROR("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
- break;
-
- default:
- DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
- err = -ENOEXEC;
- goto fail;
- }
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("Skipping uC firmware version check\n");
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
-
- DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
- uc_fw->obj);
-
- release_firmware(fw);
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- return;
-
-fail:
- DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
- uc_fw->path, err);
- DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, uc_fw->obj);
-
- release_firmware(fw); /* OK even if fw is NULL */
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
-}
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index a35ededfaa40..4b7f73aeddac 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -34,7 +34,9 @@ struct drm_i915_gem_request;
/*
* This structure primarily describes the GEM object shared with the GuC.
- * The GEM object is held for the entire lifetime of our interaction with
+ * The specs sometimes refer to this object as a "GuC context", but we use
+ * the term "client" to avoid confusion with hardware contexts. This
+ * GEM object is held for the entire lifetime of our interaction with
* the GuC, being allocated before the GuC is loaded with its firmware.
* Because there's no way to update the address used by the GuC after
* initialisation, the shared object must stay pinned into the GGTT as
@@ -44,7 +46,7 @@ struct drm_i915_gem_request;
*
* The single GEM object described here is actually made up of several
* separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process decriptor" which holds sequence data for
+ * kmap'd) includes the "process descriptor" which holds sequence data for
* the doorbell, and one cacheline which actually *is* the doorbell; a
* write to this will "ring the doorbell" (i.e. send an interrupt to the
* GuC). The subsequent pages of the client object constitute the work
@@ -72,13 +74,12 @@ struct i915_guc_client {
uint32_t engines; /* bitmap of (host) engine ids */
uint32_t priority;
- uint32_t ctx_index;
+ u32 stage_id;
uint32_t proc_desc_offset;
- uint32_t doorbell_offset;
- uint32_t doorbell_cookie;
- uint16_t doorbell_id;
- uint16_t padding[3]; /* Maintain alignment */
+ u16 doorbell_id;
+ unsigned long doorbell_offset;
+ u32 doorbell_cookie;
spinlock_t wq_lock;
uint32_t wq_offset;
@@ -100,11 +101,40 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_SUCCESS
};
+/* User-friendly representation of an enum */
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
+ case INTEL_UC_FIRMWARE_NONE:
+ return "NONE";
+ case INTEL_UC_FIRMWARE_PENDING:
+ return "PENDING";
+ case INTEL_UC_FIRMWARE_SUCCESS:
+ return "SUCCESS";
+ }
+ return "<invalid>";
+}
+
enum intel_uc_fw_type {
INTEL_UC_FW_TYPE_GUC,
INTEL_UC_FW_TYPE_HUC
};
+/* User-friendly representation of an enum */
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
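
Both _repr() helpers above put the fallback return after the switch rather than inside a default: label; with no default case, -Wswitch can still warn when someone adds an enum value but forgets the matching string. The pattern in isolation (values are illustrative):

enum fw_state { FW_NONE, FW_PENDING, FW_SUCCESS };

static const char *fw_state_repr(enum fw_state s)
{
        switch (s) {            /* no default: keeps -Wswitch useful */
        case FW_NONE:
                return "NONE";
        case FW_PENDING:
                return "PENDING";
        case FW_SUCCESS:
                return "SUCCESS";
        }
        return "<invalid>";     /* out-of-range input only */
}

int main(void)
{
        return fw_state_repr(FW_SUCCESS)[0] != 'S';
}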
+
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the GuC.
@@ -133,11 +163,13 @@ struct intel_uc_fw {
struct intel_guc_log {
uint32_t flags;
struct i915_vma *vma;
- void *buf_addr;
- struct workqueue_struct *flush_wq;
- struct work_struct flush_work;
- struct rchan *relay_chan;
-
+ /* The runtime stuff gets created only when GuC logging gets enabled */
+ struct {
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+ } runtime;
/* logging related stats */
u32 capture_miss_count;
u32 flush_interrupt_count;
@@ -154,12 +186,13 @@ struct intel_guc {
bool interrupts_enabled;
struct i915_vma *ads_vma;
- struct i915_vma *ctx_pool_vma;
- struct ida ctx_ids;
+ struct i915_vma *stage_desc_pool;
+ void *stage_desc_pool_vaddr;
+ struct ida stage_ids;
struct i915_guc_client *execbuf_client;
- DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
+ DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
/* Action status & statistics */
@@ -174,6 +207,9 @@ struct intel_guc {
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
+
+ /* GuC's FW specific send function */
+ int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
};
struct intel_huc {
@@ -187,17 +223,19 @@ struct intel_huc {
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
-void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw);
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ return guc->send(guc, action, len);
+}
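
intel_guc_send() is now a thin inline dispatching through guc->send, which intel_uc_init_early() points at the MMIO backend; another transport can later be swapped in without touching callers. A sketch of the same indirection (the backend below is purely illustrative):

#include <stdint.h>
#include <stdio.h>

struct guc;
typedef int (*guc_send_fn)(struct guc *guc, const uint32_t *action, uint32_t len);

struct guc {
        guc_send_fn send;       /* transport-specific backend */
};

static int mmio_send(struct guc *g, const uint32_t *action, uint32_t len)
{
        (void)g;
        printf("MMIO send, %u dwords, action 0x%x\n", len, action[0]);
        return 0;
}

/* Callers never care which backend is wired up. */
static inline int guc_send(struct guc *g, const uint32_t *action, uint32_t len)
{
        return g->send(g, action, len);
}

int main(void)
{
        struct guc g = { .send = mmio_send };
        uint32_t action[] = { 0x10 };

        return guc_send(&g, action, 1);
}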
/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
-void intel_guc_fini(struct drm_i915_private *dev_priv);
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
@@ -212,10 +250,11 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
/* intel_guc_log.c */
-void intel_guc_log_create(struct intel_guc *guc);
+int intel_guc_log_create(struct intel_guc *guc);
+void intel_guc_log_destroy(struct intel_guc *guc);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
@@ -227,7 +266,6 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_fini(struct drm_i915_private *dev_priv);
int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 09f5f02d7901..6d1ea26b2493 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -52,10 +52,10 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
}
static inline void
-fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- WARN_ON(!i915_mmio_reg_valid(d->reg_set));
- __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}
static inline void
@@ -69,9 +69,10 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
}
static inline void
-fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
@@ -79,15 +80,17 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
}
static inline void
-fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(d->i915, d->reg_set, d->val_set);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}
static inline void
-fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
@@ -95,72 +98,59 @@ fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
}
static inline void
-fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
-}
-
-static inline void
-fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
-{
- /* something from same cacheline, but not from the set register */
- if (i915_mmio_reg_valid(d->reg_post))
- __raw_posting_read(d->i915, d->reg_post);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}
static void
-fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- for_each_fw_domain_masked(d, fw_domains, dev_priv) {
- fw_domain_wait_ack_clear(d);
- fw_domain_get(d);
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+ fw_domain_wait_ack_clear(i915, d);
+ fw_domain_get(i915, d);
}
- for_each_fw_domain_masked(d, fw_domains, dev_priv)
- fw_domain_wait_ack(d);
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_wait_ack(i915, d);
- dev_priv->uncore.fw_domains_active |= fw_domains;
+ i915->uncore.fw_domains_active |= fw_domains;
}
static void
-fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- for_each_fw_domain_masked(d, fw_domains, dev_priv) {
- fw_domain_put(d);
- fw_domain_posting_read(d);
- }
-
- dev_priv->uncore.fw_domains_active &= ~fw_domains;
-}
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
-static void
-fw_domains_posting_read(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore_forcewake_domain *d;
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_put(i915, d);
- /* No need to do for all, just do for first found */
- for_each_fw_domain(d, dev_priv) {
- fw_domain_posting_read(d);
- break;
- }
+ i915->uncore.fw_domains_active &= ~fw_domains;
}
static void
-fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_reset(struct drm_i915_private *i915,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- if (dev_priv->uncore.fw_domains == 0)
+ if (!fw_domains)
return;
- for_each_fw_domain_masked(d, fw_domains, dev_priv)
- fw_domain_reset(d);
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
- fw_domains_posting_read(dev_priv);
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_reset(i915, d);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
@@ -236,7 +226,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct drm_i915_private *dev_priv = domain->i915;
+ struct drm_i915_private *dev_priv =
+ container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
unsigned long irqflags;
assert_rpm_device_not_suspended(dev_priv);
@@ -266,9 +257,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
* timers are run before holding.
*/
while (1) {
+ unsigned int tmp;
+
active_domains = 0;
- for_each_fw_domain(domain, dev_priv) {
+ for_each_fw_domain(domain, dev_priv, tmp) {
if (hrtimer_cancel(&domain->timer) == 0)
continue;
@@ -277,7 +270,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- for_each_fw_domain(domain, dev_priv) {
+ for_each_fw_domain(domain, dev_priv, tmp) {
if (hrtimer_active(&domain->timer))
active_domains |= domain->mask;
}
@@ -300,7 +293,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
- fw_domains_reset(dev_priv, FORCEWAKE_ALL);
+ fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
if (restore) { /* If reset with a user forcewake, try to restore */
if (fw)
@@ -457,13 +450,13 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
if (domain->wake_count++)
fw_domains &= ~domain->mask;
- }
if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
@@ -520,10 +513,11 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
if (WARN_ON(domain->wake_count == 0))
continue;
@@ -928,8 +922,11 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
- for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
fw_domain_arm_timer(domain);
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
@@ -1141,41 +1138,27 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
WARN_ON(d->wake_count);
+ WARN_ON(!i915_mmio_reg_valid(reg_set));
+ WARN_ON(!i915_mmio_reg_valid(reg_ack));
+
d->wake_count = 0;
d->reg_set = reg_set;
d->reg_ack = reg_ack;
- if (IS_GEN6(dev_priv)) {
- d->val_reset = 0;
- d->val_set = FORCEWAKE_KERNEL;
- d->val_clear = 0;
- } else {
- /* WaRsClearFWBitsAtReset:bdw,skl */
- d->val_reset = _MASKED_BIT_DISABLE(0xffff);
- d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
- d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- d->reg_post = FORCEWAKE_ACK_VLV;
- else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
- d->reg_post = ECOBUS;
-
- d->i915 = dev_priv;
d->id = domain_id;
BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
- d->mask = 1 << domain_id;
+ d->mask = BIT(domain_id);
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
d->timer.function = intel_uncore_fw_release_timer;
- dev_priv->uncore.fw_domains |= (1 << domain_id);
+ dev_priv->uncore.fw_domains |= BIT(domain_id);
- fw_domain_reset(d);
+ fw_domain_reset(dev_priv, d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
@@ -1183,6 +1166,17 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
return;
+ if (IS_GEN6(dev_priv)) {
+ dev_priv->uncore.fw_reset = 0;
+ dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
+ dev_priv->uncore.fw_clear = 0;
+ } else {
+ /* WaRsClearFWBitsAtReset:bdw,skl */
+ dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
+ dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+ dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+ }
+
if (IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1246,9 +1240,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
spin_lock_irq(&dev_priv->uncore.lock);
- fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
+ fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+ fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
spin_unlock_irq(&dev_priv->uncore.lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
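
A mechanical change running through the uncore diff is the extra 'unsigned int tmp' handed to for_each_fw_domain_masked(): iteration now walks a scratch copy of the domain bitmask rather than keeping iterator state in the domain structs. One way such a mask walker can be built (a sketch, not the kernel macro; __builtin_ctz is a GCC/Clang builtin):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Visit each set bit in mask; tmp is scratch so mask itself survives. */
#define for_each_set_domain(id, mask, tmp) \
        for ((tmp) = (mask); \
             (tmp) && (((id) = (unsigned int)__builtin_ctz(tmp)), 1); \
             (tmp) &= (tmp) - 1)

int main(void)
{
        unsigned int mask = BIT(0) | BIT(2) | BIT(5);
        unsigned int id, tmp;

        for_each_set_domain(id, mask, tmp)
                printf("domain %u\n", id);
        return 0;
}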
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 926b24c117d6..98b7aac41eec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -291,8 +291,6 @@ static int begin_live_test(struct live_test *t,
return err;
}
- i915_gem_retire_requests(i915);
-
i915->gpu_error.missed_irq_rings = 0;
t->reset_count = i915_reset_count(&i915->gpu_error);
@@ -303,7 +301,9 @@ static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
- if (wait_for(intel_engines_are_idle(i915), 1)) {
+ i915_gem_retire_requests(i915);
+
+ if (wait_for(intel_engines_are_idle(i915), 10)) {
pr_err("%s(%s): GPU not idle\n", t->func, t->name);
return -EIO;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 6ec7c731a267..aa31d6c0cdfb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -235,7 +235,6 @@ static void hang_fini(struct hang *h)
i915_gem_object_put(h->hws);
i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
- i915_gem_retire_requests(h->i915);
}
static int igt_hang_sanitycheck(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 8d5ba037064c..0ad624a1db90 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -118,7 +118,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
INIT_LIST_HEAD(&ring->request_list);
- ring->last_retired_head = -1;
intel_ring_update_space(ring);
return ring;
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index eb2cda8e2b9f..1cc5d2931753 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -189,6 +189,13 @@ static unsigned int random(unsigned long n,
return 1 + (prandom_u32_state(rnd) % 1024);
}
+static inline bool page_contiguous(struct page *first,
+ struct page *last,
+ unsigned long npages)
+{
+ return first + npages == last;
+}
+
static int alloc_table(struct pfn_table *pt,
unsigned long count, unsigned long max,
npages_fn_t npages_fn,
@@ -216,7 +223,9 @@ static int alloc_table(struct pfn_table *pt,
unsigned long npages = npages_fn(n, count, rnd);
/* Nobody expects the Sparse Memmap! */
- if (pfn_to_page(pfn + npages) != pfn_to_page(pfn) + npages) {
+ if (!page_contiguous(pfn_to_page(pfn),
+ pfn_to_page(pfn + npages),
+ npages)) {
sg_free_table(&pt->st);
return -ENOSPC;
}
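
page_contiguous() above gives the sparse-memmap test a name: a pfn range can only be coalesced into one scatterlist segment when the struct page array backing it is itself contiguous, i.e. one-past-the-last page sits exactly npages after the first. The same comparison on a plain array (struct page here is a stand-in):

#include <stdbool.h>
#include <stddef.h>

struct page { int dummy; };

static bool page_contiguous(const struct page *first,
                            const struct page *last,
                            size_t npages)
{
        /* 'last' is one past the end of the range. */
        return first + npages == last;
}

int main(void)
{
        struct page map[8];

        /* Pages 2..5: four pages, one past the end is &map[6]. */
        return page_contiguous(&map[2], &map[6], 4) ? 0 : 1;
}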
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index f2c9ae822149..c9e439c82241 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -31,13 +31,6 @@ config DRM_IMX_LDB
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
-config DRM_IMX_IPUV3
- tristate
- depends on DRM_IMX
- depends on IMX_IPUV3_CORE
- default y if DRM_IMX=y
- default m if DRM_IMX=m
-
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index f3ecd8903d97..16ecef33e008 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,5 +1,5 @@
-imxdrm-objs := imx-drm-core.o
+imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX) += imxdrm.o
@@ -7,6 +7,5 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1888bf3920fc..50add2f9e250 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -422,7 +422,23 @@ static struct platform_driver imx_drm_pdrv = {
.of_match_table = imx_drm_dt_ids,
},
};
-module_platform_driver(imx_drm_pdrv);
+
+static struct platform_driver * const drivers[] = {
+ &imx_drm_pdrv,
+ &ipu_drm_driver,
+};
+
+static int __init imx_drm_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(imx_drm_init);
+
+static void __exit imx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(imx_drm_exit);
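
Registering both drivers through one platform_register_drivers() array, as above, gets all-or-nothing semantics: if a later driver fails to register, the earlier ones are unregistered again in reverse order. A simplified model of that register-or-unwind loop (the helpers are stand-ins, not the kernel API):

#include <stddef.h>
#include <stdio.h>

struct pdrv { const char *name; };

static int register_one(const struct pdrv *d)
{
        printf("register %s\n", d->name);
        return 0;
}

static void unregister_one(const struct pdrv *d)
{
        printf("unregister %s\n", d->name);
}

/* Register every driver; on failure, unwind the ones already done. */
static int register_drivers(const struct pdrv * const drv[], size_t n)
{
        size_t i;
        int ret = 0;

        for (i = 0; i < n; i++) {
                ret = register_one(drv[i]);
                if (ret)
                        break;
        }
        if (ret)
                while (i--)
                        unregister_one(drv[i]);
        return ret;
}

int main(void)
{
        static const struct pdrv core = { "imx-drm" }, crtc = { "imx-ipuv3-crtc" };
        const struct pdrv * const drivers[] = { &core, &crtc };

        return register_drivers(drivers, 2);
}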
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 295434b199db..f6dd64be9cd5 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -29,6 +29,8 @@ int imx_drm_init_drm(struct platform_device *pdev,
int preferred_bpp);
int imx_drm_exit_drm(void);
+extern struct platform_driver ipu_drm_driver;
+
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index dab9d50ffd8c..5456c15d962c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -465,16 +465,10 @@ static int ipu_drm_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver ipu_drm_driver = {
+struct platform_driver ipu_drm_driver = {
.driver = {
.name = "imx-ipuv3-crtc",
},
.probe = ipu_drm_probe,
.remove = ipu_drm_remove,
};
-module_platform_driver(ipu_drm_driver);
-
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-crtc");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index c70310206ac5..a14d7d64d7b1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -35,18 +35,28 @@
#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
-#define DISP_REG_OVL_ADDR(n) (0x0f40 + 0x20 * (n))
+#define DISP_REG_OVL_ADDR_MT2701 0x0040
+#define DISP_REG_OVL_ADDR_MT8173 0x0f40
+#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
#define OVL_RDMA_MEM_GMC 0x40402020
#define OVL_CON_BYTE_SWAP BIT(24)
-#define OVL_CON_CLRFMT_RGB565 (0 << 12)
-#define OVL_CON_CLRFMT_RGB888 (1 << 12)
+#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
+#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
+ 0 : OVL_CON_CLRFMT_RGB)
+#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
+ OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
+struct mtk_disp_ovl_data {
+ unsigned int addr;
+ bool fmt_rgb565_is_0;
+};
+
/**
* struct mtk_disp_ovl - DISP_OVL driver structure
* @ddp_comp - structure containing type enum and hardware resources
@@ -55,8 +65,14 @@
struct mtk_disp_ovl {
struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ const struct mtk_disp_ovl_data *data;
};
+static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_ovl, ddp_comp);
+}
+
static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_ovl *priv = dev_id;
@@ -76,20 +92,18 @@ static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
struct drm_crtc *crtc)
{
- struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
- ddp_comp);
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- priv->crtc = crtc;
+ ovl->crtc = crtc;
writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
}
static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
{
- struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
- ddp_comp);
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- priv->crtc = NULL;
+ ovl->crtc = NULL;
writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
}
@@ -138,18 +152,18 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(unsigned int fmt)
+static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
- return OVL_CON_CLRFMT_RGB565;
+ return OVL_CON_CLRFMT_RGB565(ovl);
case DRM_FORMAT_BGR565:
- return OVL_CON_CLRFMT_RGB565 | OVL_CON_BYTE_SWAP;
+ return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGB888:
- return OVL_CON_CLRFMT_RGB888;
+ return OVL_CON_CLRFMT_RGB888(ovl);
case DRM_FORMAT_BGR888:
- return OVL_CON_CLRFMT_RGB888 | OVL_CON_BYTE_SWAP;
+ return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return OVL_CON_CLRFMT_ARGB8888;
@@ -168,6 +182,7 @@ static unsigned int ovl_fmt_convert(unsigned int fmt)
static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
struct mtk_plane_state *state)
{
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
@@ -179,7 +194,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
if (!pending->enable)
mtk_ovl_layer_off(comp, idx);
- con = ovl_fmt_convert(fmt);
+ con = ovl_fmt_convert(ovl, fmt);
if (idx != 0)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
@@ -187,7 +202,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
- writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(idx));
+ writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
if (pending->enable)
mtk_ovl_layer_on(comp, idx);
@@ -264,6 +279,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
+ priv->data = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -287,8 +304,21 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
return 0;
}
+static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT2701,
+ .fmt_rgb565_is_0 = false,
+};
+
+static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .fmt_rgb565_is_0 = true,
+};
+
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-ovl", },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = &mt2701_ovl_driver_data},
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = &mt8173_ovl_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
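The new OVL_CON_CLRFMT_RGB565(ovl), OVL_CON_CLRFMT_RGB888(ovl) and DISP_REG_OVL_ADDR(ovl, idx) helpers consume the two mtk_disp_ovl_data fields introduced at the top of this file, but their definitions fall outside the hunks shown. A minimal sketch of how such per-SoC dispatch macros are typically written against those fields — a hypothetical reconstruction, not the patch's literal definitions:

/* Hypothetical reconstruction -- illustrates the dispatch only. */
#define OVL_CON_CLRFMT_RGB	(1 << 12)	/* assumed RGB format bit */

/* MT8173 encodes RGB565 as 0 and RGB888 as the RGB bit; MT2701 swaps them */
#define OVL_CON_CLRFMT_RGB565(ovl) \
	((ovl)->data->fmt_rgb565_is_0 ? 0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl) \
	((ovl)->data->fmt_rgb565_is_0 ? OVL_CON_CLRFMT_RGB : 0)

/* layer address: per-SoC base from the match data, fixed per-layer stride */
#define DISP_REG_OVL_ADDR(ovl, n)	((ovl)->data->addr + 0x20 * (n))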
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0df05f95b916..b68a51376f83 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -38,6 +38,11 @@
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
+#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
+
+struct mtk_disp_rdma_data {
+ unsigned int fifo_size;
+};
/**
* struct mtk_disp_rdma - DISP_RDMA driver structure
@@ -47,8 +52,14 @@
struct mtk_disp_rdma {
struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ const struct mtk_disp_rdma_data *data;
};
+static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_rdma, ddp_comp);
+}
+
static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_rdma *priv = dev_id;
@@ -77,20 +88,18 @@ static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
struct drm_crtc *crtc)
{
- struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
- ddp_comp);
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- priv->crtc = crtc;
+ rdma->crtc = crtc;
rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
RDMA_FRAME_END_INT);
}
static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
{
- struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
- ddp_comp);
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- priv->crtc = NULL;
+ rdma->crtc = NULL;
rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
}
@@ -111,6 +120,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
{
unsigned int threshold;
unsigned int reg;
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
@@ -123,7 +133,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
*/
threshold = width * height * vrefresh * 4 * 7 / 1000000;
reg = RDMA_FIFO_UNDERFLOW_EN |
- RDMA_FIFO_PSEUDO_SIZE(SZ_8K) |
+ RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
}
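For a feel of the numbers in the threshold formula above: with the constants visible here (4 bytes per pixel, and a 7/1000000 factor that appears to buffer on the order of 7 us of scanout), a 1920x1080 panel at 60 Hz works out as below. The mode values are illustrative, not taken from the patch:

/* Illustrative evaluation of the RDMA FIFO threshold (userspace sketch). */
#include <stdio.h>

int main(void)
{
	unsigned long long width = 1920, height = 1080, vrefresh = 60;
	unsigned long long threshold;

	/* threshold = width * height * vrefresh * 4 * 7 / 1000000 */
	threshold = width * height * vrefresh * 4 * 7 / 1000000;

	/* 3483 bytes, i.e. 217 of the hardware's 16-byte FIFO units,
	 * comfortably below both the MT2701 (SZ_4K) and MT8173 (SZ_8K)
	 * pseudo FIFO sizes set in this patch. */
	printf("threshold = %llu bytes (%llu units)\n",
	       threshold, threshold / 16);
	return 0;
}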
@@ -208,6 +218,8 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
return ret;
}
+ priv->data = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_rdma_component_ops);
@@ -224,8 +236,19 @@ static int mtk_disp_rdma_remove(struct platform_device *pdev)
return 0;
}
+static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
+ .fifo_size = SZ_4K,
+};
+
+static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
+ .fifo_size = SZ_8K,
+};
+
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-rdma", },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = &mt2701_rdma_driver_data},
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = &mt8173_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 69982f5a6198..6b08774e5501 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -327,6 +327,42 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
pm_runtime_put(drm->dev);
}
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
+ struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ unsigned int i;
+
+ /*
+ * TODO: instead of updating the registers here, we should prepare
+ * working registers in atomic_commit and let the hardware command
+ * queue update module registers on vblank.
+ */
+ if (state->pending_config) {
+ mtk_ddp_comp_config(ovl, state->pending_width,
+ state->pending_height,
+ state->pending_vrefresh, 0);
+
+ state->pending_config = false;
+ }
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < OVL_LAYER_NR; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (plane_state->pending.config) {
+ mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ plane_state->pending.config = false;
+ }
+ }
+ mtk_crtc->pending_planes = false;
+ }
+}
+
static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
@@ -403,6 +439,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
unsigned int pending_planes = 0;
int i;
@@ -424,6 +461,12 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
+
+ if (priv->data->shadow_register) {
+ mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_crtc_ddp_config(crtc);
+ mtk_disp_mutex_release(mtk_crtc->mutex);
+ }
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -471,36 +514,10 @@ err_cleanup_crtc:
void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
- unsigned int i;
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
- /*
- * TODO: instead of updating the registers here, we should prepare
- * working registers in atomic_commit and let the hardware command
- * queue update module registers on vblank.
- */
- if (state->pending_config) {
- mtk_ddp_comp_config(ovl, state->pending_width,
- state->pending_height,
- state->pending_vrefresh, 0);
-
- state->pending_config = false;
- }
-
- if (mtk_crtc->pending_planes) {
- for (i = 0; i < OVL_LAYER_NR; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
-
- if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(ovl, i, plane_state);
- plane_state->pending.config = false;
- }
- }
- mtk_crtc->pending_planes = false;
- }
+ if (!priv->data->shadow_register)
+ mtk_crtc_ddp_config(crtc);
mtk_drm_finish_page_flip(mtk_crtc);
}
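Taken together, the two crtc hunks above only move code: mtk_crtc_ddp_config() is extracted unchanged so it can be driven either from atomic_flush (MT2701, which has hardware shadow registers) or from the frame-done interrupt (MT8173, which does not). A condensed, illustrative view of the resulting flow — mtk_crtc_ddp_config() is static to this file, so this is a sketch rather than a usable API:

/* Sketch: where register programming happens on each SoC generation. */
static void example_apply_config(struct drm_crtc *crtc, bool from_irq)
{
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (priv->data->shadow_register) {
		/* MT2701: program under the display mutex from
		 * atomic_flush; the hardware latches the shadowed
		 * values at the next start-of-frame. */
		if (!from_irq) {
			mtk_disp_mutex_acquire(mtk_crtc->mutex);
			mtk_crtc_ddp_config(crtc);
			mtk_disp_mutex_release(mtk_crtc->mutex);
		}
	} else if (from_irq) {
		/* MT8173: no shadow registers, so pending state is
		 * flushed from the frame-done interrupt instead. */
		mtk_crtc_ddp_config(crtc);
	}
}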
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 17ba9355a49c..8130f3dab661 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -31,26 +32,40 @@
#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+
#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define MUTEX_MOD_DISP_OVL0 BIT(11)
-#define MUTEX_MOD_DISP_OVL1 BIT(12)
-#define MUTEX_MOD_DISP_RDMA0 BIT(13)
-#define MUTEX_MOD_DISP_RDMA1 BIT(14)
-#define MUTEX_MOD_DISP_RDMA2 BIT(15)
-#define MUTEX_MOD_DISP_WDMA0 BIT(16)
-#define MUTEX_MOD_DISP_WDMA1 BIT(17)
-#define MUTEX_MOD_DISP_COLOR0 BIT(18)
-#define MUTEX_MOD_DISP_COLOR1 BIT(19)
-#define MUTEX_MOD_DISP_AAL BIT(20)
-#define MUTEX_MOD_DISP_GAMMA BIT(21)
-#define MUTEX_MOD_DISP_UFOE BIT(22)
-#define MUTEX_MOD_DISP_PWM0 BIT(23)
-#define MUTEX_MOD_DISP_PWM1 BIT(24)
-#define MUTEX_MOD_DISP_OD BIT(25)
+#define INT_MUTEX BIT(1)
+
+#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11)
+#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12)
+#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13)
+#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14)
+#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15)
+#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16)
+#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17)
+#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18)
+#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19)
+#define MT8173_MUTEX_MOD_DISP_AAL BIT(20)
+#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21)
+#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22)
+#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23)
+#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24)
+#define MT8173_MUTEX_MOD_DISP_OD BIT(25)
+
+#define MT2701_MUTEX_MOD_DISP_OVL BIT(3)
+#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6)
+#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7)
+#define MT2701_MUTEX_MOD_DISP_BLS BIT(9)
+#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10)
+#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12)
#define MUTEX_SOF_SINGLE_MODE 0
#define MUTEX_SOF_DSI0 1
@@ -67,6 +82,10 @@
#define DPI0_SEL_IN_RDMA1 0x1
#define COLOR1_SEL_IN_OVL1 0x1
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define DSI_SEL_IN_BLS 0x0
+
struct mtk_disp_mutex {
int id;
bool claimed;
@@ -77,24 +96,34 @@ struct mtk_ddp {
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
+ const unsigned int *mutex_mod;
+};
+
+static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS,
+ [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR,
+ [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL,
+ [DDP_COMPONENT_RDMA0] = MT2701_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2701_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
-static const unsigned int mutex_mod[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = MUTEX_MOD_DISP_AAL,
- [DDP_COMPONENT_COLOR0] = MUTEX_MOD_DISP_COLOR0,
- [DDP_COMPONENT_COLOR1] = MUTEX_MOD_DISP_COLOR1,
- [DDP_COMPONENT_GAMMA] = MUTEX_MOD_DISP_GAMMA,
- [DDP_COMPONENT_OD] = MUTEX_MOD_DISP_OD,
- [DDP_COMPONENT_OVL0] = MUTEX_MOD_DISP_OVL0,
- [DDP_COMPONENT_OVL1] = MUTEX_MOD_DISP_OVL1,
- [DDP_COMPONENT_PWM0] = MUTEX_MOD_DISP_PWM0,
- [DDP_COMPONENT_PWM1] = MUTEX_MOD_DISP_PWM1,
- [DDP_COMPONENT_RDMA0] = MUTEX_MOD_DISP_RDMA0,
- [DDP_COMPONENT_RDMA1] = MUTEX_MOD_DISP_RDMA1,
- [DDP_COMPONENT_RDMA2] = MUTEX_MOD_DISP_RDMA2,
- [DDP_COMPONENT_UFOE] = MUTEX_MOD_DISP_UFOE,
- [DDP_COMPONENT_WDMA0] = MUTEX_MOD_DISP_WDMA0,
- [DDP_COMPONENT_WDMA1] = MUTEX_MOD_DISP_WDMA1,
+static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT8173_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_RDMA0] = MT8173_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8173_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT8173_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT8173_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT8173_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
@@ -106,6 +135,9 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
*addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
value = OVL0_MOUT_EN_COLOR0;
+ } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
+ value = OVL_MOUT_EN_RDMA;
} else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
value = OD_MOUT_EN_RDMA0;
@@ -143,6 +175,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
*addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
value = COLOR1_SEL_IN_OVL1;
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSI_SEL;
+ value = DSI_SEL_IN_BLS;
} else {
value = 0;
}
@@ -150,6 +185,15 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
return value;
}
+static void mtk_ddp_sout_sel(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0)
+ writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+}
+
void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
@@ -162,6 +206,8 @@ void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
writel_relaxed(reg, config_regs + addr);
}
+ mtk_ddp_sout_sel(config_regs, cur, next);
+
value = mtk_ddp_sel_in(cur, next, &addr);
if (value) {
reg = readl_relaxed(config_regs + addr) | value;
@@ -247,7 +293,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
break;
default:
reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg |= mutex_mod[id];
+ reg |= ddp->mutex_mod[id];
writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
return;
}
@@ -273,7 +319,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
break;
default:
reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg &= ~mutex_mod[id];
+ reg &= ~(ddp->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
break;
}
@@ -299,6 +345,27 @@ void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex)
writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
}
+void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+ u32 tmp;
+
+ writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+ writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id));
+ if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id),
+ tmp, tmp & INT_MUTEX, 1, 10000))
+ pr_err("could not acquire mutex %d\n", mutex->id);
+}
+
+void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+
+ writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id));
+}
+
static int mtk_ddp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -326,6 +393,8 @@ static int mtk_ddp_probe(struct platform_device *pdev)
return PTR_ERR(ddp->regs);
}
+ ddp->mutex_mod = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, ddp);
return 0;
@@ -337,7 +406,8 @@ static int mtk_ddp_remove(struct platform_device *pdev)
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-mutex" },
+ { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
+ { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
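mtk_disp_mutex_acquire() above is the reason <linux/iopoll.h> is now included: readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) re-reads addr into val every delay_us microseconds until cond holds, returning 0 on success and -ETIMEDOUT otherwise. A generic usage sketch (names are illustrative):

#include <linux/iopoll.h>

/* Sketch: poll a status register until BIT(1) is set, for up to 10 ms. */
static int example_wait_ready(void __iomem *regs)
{
	u32 val;

	return readl_poll_timeout_atomic(regs, val, val & BIT(1), 1, 10000);
}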
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index 92c11752ff65..f9a799168077 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -37,5 +37,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
enum mtk_ddp_comp_id id);
void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex);
#endif /* MTK_DRM_DDP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 48cc01fd20c7..8b52416b6e41 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -39,9 +39,11 @@
#define DISP_REG_UFO_START 0x0000
#define DISP_COLOR_CFG_MAIN 0x0400
-#define DISP_COLOR_START 0x0c00
-#define DISP_COLOR_WIDTH 0x0c50
-#define DISP_COLOR_HEIGHT 0x0c54
+#define DISP_COLOR_START_MT2701 0x0f00
+#define DISP_COLOR_START_MT8173 0x0c00
+#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
+#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
+#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
@@ -80,6 +82,20 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+struct mtk_disp_color_data {
+ unsigned int color_offset;
+};
+
+struct mtk_disp_color {
+ struct mtk_ddp_comp ddp_comp;
+ const struct mtk_disp_color_data *data;
+};
+
+static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_color, ddp_comp);
+}
+
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
unsigned int CFG)
{
@@ -107,15 +123,19 @@ static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
{
- writel(w, comp->regs + DISP_COLOR_WIDTH);
- writel(h, comp->regs + DISP_COLOR_HEIGHT);
+ struct mtk_disp_color *color = comp_to_color(comp);
+
+ writel(w, comp->regs + DISP_COLOR_WIDTH(color));
+ writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
}
static void mtk_color_start(struct mtk_ddp_comp *comp)
{
+ struct mtk_disp_color *color = comp_to_color(comp);
+
writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
comp->regs + DISP_COLOR_CFG_MAIN);
- writel(0x1, comp->regs + DISP_COLOR_START);
+ writel(0x1, comp->regs + DISP_COLOR_START(color));
}
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
@@ -236,6 +256,7 @@ static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_PWM] = "pwm",
[MTK_DISP_MUTEX] = "mutex",
[MTK_DISP_OD] = "od",
+ [MTK_DISP_BLS] = "bls",
};
struct mtk_ddp_comp_match {
@@ -246,6 +267,7 @@ struct mtk_ddp_comp_match {
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
@@ -264,6 +286,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
+static const struct mtk_disp_color_data mt2701_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT2701,
+};
+
+static const struct mtk_disp_color_data mt8173_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT8173,
+};
+
+static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = &mt2701_color_driver_data},
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = &mt8173_color_driver_data},
+ {},
+};
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type)
{
@@ -286,14 +324,29 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
+ const struct of_device_id *match;
+ struct mtk_disp_color *color;
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
+ type = mtk_ddp_matches[comp_id].type;
+ if (type == MTK_DISP_COLOR) {
+ devm_kfree(dev, comp);
+ color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL);
+ if (!color)
+ return -ENOMEM;
+
+ match = of_match_node(mtk_disp_color_driver_dt_match, node);
+ color->data = match->data;
+ comp = &color->ddp_comp;
+ }
+
comp->id = comp_id;
comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
- if (comp_id == DDP_COMPONENT_DPI0 ||
+ if (comp_id == DDP_COMPONENT_BLS ||
+ comp_id == DDP_COMPONENT_DPI0 ||
comp_id == DDP_COMPONENT_DSI0 ||
comp_id == DDP_COMPONENT_PWM0) {
comp->regs = NULL;
@@ -308,8 +361,6 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
if (IS_ERR(comp->clk))
comp->clk = NULL;
- type = mtk_ddp_matches[comp_id].type;
-
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
if (type != MTK_DISP_OVL &&
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 22a33ee451c4..0828cf8bf85c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -36,11 +36,13 @@ enum mtk_ddp_comp_type {
MTK_DISP_PWM,
MTK_DISP_MUTEX,
MTK_DISP_OD,
+ MTK_DISP_BLS,
MTK_DDP_COMP_TYPE_MAX,
};
enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL,
+ DDP_COMPONENT_BLS,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DPI0,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index f5a1fd9b3ecc..f6c8ec4c7dbc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -128,7 +128,20 @@ static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
.atomic_commit = mtk_atomic_commit,
};
-static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
+static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_BLS,
+ DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI0,
+};
+
+static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_AAL,
@@ -139,7 +152,7 @@ static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
DDP_COMPONENT_PWM0,
};
-static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
+static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL1,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_GAMMA,
@@ -147,6 +160,21 @@ static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
+ .main_path = mt2701_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
+ .ext_path = mt2701_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext),
+ .shadow_register = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
+ .main_path = mt8173_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
+ .ext_path = mt8173_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
@@ -189,17 +217,19 @@ static int mtk_drm_kms_init(struct drm_device *drm)
* and each statically assigned to a crtc:
* OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ...
*/
- ret = mtk_drm_crtc_create(drm, mtk_ddp_main, ARRAY_SIZE(mtk_ddp_main));
+ ret = mtk_drm_crtc_create(drm, private->data->main_path,
+ private->data->main_len);
if (ret < 0)
goto err_component_unbind;
/* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. */
- ret = mtk_drm_crtc_create(drm, mtk_ddp_ext, ARRAY_SIZE(mtk_ddp_ext));
+ ret = mtk_drm_crtc_create(drm, private->data->ext_path,
+ private->data->ext_len);
if (ret < 0)
goto err_component_unbind;
/* Use OVL device for all DMA memory allocations */
- np = private->comp_node[mtk_ddp_main[0]] ?:
- private->comp_node[mtk_ddp_ext[0]];
+ np = private->comp_node[private->data->main_path[0]] ?:
+ private->comp_node[private->data->ext_path[0]];
pdev = of_find_device_by_node(np);
if (!pdev) {
ret = -ENODEV;
@@ -328,16 +358,22 @@ static const struct component_master_ops mtk_drm_ops = {
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
+ { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
{ }
@@ -359,6 +395,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
mutex_init(&private->commit.lock);
INIT_WORK(&private->commit.work, mtk_atomic_work);
+ private->data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
private->config_regs = devm_ioremap_resource(dev, mem);
@@ -510,7 +547,10 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
mtk_drm_sys_resume);
static const struct of_device_id mtk_drm_of_ids[] = {
- { .compatible = "mediatek,mt8173-mmsys", },
+ { .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data},
{ }
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index df322a7a5fcb..aef8747d810b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -28,6 +28,14 @@ struct drm_fb_helper;
struct drm_property;
struct regmap;
+struct mtk_mmsys_driver_data {
+ const enum mtk_ddp_comp_id *main_path;
+ unsigned int main_len;
+ const enum mtk_ddp_comp_id *ext_path;
+ unsigned int ext_len;
+ bool shadow_register;
+};
+
struct mtk_drm_private {
struct drm_device *drm;
struct device *dma_dev;
@@ -39,6 +47,7 @@ struct mtk_drm_private {
void __iomem *config_regs;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+ const struct mtk_mmsys_driver_data *data;
struct {
struct drm_atomic_state *state;
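The same pattern recurs through this series: each probe function calls of_device_get_match_data(), which returns the .data pointer of whichever of_device_id entry matched the device node, so all later code stays table-driven. In outline (a sketch only; the defensive NULL check is not part of the patch):

#include <linux/of_device.h>

static int example_get_driver_data(struct platform_device *pdev)
{
	const struct mtk_mmsys_driver_data *data;

	/* &mt2701_mmsys_driver_data or &mt8173_mmsys_driver_data,
	 * depending on which compatible string matched this node */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;	/* hypothetical defensive check */

	return 0;
}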
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4d837b29ee9e..808b995a990f 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,19 +19,28 @@
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <video/mipi_display.h>
#include <video/videomode.h>
#include "mtk_drm_ddp_comp.h"
-#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
-#define DSI_HOST_FIFO_DEPTH 64
-
#define DSI_START 0x00
+#define DSI_INTEN 0x08
+
+#define DSI_INTSTA 0x0c
+#define LPRX_RD_RDY_INT_FLAG BIT(0)
+#define CMD_DONE_INT_FLAG BIT(1)
+#define TE_RDY_INT_FLAG BIT(2)
+#define VM_DONE_INT_FLAG BIT(3)
+#define EXT_TE_RDY_INT_FLAG BIT(4)
+#define DSI_BUSY BIT(31)
+
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
@@ -46,7 +55,7 @@
#define MIX_MODE BIT(17)
#define DSI_TXRX_CTRL 0x18
-#define VC_NUM (2 << 0)
+#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
@@ -72,8 +81,19 @@
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
+#define DSI_CMDQ_SIZE 0x60
+#define CMDQ_SIZE 0x3f
+
#define DSI_HSTX_CKL_WC 0x64
+#define DSI_RX_DATA0 0x74
+#define DSI_RX_DATA1 0x78
+#define DSI_RX_DATA2 0x7c
+#define DSI_RX_DATA3 0x80
+
+#define DSI_RACK 0x84
+#define RACK BIT(0)
+
#define DSI_PHY_LCCON 0x104
#define LC_HS_TX_EN BIT(0)
#define LC_ULPM_EN BIT(1)
@@ -106,6 +126,19 @@
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
+#define DSI_VM_CMD_CON 0x130
+#define VM_CMD_EN BIT(0)
+#define TS_VFP_EN BIT(5)
+
+#define DSI_CMDQ0 0x180
+#define CONFIG (0xff << 0)
+#define SHORT_PACKET 0
+#define LONG_PACKET 2
+#define BTA BIT(2)
+#define DATA_ID (0xff << 8)
+#define DATA_0 (0xff << 16)
+#define DATA_1 (0xff << 24)
+
#define T_LPX 5
#define T_HS_PREP 6
#define T_HS_TRAIL 8
@@ -114,6 +147,12 @@
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
+#define MTK_DSI_HOST_IS_READ(type) \
+ ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
+ (type == MIPI_DSI_DCS_READ))
+
struct phy;
struct mtk_dsi {
@@ -140,6 +179,8 @@ struct mtk_dsi {
struct videomode vm;
int refcount;
bool enabled;
+ u32 irq_data;
+ wait_queue_head_t irq_wait_queue;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -164,7 +205,7 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
-static void dsi_phy_timconfig(struct mtk_dsi *dsi)
+static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 ui, cycle_time;
@@ -196,118 +237,39 @@ static void mtk_dsi_disable(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}
-static void mtk_dsi_reset(struct mtk_dsi *dsi)
+static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
-static int mtk_dsi_poweron(struct mtk_dsi *dsi)
-{
- struct device *dev = dsi->dev;
- int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
-
- if (++dsi->refcount != 1)
- return 0;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB565:
- bit_per_pixel = 16;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- bit_per_pixel = 18;
- break;
- case MIPI_DSI_FMT_RGB666:
- case MIPI_DSI_FMT_RGB888:
- default:
- bit_per_pixel = 24;
- break;
- }
-
- /**
- * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock * 1000;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
-
- ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
- if (ret < 0) {
- dev_err(dev, "Failed to set data rate: %d\n", ret);
- goto err_refcount;
- }
-
- phy_power_on(dsi->phy);
-
- ret = clk_prepare_enable(dsi->engine_clk);
- if (ret < 0) {
- dev_err(dev, "Failed to enable engine clock: %d\n", ret);
- goto err_phy_power_off;
- }
-
- ret = clk_prepare_enable(dsi->digital_clk);
- if (ret < 0) {
- dev_err(dev, "Failed to enable digital clock: %d\n", ret);
- goto err_disable_engine_clk;
- }
-
- mtk_dsi_enable(dsi);
- mtk_dsi_reset(dsi);
- dsi_phy_timconfig(dsi);
-
- return 0;
-
-err_disable_engine_clk:
- clk_disable_unprepare(dsi->engine_clk);
-err_phy_power_off:
- phy_power_off(dsi->phy);
-err_refcount:
- dsi->refcount--;
- return ret;
-}
-
-static void dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
+static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}
-static void dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
+static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}
-static void dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
+static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}
-static void dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
+static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}
-static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
+static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
u32 tmp_reg1;
@@ -315,30 +277,37 @@ static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
}
-static void dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
+static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
- if (enter && !dsi_clk_hs_state(dsi))
+ if (enter && !mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
- else if (!enter && dsi_clk_hs_state(dsi))
+ else if (!enter && mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}
-static void dsi_set_mode(struct mtk_dsi *dsi)
+static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
u32 vid_mode = CMD_MODE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- vid_mode = SYNC_PULSE_MODE;
-
- if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
- !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
vid_mode = BURST_MODE;
+ else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ vid_mode = SYNC_PULSE_MODE;
+ else
+ vid_mode = SYNC_EVENT_MODE;
}
writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}
-static void dsi_ps_control_vact(struct mtk_dsi *dsi)
+static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
+ mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
+}
+
+static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
struct videomode *vm = &dsi->vm;
u32 dsi_buf_bpp, ps_wc;
@@ -372,7 +341,7 @@ static void dsi_ps_control_vact(struct mtk_dsi *dsi)
writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
-static void dsi_rxtx_control(struct mtk_dsi *dsi)
+static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
u32 tmp_reg;
@@ -394,12 +363,15 @@ static void dsi_rxtx_control(struct mtk_dsi *dsi)
break;
}
+ tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
+ tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;
+
writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}
-static void dsi_ps_control(struct mtk_dsi *dsi)
+static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
- unsigned int dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp;
u32 tmp_reg;
switch (dsi->format) {
@@ -429,12 +401,12 @@ static void dsi_ps_control(struct mtk_dsi *dsi)
writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
-static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
+static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
- unsigned int horizontal_sync_active_byte;
- unsigned int horizontal_backporch_byte;
- unsigned int horizontal_frontporch_byte;
- unsigned int dsi_tmp_buf_bpp;
+ u32 horizontal_sync_active_byte;
+ u32 horizontal_backporch_byte;
+ u32 horizontal_frontporch_byte;
+ u32 dsi_tmp_buf_bpp;
struct videomode *vm = &dsi->vm;
@@ -463,7 +435,7 @@ static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
- dsi_ps_control(dsi);
+ mtk_dsi_ps_control(dsi);
}
static void mtk_dsi_start(struct mtk_dsi *dsi)
@@ -472,6 +444,184 @@ static void mtk_dsi_start(struct mtk_dsi *dsi)
writel(1, dsi->regs + DSI_START);
}
+static void mtk_dsi_stop(struct mtk_dsi *dsi)
+{
+ writel(0, dsi->regs + DSI_START);
+}
+
+static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
+{
+ writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
+}
+
+static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
+{
+ u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
+
+ writel(inten, dsi->regs + DSI_INTEN);
+}
+
+static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
+{
+ dsi->irq_data |= irq_bit;
+}
+
+static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
+{
+ dsi->irq_data &= ~irq_bit;
+}
+
+static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
+ unsigned int timeout)
+{
+ s32 ret = 0;
+	/* renamed so the local does not shadow the kernel's global jiffies */
+	unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
+
+	ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
+					       dsi->irq_data & irq_flag,
+					       timeout_jiffies);
+ if (ret == 0) {
+ DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ }
+
+ return ret;
+}
+
+static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
+{
+ struct mtk_dsi *dsi = dev_id;
+ u32 status, tmp;
+ u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
+
+ status = readl(dsi->regs + DSI_INTSTA) & flag;
+
+ if (status) {
+ do {
+ mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
+ tmp = readl(dsi->regs + DSI_INTSTA);
+ } while (tmp & DSI_BUSY);
+
+ mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
+ mtk_dsi_irq_data_set(dsi, status);
+ wake_up_interruptible(&dsi->irq_wait_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
+{
+ mtk_dsi_irq_data_clear(dsi, irq_flag);
+ mtk_dsi_set_cmd_mode(dsi);
+
+	if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
+		DRM_ERROR("failed to switch to cmd mode\n");
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int mtk_dsi_poweron(struct mtk_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ int ret;
+ u64 pixel_clock, total_bits;
+ u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+
+ if (++dsi->refcount != 1)
+ return 0;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB565:
+ bit_per_pixel = 16;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ bit_per_pixel = 18;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ bit_per_pixel = 24;
+ break;
+ }
+
+ /**
+ * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
+ * htotal_time = htotal * byte_per_pixel / num_lanes
+ * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
+ * mipi_ratio = (htotal_time + overhead_time) / htotal_time
+ * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
+ */
+ pixel_clock = dsi->vm.pixelclock * 1000;
+ htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
+ dsi->vm.hsync_len;
+ htotal_bits = htotal * bit_per_pixel;
+
+ overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
+ T_HS_EXIT;
+ overhead_bits = overhead_cycles * dsi->lanes * 8;
+ total_bits = htotal_bits + overhead_bits;
+
+ dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
+ htotal * dsi->lanes);
+
+ ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set data rate: %d\n", ret);
+ goto err_refcount;
+ }
+
+ phy_power_on(dsi->phy);
+
+ ret = clk_prepare_enable(dsi->engine_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable engine clock: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ ret = clk_prepare_enable(dsi->digital_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable digital clock: %d\n", ret);
+ goto err_disable_engine_clk;
+ }
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_phy_timconfig(dsi);
+
+ mtk_dsi_rxtx_control(dsi);
+ mtk_dsi_ps_control_vact(dsi);
+ mtk_dsi_set_vm_cmd(dsi);
+ mtk_dsi_config_vdo_timing(dsi);
+ mtk_dsi_set_interrupt_enable(dsi);
+
+ mtk_dsi_clk_ulp_mode_leave(dsi);
+ mtk_dsi_lane0_ulp_mode_leave(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 0);
+
+ if (dsi->panel) {
+ if (drm_panel_prepare(dsi->panel)) {
+ DRM_ERROR("failed to prepare the panel\n");
+ goto err_disable_digital_clk;
+ }
+ }
+
+ return 0;
+err_disable_digital_clk:
+ clk_disable_unprepare(dsi->digital_clk);
+err_disable_engine_clk:
+ clk_disable_unprepare(dsi->engine_clk);
+err_phy_power_off:
+ phy_power_off(dsi->phy);
+err_refcount:
+ dsi->refcount--;
+ return ret;
+}
+
static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
if (WARN_ON(dsi->refcount == 0))
@@ -480,8 +630,18 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
- dsi_lane0_ulp_mode_enter(dsi);
- dsi_clk_ulp_mode_enter(dsi);
+ if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
+ if (dsi->panel) {
+ if (drm_panel_unprepare(dsi->panel)) {
+ DRM_ERROR("failed to unprepare the panel\n");
+ return;
+ }
+ }
+ }
+
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_lane0_ulp_mode_enter(dsi);
+ mtk_dsi_clk_ulp_mode_enter(dsi);
mtk_dsi_disable(dsi);
@@ -498,35 +658,30 @@ static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
if (dsi->enabled)
return;
- if (dsi->panel) {
- if (drm_panel_prepare(dsi->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return;
- }
- }
-
ret = mtk_dsi_poweron(dsi);
if (ret < 0) {
DRM_ERROR("failed to power on dsi\n");
return;
}
- dsi_rxtx_control(dsi);
-
- dsi_clk_ulp_mode_leave(dsi);
- dsi_lane0_ulp_mode_leave(dsi);
- dsi_clk_hs_mode(dsi, 0);
- dsi_set_mode(dsi);
-
- dsi_ps_control_vact(dsi);
- dsi_config_vdo_timing(dsi);
-
- dsi_set_mode(dsi);
- dsi_clk_hs_mode(dsi, 1);
+ mtk_dsi_set_mode(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 1);
mtk_dsi_start(dsi);
+ if (dsi->panel) {
+ if (drm_panel_enable(dsi->panel)) {
+ DRM_ERROR("failed to enable the panel\n");
+ goto err_dsi_power_off;
+ }
+ }
+
dsi->enabled = true;
+
+ return;
+err_dsi_power_off:
+ mtk_dsi_stop(dsi);
+ mtk_dsi_poweroff(dsi);
}
static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
@@ -541,6 +696,7 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
}
+ mtk_dsi_stop(dsi);
mtk_dsi_poweroff(dsi);
dsi->enabled = false;
@@ -742,9 +898,149 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
+{
+ u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
+
+	while (timeout_ms) {
+		if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
+			break;
+
+		usleep_range(2, 4);
+		timeout_ms--;
+	}
+
+	if (timeout_ms == 0) {
+		DRM_WARN("timed out waiting for dsi to become idle\n");
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ }
+}
+
+static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
+{
+ switch (type) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ return 1;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ return 2;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		/* long packet word count is little-endian 16-bit: LSB + MSB * 256 */
+		return read_data[1] + read_data[2] * 256;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ DRM_INFO("type is 0x02, try again\n");
+ break;
+ default:
+ DRM_INFO("type(0x%x) cannot be non-recognite\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
+{
+ const char *tx_buf = msg->tx_buf;
+ u8 config, cmdq_size, cmdq_off, type = msg->type;
+ u32 reg_val, cmdq_mask, i;
+
+ if (MTK_DSI_HOST_IS_READ(type))
+ config = BTA;
+ else
+ config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;
+
+ if (msg->tx_len > 2) {
+ cmdq_size = 1 + (msg->tx_len + 3) / 4;
+ cmdq_off = 4;
+ cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
+ reg_val = (msg->tx_len << 16) | (type << 8) | config;
+ } else {
+ cmdq_size = 1;
+ cmdq_off = 2;
+ cmdq_mask = CONFIG | DATA_ID;
+ reg_val = (type << 8) | config;
+ }
+
+ for (i = 0; i < msg->tx_len; i++)
+ writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+
+ mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
+}
+
+static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
+ const struct mipi_dsi_msg *msg, u8 flag)
+{
+ mtk_dsi_wait_for_idle(dsi);
+ mtk_dsi_irq_data_clear(dsi, flag);
+ mtk_dsi_cmdq(dsi, msg);
+ mtk_dsi_start(dsi);
+
+	if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
+		return -ETIME;
+
+	return 0;
+}
+
+static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+ u32 recv_cnt, i;
+ u8 read_data[16];
+ void *src_addr;
+ u8 irq_flag = CMD_DONE_INT_FLAG;
+
+ if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
+ DRM_ERROR("dsi engine is not command mode\n");
+ return -EINVAL;
+ }
+
+ if (MTK_DSI_HOST_IS_READ(msg->type))
+ irq_flag |= LPRX_RD_RDY_INT_FLAG;
+
+ if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
+ return -ETIME;
+
+ if (!MTK_DSI_HOST_IS_READ(msg->type))
+ return 0;
+
+ if (!msg->rx_buf) {
+ DRM_ERROR("dsi receive buffer size may be NULL\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 16; i++)
+ *(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);
+
+ recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);
+
+ if (recv_cnt > 2)
+ src_addr = &read_data[4];
+ else
+ src_addr = &read_data[1];
+
+ if (recv_cnt > 10)
+ recv_cnt = 10;
+
+ if (recv_cnt > msg->rx_len)
+ recv_cnt = msg->rx_len;
+
+ if (recv_cnt)
+ memcpy(msg->rx_buf, src_addr, recv_cnt);
+
+ DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
+ recv_cnt, *((u8 *)(msg->tx_buf)));
+
+ return recv_cnt;
+}
+
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
.detach = mtk_dsi_host_detach,
+ .transfer = mtk_dsi_host_transfer,
};
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
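For orientation in the transfer path added above: mtk_dsi_cmdq() serializes one mipi_dsi_msg into the hardware command queue, with the first queue word carrying the packet configuration and the payload bytes following at cmdq_off. An illustrative restatement of the header packing, using the BTA/LONG_PACKET/SHORT_PACKET definitions from this patch (a sketch, not a new API):

/* Sketch of the first CMDQ word:
 *   bits  7..0  config  (BTA for reads, LONG/SHORT_PACKET for writes)
 *   bits 15..8  MIPI data type (msg->type)
 *   bits 31..16 payload word count (long packets only)
 */
static u32 example_cmdq_header(u8 type, size_t tx_len, bool is_read)
{
	u8 config;

	if (is_read)
		config = BTA;
	else
		config = tx_len > 2 ? LONG_PACKET : SHORT_PACKET;

	if (tx_len > 2)
		return (u32)(tx_len << 16) | (type << 8) | config;

	return (type << 8) | config;
}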
@@ -802,6 +1098,7 @@ static int mtk_dsi_probe(struct platform_device *pdev)
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
struct resource *regs;
+ int irq_num;
int comp_id;
int ret;
@@ -866,6 +1163,22 @@ static int mtk_dsi_probe(struct platform_device *pdev)
return ret;
}
+ irq_num = platform_get_irq(pdev, 0);
+	if (irq_num < 0) {
+		dev_err(&pdev->dev, "failed to get dsi irq resource: %d\n",
+			irq_num);
+		return irq_num;
+	}
+
+ irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
+ ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
+ IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request mediatek dsi irq: %d\n",
+			ret);
+		return ret;
+	}
+
+ init_waitqueue_head(&dsi->irq_wait_queue);
+
platform_set_drvdata(pdev, dsi);
return component_add(&pdev->dev, &mtk_dsi_component_ops);
@@ -882,6 +1195,7 @@ static int mtk_dsi_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_dsi_of_match[] = {
+ { .compatible = "mediatek,mt2701-dsi" },
{ .compatible = "mediatek,mt8173-dsi" },
{ },
};
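As a sanity check on the data-rate derivation that mtk_dsi_poweron() carries over (see the comment block in the hunk above), here is the arithmetic for a common 1080p60 timing with RGB888 over four lanes. The mode numbers, and the T_HS_ZERO/T_HS_EXIT values, are assumptions for illustration — those two constants are defined outside the hunks shown:

/* Illustrative per-lane data-rate calculation (userspace sketch). */
#include <stdio.h>

int main(void)
{
	unsigned long long pixel_clock = 148500000ULL;	/* Hz, 1080p60 */
	unsigned long long htotal = 1920 + 88 + 44 + 148;	/* 2200 */
	unsigned long long bit_per_pixel = 24, lanes = 4;
	/* T_LPX(5) + T_HS_PREP(6) + T_HS_ZERO(10?) + T_HS_TRAIL(8)
	 * + T_HS_EXIT(7?) -- the two '?' values are assumed here */
	unsigned long long overhead_cycles = 5 + 6 + 10 + 8 + 7;
	unsigned long long total_bits, data_rate;

	total_bits = htotal * bit_per_pixel + overhead_cycles * lanes * 8;
	/* DIV_ROUND_UP_ULL(pixel_clock * total_bits, htotal * lanes) */
	data_rate = (pixel_clock * total_bits + htotal * lanes - 1)
		    / (htotal * lanes);

	printf("per-lane rate ~ %llu bit/s\n", data_rate); /* ~910 Mbit/s */
	return 0;
}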
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1c366f8cb2d0..90e913108950 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
@@ -87,6 +88,9 @@
#define MIPITX_DSI_PLL_CON2 0x58
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
#define MIPITX_DSI_PLL_PWR 0x68
#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
@@ -123,10 +127,15 @@
#define SW_LNT2_HSTX_PRE_OE BIT(24)
#define SW_LNT2_HSTX_OE BIT(25)
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+};
+
struct mtk_mipi_tx {
struct device *dev;
void __iomem *regs;
- unsigned int data_rate;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
struct clk *pll;
};
@@ -163,7 +172,7 @@ static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- unsigned int txdiv, txdiv0, txdiv1;
+ u8 txdiv, txdiv0, txdiv1;
u64 pcw;
dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
@@ -243,6 +252,10 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
RG_DSI_MPPLL_SDM_SSC_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
return 0;
}
@@ -255,6 +268,9 @@ static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
RG_DSI_MPPLL_PLL_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
RG_DSI_MPPLL_SDM_ISO_EN |
RG_DSI_MPPLL_SDM_PWR_ON,
@@ -310,7 +326,7 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = {
static int mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- unsigned int reg;
+ u32 reg;
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
@@ -341,7 +357,7 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- unsigned int reg;
+ u32 reg;
mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
RG_DSI_PAD_TIE_LOW_EN);
@@ -391,6 +407,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
if (!mipi_tx)
return -ENOMEM;
+ mipi_tx->driver_data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
@@ -448,8 +465,19 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
return 0;
}
+static const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8)
+};
+
+static const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8)
+};
+
static const struct of_device_id mtk_mipi_tx_match[] = {
- { .compatible = "mediatek,mt8173-mipi-tx", },
+ { .compatible = "mediatek,mt2701-mipi-tx",
+ .data = &mt2701_mipitx_data },
+ { .compatible = "mediatek,mt8173-mipi-tx",
+ .data = &mt8173_mipitx_data },
{},
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 657598bb1e6b..565a217b46f2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int mgag200_mm_init(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d..36602ac7e248 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_bo)
+ if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
}
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
.idle = a5xx_idle,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
.show = a5xx_show,
+#endif
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4..5ae65426b4e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return 0;
}
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- if (gpu->memptrs_bo) {
- if (gpu->memptrs)
- msm_gem_put_vaddr(gpu->memptrs_bo);
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (adreno_gpu->memptrs_bo) {
+ if (adreno_gpu->memptrs)
+ msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+ if (adreno_gpu->memptrs_iova)
+ msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+ drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+ }
+ release_firmware(adreno_gpu->pm4);
+ release_firmware(adreno_gpu->pfp);
- if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ msm_gpu_cleanup(gpu);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ if (gpu->aspace) {
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(gpu->aspace);
}
- release_firmware(gpu->pm4);
- release_firmware(gpu->pfp);
- msm_gpu_cleanup(&gpu->base);
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059..a879ffa534b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
}
}
} else {
- msm_dsi_host_reset_phy(mdsi->host);
+ msm_dsi_host_reset_phy(msm_dsi->host);
ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad..8177e8511afd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -18,13 +18,6 @@
#include <linux/hdmi.h>
#include "hdmi.h"
-
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2 0
-#define MSM_HDMI_AUDIO_CHANNEL_4 1
-#define MSM_HDMI_AUDIO_CHANNEL_6 2
-#define MSM_HDMI_AUDIO_CHANNEL_8 3
-
/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
static int nchannels[] = { 2, 4, 6, 8 };
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9..238901987e00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -18,7 +18,8 @@
#ifndef __MDP5_PIPE_H__
#define __MDP5_PIPE_H__
-#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX (SSPP_CURSOR1 + 1)
/* represents a hw pipe, which is dynamically assigned to a plane */
struct mdp5_hw_pipe {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607d..68e509b3b9e4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
+ /* Disallow zero sized objects as they make the underlying
+ * infrastructure grumpy
+ */
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee1..af5b6ba4095b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->aspace)
- msm_gem_address_space_destroy(gpu->aspace);
-
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
}
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
index fac0824197f1..bf3e532665fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
@@ -22,17 +22,14 @@
#ifndef __NVKM_CORE_MSGQUEUE_H
#define __NVKM_CORE_MSGQUEUE_H
-
-#include <core/os.h>
-
-struct nvkm_falcon;
+#include <subdev/secboot.h>
struct nvkm_msgqueue;
-enum nvkm_secboot_falcon;
/* Hopefully we will never have firmware arguments larger than that... */
#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
-int nvkm_msgqueue_new(u32, struct nvkm_falcon *, struct nvkm_msgqueue **);
+int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
void nvkm_msgqueue_del(struct nvkm_msgqueue **);
void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
@@ -41,7 +38,6 @@ int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
-int nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *,
- enum nvkm_secboot_falcon);
+int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index e5c9b6268dcc..7c7d91cad09a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -42,6 +42,10 @@ struct nvkm_device_tegra_func {
* Whether the chip requires a reference clock
*/
bool require_ref_clk;
+ /*
+ * Whether the chip requires the VDD regulator
+ */
+ bool require_vdd;
};
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 24efa900d8ca..f00527b36acc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -68,4 +68,5 @@ int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 0a636833e0eb..c7944b19bed8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -44,4 +44,6 @@ int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 891497a0fe3b..28d513fbf44c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -97,6 +97,7 @@ int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index c4ecf255ff39..6e2b70bd2f41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -7,4 +7,5 @@ int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index e68ba636741b..58f10890c3b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -29,4 +29,5 @@ int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index d6a4bdb6573b..59f3ba551681 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -55,10 +55,11 @@ struct nvkm_secboot {
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
-int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 548f36d33924..e427f80344c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1574,6 +1574,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
struct nvkm_vma *
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 4c4cc2260257..1ada186fab77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -53,13 +53,21 @@ static int nouveau_platform_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_OF)
static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
+ .require_vdd = true,
};
static const struct nvkm_device_tegra_func gm20b_platform_data = {
.iommu_bit = 34,
+ .require_vdd = true,
.require_ref_clk = true,
};
+static const struct nvkm_device_tegra_func gp10b_platform_data = {
+ .iommu_bit = 36,
+ /* power provided by generic PM domains */
+ .require_vdd = false,
+};
+
static const struct of_device_id nouveau_platform_match[] = {
{
.compatible = "nvidia,gk20a",
@@ -69,6 +77,10 @@ static const struct of_device_id nouveau_platform_match[] = {
.compatible = "nvidia,gm20b",
.data = &gm20b_platform_data,
},
+ {
+ .compatible = "nvidia,gp10b",
+ .data = &gp10b_platform_data,
+ },
{ }
};
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index bf504788bce6..0e58537352fe 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1002,7 +1002,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
{
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
__drm_atomic_helper_plane_destroy_state(&asyw->state);
- dma_fence_put(asyw->state.fence);
kfree(asyw);
}
@@ -1014,7 +1013,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
- asyw->state.fence = NULL;
asyw->interval = 1;
asyw->sema = armw->sema;
asyw->ntfy = armw->ntfy;
@@ -2043,6 +2041,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
u32 hfrontp = mode->hsync_start - mode->hdisplay;
u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ u32 blankus;
struct nv50_head_mode *m = &asyh->mode;
m->h.active = mode->htotal;
@@ -2056,9 +2055,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
m->v.blanks = m->v.active - vfrontp - 1;
/*XXX: Safe underestimate, even "0" works */
- m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
- m->v.blankus *= 1000;
- m->v.blankus /= mode->clock;
+ blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+ blankus *= 1000;
+ blankus /= mode->clock;
+ m->v.blankus = blankus;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
m->v.blank2e = m->v.active + m->v.synce + vbackp;
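Two independent fixes in this file. The dropped dma_fence_put()/fence = NULL lines presumably defer fence lifetime to the core atomic plane-state helpers, avoiding a double put. The blankus change does the microseconds arithmetic in a full-width local before assigning once: if the destination field is narrower than 32 bits, the in-place *= 1000 truncates before the divide. A runnable illustration, assuming a 16-bit field purely for the sake of the demo:

#include <stdio.h>
#include <stdint.h>

struct mode { uint16_t blankus; };	/* assumed narrow field, illustrative */

int main(void)
{
	uint32_t active = 1125, vdisplay = 1080, hactive = 2200, clock = 148500;
	struct mode m;

	/* old style: each intermediate is stored back into the narrow field */
	m.blankus = (active - vdisplay - 2) * hactive;
	m.blankus *= 1000;	/* overflows uint16_t here */
	m.blankus /= clock;
	printf("truncated: %u\n", m.blankus);	/* 0 */

	/* new style: compute in a 32-bit local, assign once */
	uint32_t blankus = (active - vdisplay - 2) * hactive;
	blankus *= 1000;
	blankus /= clock;
	m.blankus = blankus;
	printf("correct:   %u\n", m.blankus);	/* 637 */
	return 0;
}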
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 1076949b802a..b690bc12a5b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@ nv4a_chipset = {
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
.mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
+ .mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
.timer = nv41_timer_new,
@@ -2201,8 +2201,6 @@ nv132_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
- .sec2 = gp102_sec2_new,
- .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2215,6 +2213,8 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2235,8 +2235,6 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
- .sec2 = gp102_sec2_new,
- .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2249,6 +2247,8 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2269,8 +2269,6 @@ nv136_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
- .sec2 = gp102_sec2_new,
- .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2283,6 +2281,65 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv137_chipset = {
+ .name = "GP107",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .secboot = gp102_secboot_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+ .gr = gp107_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv13b_chipset = {
+ .name = "GP10B",
+ .bar = gk20a_bar_new,
+ .bus = gf100_bus_new,
+ .fb = gp10b_fb_new,
+ .fuse = gm107_fuse_new,
+ .ibus = gp10b_ibus_new,
+ .imem = gk20a_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp10b_mc_new,
+ .mmu = gf100_mmu_new,
+ .secboot = gp10b_secboot_new,
+ .pmu = gm20b_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[2] = gp102_ce_new,
+ .dma = gf119_dma_new,
+ .fifo = gp10b_fifo_new,
+ .gr = gp10b_gr_new,
.sw = gf100_sw_new,
};
@@ -2724,6 +2781,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x132: device->chip = &nv132_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
case 0x136: device->chip = &nv136_chipset; break;
+ case 0x137: device->chip = &nv137_chipset; break;
+ case 0x13b: device->chip = &nv13b_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index f2bc0b7d9b93..6474bd2a6d07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -28,9 +28,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
int ret;
- ret = regulator_enable(tdev->vdd);
- if (ret)
- goto err_power;
+ if (tdev->vdd) {
+ ret = regulator_enable(tdev->vdd);
+ if (ret)
+ goto err_power;
+ }
ret = clk_prepare_enable(tdev->clk);
if (ret)
@@ -67,7 +69,8 @@ err_clk_pwr:
err_clk_ref:
clk_disable_unprepare(tdev->clk);
err_clk:
- regulator_disable(tdev->vdd);
+ if (tdev->vdd)
+ regulator_disable(tdev->vdd);
err_power:
return ret;
}
@@ -75,6 +78,8 @@ err_power:
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
+ int ret;
+
reset_control_assert(tdev->rst);
udelay(10);
@@ -84,7 +89,13 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
clk_disable_unprepare(tdev->clk);
udelay(10);
- return regulator_disable(tdev->vdd);
+ if (tdev->vdd) {
+ ret = regulator_disable(tdev->vdd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void
@@ -264,10 +275,12 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->func = func;
tdev->pdev = pdev;
- tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(tdev->vdd)) {
- ret = PTR_ERR(tdev->vdd);
- goto free;
+ if (func->require_vdd) {
+ tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(tdev->vdd)) {
+ ret = PTR_ERR(tdev->vdd);
+ goto free;
+ }
}
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
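GP10B is powered through generic PM domains, so the "vdd" supply is only requested when the chip's func table demands it, and every regulator call is now guarded on tdev->vdd being non-NULL. A toy model of the guard (hypothetical names); an alternative idiom would be the regulator API's devm_regulator_get_optional(), which reports an absent supply via an error pointer:

#include <stdio.h>

struct toy_func { int require_vdd; };
struct toy_dev  { const struct toy_func *func; int *vdd; };	/* NULL when absent */

static int toy_power_up(struct toy_dev *d)
{
	if (d->vdd)	/* only enable a regulator we actually acquired */
		*d->vdd = 1;
	return 0;
}

static int toy_power_down(struct toy_dev *d)
{
	if (d->vdd)
		*d->vdd = 0;
	return 0;
}

int main(void)
{
	static const struct toy_func gp10b = { .require_vdd = 0 };
	static int vdd_state;
	struct toy_dev dev = { .func = &gp10b, .vdd = NULL };

	if (dev.func->require_vdd)	/* probe: acquire only when required */
		dev.vdd = &vdd_state;

	toy_power_up(&dev);		/* safe no-ops without the supply */
	toy_power_down(&dev);
	printf("power cycle ok, vdd wired: %s\n", dev.vdd ? "yes" : "no");
	return 0;
}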
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 98651a43bc12..64e51838edf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -14,6 +14,7 @@ nvkm-y += nvkm/engine/fifo/gm107.o
nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/gp100.o
+nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 679f3ec311e9..44bff98d6725 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -83,4 +83,5 @@ extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
extern const struct nvkm_enum gm107_fifo_fault_engine[];
+extern const struct nvkm_enum gp100_fifo_fault_engine[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index b2635aea9f6e..41f16cf5a918 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -24,7 +24,7 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_enum
+const struct nvkm_enum
gp100_fifo_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
new file mode 100644
index 000000000000..4af96c3e69ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct gk104_fifo_func
+gp10b_fifo = {
+ .fault.engine = gp100_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .chan = {
+ &gp100_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 2938ad5aca40..8a22558b7b52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -33,6 +33,8 @@ nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/gp102.o
+nvkm-y += nvkm/engine/gr/gp107.o
+nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -52,3 +54,4 @@ nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
nvkm-y += nvkm/engine/gr/ctxgp100.o
nvkm-y += nvkm/engine/gr/ctxgp102.o
+nvkm-y += nvkm/engine/gr/ctxgp107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 0ae032fa2909..017180d147cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -106,6 +106,9 @@ void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gp100_grctx_generate_pagepool(struct gf100_grctx *);
extern const struct gf100_grctx_func gp102_grctx;
+void gp102_grctx_generate_attrib(struct gf100_grctx *);
+
+extern const struct gf100_grctx_func gp107_grctx;
/* context init value lists */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index ee26d64af73a..80b7ab0bee3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -29,7 +29,7 @@
* PGRAPH context implementation
******************************************************************************/
-static void
+void
gp102_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
new file mode 100644
index 000000000000..8da91a0b3bd2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+const struct gf100_grctx_func
+gp107_grctx = {
+ .main = gp100_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x300,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gp102_grctx_generate_attrib,
+ .attrib_nr_max = 0x15de,
+ .attrib_nr = 0x540,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index a4410ef19db5..99689f4de502 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1463,25 +1463,27 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
- int ret = 0;
+ u32 secboot_mask = 0;
/* load fuc microcode */
nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+ secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
else
gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
- if (ret)
- return ret;
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
else
gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
- if (ret)
- return ret;
+
+ if (secboot_mask != 0) {
+ int ret = nvkm_secboot_reset(sb, secboot_mask);
+ if (ret)
+ return ret;
+ }
nvkm_mc_unk260(device, 1);
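Rather than issuing one synchronous nvkm_secboot_reset() per managed falcon, the falcons are now gathered into a bitmask and reset in one call, which lets firmware that understands a multiple-falcon bootstrap command (see the msgqueue changes below) boot FECS and GPCCS together. A runnable toy of the accumulate-then-reset pattern:

#include <stdio.h>

enum { FALCON_FECS, FALCON_GPCCS, FALCON_END };
#define BIT(n) (1UL << (n))

static int toy_secboot_reset(unsigned long mask)
{
	/* one command covers every falcon in the mask */
	for (int f = 0; f < FALCON_END; f++)
		if (mask & BIT(f))
			printf("resetting falcon %d\n", f);
	return 0;
}

int main(void)
{
	unsigned long mask = 0;
	int managed_fecs = 1, managed_gpccs = 1;

	if (managed_fecs)
		mask |= BIT(FALCON_FECS);
	if (managed_gpccs)
		mask |= BIT(FALCON_GPCCS);

	if (mask)		/* single reset call, not one per falcon */
		return toy_secboot_reset(mask);
	return 0;
}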
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 1d2101af2a87..a36e45a4a635 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -125,6 +125,7 @@ struct gf100_gr_func {
void (*init_rop_active_fbps)(struct gf100_gr *);
void (*init_ppc_exceptions)(struct gf100_gr *);
void (*init_swdx_pes_mask)(struct gf100_gr *);
+ void (*init_num_active_ltcs)(struct gf100_gr *);
void (*set_hww_esr_report_mask)(struct gf100_gr *);
const struct gf100_gr_pack *mmio;
struct {
@@ -301,4 +302,8 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
void gm200_gr_init_gpc_mmu(struct gf100_gr *);
+
+void gp100_gr_init_num_active_ltcs(struct gf100_gr *gr);
+
+void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 94ed7debb714..867a5f7cc5bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -40,6 +40,15 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}
+void
+gp100_gr_init_num_active_ltcs(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+}
+
int
gp100_gr_init(struct gf100_gr *gr)
{
@@ -81,8 +90,7 @@ gp100_gr_init(struct gf100_gr *gr)
}
nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
- nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
- nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+ gr->func->init_num_active_ltcs(gr);
gr->func->init_rop_active_fbps(gr);
if (gr->func->init_swdx_pes_mask)
@@ -154,6 +162,7 @@ gp100_gr = {
.init_gpc_mmu = gm200_gr_init_gpc_mmu,
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 2,
.grctx = &gp100_grctx,
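The two LTC broadcast writes move behind a per-chip init_num_active_ltcs hook so the Tegra part can override them; the gp10b implementation later in this diff programs only the first register. A toy of the hook extraction (hypothetical names):

#include <stdio.h>

struct toy_gr;
struct toy_gr_func {
	void (*init_num_active_ltcs)(struct toy_gr *);	/* per-chip hook */
};
struct toy_gr { const struct toy_gr_func *func; };

static void dgpu_init_ltcs(struct toy_gr *gr)
{
	(void)gr;
	printf("program 0x08ac and 0x033c\n");
}

static void tegra_init_ltcs(struct toy_gr *gr)
{
	(void)gr;
	printf("program 0x08ac only\n");
}

static void toy_gr_init(struct toy_gr *gr)
{
	gr->func->init_num_active_ltcs(gr);	/* was hard-coded before */
}

int main(void)
{
	static const struct toy_gr_func gp100 = { .init_num_active_ltcs = dgpu_init_ltcs };
	static const struct toy_gr_func gp10b = { .init_num_active_ltcs = tegra_init_ltcs };
	struct toy_gr a = { &gp100 }, b = { &gp10b };

	toy_gr_init(&a);
	toy_gr_init(&b);
	return 0;
}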
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 1d5117a16299..61e3a0b08559 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-static void
+void
gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -47,6 +47,7 @@ gp102_gr = {
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 3,
.grctx = &gp102_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
new file mode 100644
index 000000000000..f7272323f694
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+gp107_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 1,
+ .grctx = &gp107_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_B, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp107_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
new file mode 100644
index 000000000000..5f3d161a0842
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+gp10b_gr_init_num_active_ltcs(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+}
+
+static const struct gf100_gr_func
+gp10b_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_num_active_ltcs = gp10b_gr_init_num_active_ltcs,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 1,
+ .grctx = &gp102_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_A, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp10b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 003ac915eaad..8a8895246d26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv31_mpeg_mthd(mpeg, mthd, data))
+ if (nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index e536f37e24b0..c3cf02ed468e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+ if (nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
}
}
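Same polarity fix in both the nv31 and nv44 interrupt handlers: nv*_mpeg_mthd() evidently now returns true when the method was handled, so the pending bit in `show` must be cleared on a true return rather than a false one. A trivial runnable model of the handled-then-ack convention:

#include <stdio.h>
#include <stdbool.h>

static bool toy_mthd(unsigned mthd) { return mthd == 0x190; }	/* handled? */

int main(void)
{
	unsigned show = 0x01000000;	/* pending-method bit */

	if (toy_mthd(0x190))		/* handled: ack it... */
		show &= ~0x01000000;	/* ...by clearing the bit */

	printf("show = 0x%08x\n", show);	/* 0: nothing left to report */
	return 0;
}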
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 982efedb4b13..d45d7947a964 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -463,26 +463,49 @@ nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
}
int
-nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *queue, enum nvkm_secboot_falcon falcon)
+nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
+ unsigned long falcon_mask)
{
- if (!queue || !queue->func->acr_func || !queue->func->acr_func->boot_falcon)
+ unsigned long falcon;
+
+ if (!queue || !queue->func->acr_func)
return -ENODEV;
- return queue->func->acr_func->boot_falcon(queue, falcon);
+ /* Does the firmware support booting multiple falcons? */
+ if (queue->func->acr_func->boot_multiple_falcons)
+ return queue->func->acr_func->boot_multiple_falcons(queue,
+ falcon_mask);
+
+ /* Else boot all requested falcons individually */
+ if (!queue->func->acr_func->boot_falcon)
+ return -ENODEV;
+
+ for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
+ int ret = queue->func->acr_func->boot_falcon(queue, falcon);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int
-nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
+ const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
{
const struct nvkm_subdev *subdev = falcon->owner;
int ret = -EINVAL;
switch (version) {
case 0x0137c63d:
- ret = msgqueue_0137c63d_new(falcon, queue);
+ ret = msgqueue_0137c63d_new(falcon, sb, queue);
+ break;
+ case 0x0137bca5:
+ ret = msgqueue_0137bca5_new(falcon, sb, queue);
break;
case 0x0148cdec:
- ret = msgqueue_0148cdec_new(falcon, queue);
+ ret = msgqueue_0148cdec_new(falcon, sb, queue);
break;
default:
nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
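The ACR boot entry point switches from a single falcon ID to a falcon mask: firmwares that implement boot_multiple_falcons get the whole mask in one command, and everything else falls back to booting each set bit in turn. A runnable toy of that dispatch, standing in for the function-table indirection:

#include <stdio.h>

#define BIT(n) (1UL << (n))
enum { F_FECS, F_GPCCS, F_END };

typedef int (*boot_one_fn)(int falcon);
typedef int (*boot_many_fn)(unsigned long mask);

static int boot_one(int f)            { printf("boot falcon %d\n", f); return 0; }
static int boot_many(unsigned long m) { printf("boot mask 0x%lx\n", m); return 0; }

static int boot_falcons(boot_one_fn one, boot_many_fn many, unsigned long mask)
{
	if (many)			/* firmware handles the whole mask */
		return many(mask);
	if (!one)
		return -1;		/* -ENODEV in the driver */
	for (int f = 0; f < F_END; f++) {	/* fall back to one-by-one */
		if (!(mask & BIT(f)))
			continue;
		int ret = one(f);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	boot_falcons(boot_one, boot_many, BIT(F_FECS) | BIT(F_GPCCS));
	boot_falcons(boot_one, NULL,      BIT(F_FECS) | BIT(F_GPCCS));
	return 0;
}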
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
index f37afe963d3e..13b54f8d8e04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
@@ -101,9 +101,11 @@ struct nvkm_msgqueue_init_func {
* struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
*
* @boot_falcon: build and send the command to reset a given falcon
+ * @boot_multiple_falcons: build and send the command to reset several falcons
*/
struct nvkm_msgqueue_acr_func {
int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
+ int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long);
};
struct nvkm_msgqueue_func {
@@ -201,7 +203,11 @@ int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
struct nvkm_msgqueue_queue *);
-int msgqueue_0137c63d_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
-int msgqueue_0148cdec_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
+int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
+int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
+int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
index bba91207fb18..fec0273158f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
@@ -43,6 +43,15 @@ struct msgqueue_0137c63d {
#define msgqueue_0137c63d(q) \
container_of(q, struct msgqueue_0137c63d, base)
+struct msgqueue_0137bca5 {
+ struct msgqueue_0137c63d base;
+
+ u64 wpr_addr;
+};
+#define msgqueue_0137bca5(q) \
+ container_of(container_of(q, struct msgqueue_0137c63d, base), \
+ struct msgqueue_0137bca5, base)
+
static struct nvkm_msgqueue_queue *
msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
enum msgqueue_msg_priority priority)
@@ -180,6 +189,7 @@ msgqueue_0137c63d_init_func = {
enum {
ACR_CMD_INIT_WPR_REGION = 0x00,
ACR_CMD_BOOTSTRAP_FALCON = 0x01,
+ ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
};
static void
@@ -286,11 +296,81 @@ acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
return 0;
}
+static void
+acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
+ struct nvkm_msgqueue_hdr *hdr)
+{
+ struct acr_bootstrap_falcon_msg {
+ struct nvkm_msgqueue_msg base;
+
+ u32 falcon_mask;
+ } *msg = (void *)hdr;
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ unsigned long falcon_mask = msg->falcon_mask;
+ u32 falcon_id, falcon_treated = 0;
+
+ for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
+ nvkm_debug(subdev, "%s booted\n",
+ nvkm_secboot_falcon_name[falcon_id]);
+ falcon_treated |= BIT(falcon_id);
+ }
+
+ if (falcon_treated != msg->falcon_mask) {
+ nvkm_error(subdev, "in bootstrap falcon callback:\n");
+ nvkm_error(subdev, "invalid falcon mask 0x%x\n",
+ msg->falcon_mask);
+ return;
+ }
+}
+
+static int
+acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
+{
+ DECLARE_COMPLETION_ONSTACK(completed);
+ /*
+ * flags - Flag specifying RESET or no RESET.
+ * falcon_mask - Bitmask of falcons to bootstrap.
+ */
+ struct {
+ struct nvkm_msgqueue_hdr hdr;
+ u8 cmd_type;
+ u32 flags;
+ u32 falcon_mask;
+ u32 use_va_mask;
+ u32 wpr_lo;
+ u32 wpr_hi;
+ } cmd;
+ struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
+ cmd.hdr.size = sizeof(cmd);
+ cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
+ cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
+ cmd.falcon_mask = falcon_mask;
+ cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
+ cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
+ nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
+ acr_boot_multiple_falcons_callback, &completed, true);
+
+ if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static const struct nvkm_msgqueue_acr_func
msgqueue_0137c63d_acr_func = {
.boot_falcon = acr_boot_falcon,
};
+static const struct nvkm_msgqueue_acr_func
+msgqueue_0137bca5_acr_func = {
+ .boot_falcon = acr_boot_falcon,
+ .boot_multiple_falcons = acr_boot_multiple_falcons,
+};
+
static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
@@ -307,7 +387,8 @@ msgqueue_0137c63d_func = {
};
int
-msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
+ struct nvkm_msgqueue **queue)
{
struct msgqueue_0137c63d *ret;
@@ -321,3 +402,35 @@ msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
return 0;
}
+
+static const struct nvkm_msgqueue_func
+msgqueue_0137bca5_func = {
+ .init_func = &msgqueue_0137c63d_init_func,
+ .acr_func = &msgqueue_0137bca5_acr_func,
+ .cmd_queue = msgqueue_0137c63d_cmd_queue,
+ .recv = msgqueue_0137c63d_process_msgs,
+ .dtor = msgqueue_0137c63d_dtor,
+};
+
+int
+msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
+ struct nvkm_msgqueue **queue)
+{
+ struct msgqueue_0137bca5 *ret;
+
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ *queue = &ret->base.base;
+
+ /*
+ * FIXME this must be set to the address of a *GPU* mapping within the
+ * ACR address space!
+ */
+ /* ret->wpr_addr = sb->wpr_addr; */
+
+ nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);
+
+ return 0;
+}
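msgqueue_0137bca5 subclasses the 0137c63d queue by embedding it and recovering the outer object with a nested container_of(), so the ACR code can carry the extra wpr_addr alongside the shared machinery. A userspace rendition of the same two-hop downcast (simplified container_of without the kernel's type check):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base   { int version; };
struct middle { struct base base; };
struct outer  { struct middle base; unsigned long wpr_addr; };

int main(void)
{
	struct outer o = { .wpr_addr = 0x1000 };
	struct base *b = &o.base.base;	/* what the core code passes around */

	/* two hops back out, mirroring the nested container_of() macro */
	struct middle *m = container_of(b, struct middle, base);
	struct outer  *q = container_of(m, struct outer, base);

	printf("wpr_addr = 0x%lx\n", q->wpr_addr);
	return 0;
}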
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
index ed5d0da4f4e9..9424803b9ef4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
@@ -247,7 +247,8 @@ msgqueue_0148cdec_func = {
};
int
-msgqueue_0148cdec_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
+ struct nvkm_msgqueue **queue)
{
struct msgqueue_0148cdec *ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 1c5e5ba487a8..2571530e82f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -28,6 +28,7 @@ nvkm-y += nvkm/subdev/fb/gm200.o
nvkm-y += nvkm/subdev/fb/gm20b.o
nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
+nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
new file mode 100644
index 000000000000..f2b1fbf428d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+static const struct nvkm_fb_func
+gp10b_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
+ .intr = gf100_fb_intr,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp10b_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 77c649723ad7..4a57defc99b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -164,7 +164,7 @@ static int
nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gpio *gpio = nvkm_gpio(subdev);
- u32 mask = (1 << gpio->func->lines) - 1;
+ u32 mask = (1ULL << gpio->func->lines) - 1;
gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
gpio->func->intr_stat(gpio, &mask, &mask);
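With gpio->func->lines equal to 32, the old `(1 << lines) - 1` shifts a 32-bit int by its full width, which is undefined behavior; widening to 1ULL keeps the shift defined, and the result truncates cleanly back into the u32 mask. Runnable demonstration of the fixed expression:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned lines = 32;

	/* (1 << 32) on a 32-bit int is undefined behaviour; this is the
	 * well-defined variant used by the fix: */
	uint32_t mask = (1ULL << lines) - 1;

	printf("mask = 0x%08x\n", (unsigned int)mask);	/* 0xffffffff */
	return 0;
}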
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
index ad572d3b5466..7f5883b78efa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ibus/gf117.o
nvkm-y += nvkm/subdev/ibus/gk104.o
nvkm-y += nvkm/subdev/ibus/gk20a.o
nvkm-y += nvkm/subdev/ibus/gm200.o
+nvkm-y += nvkm/subdev/ibus/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
new file mode 100644
index 000000000000..39db90aa2c80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/ibus.h>
+
+#include "priv.h"
+
+static int
+gp10b_ibus_init(struct nvkm_subdev *ibus)
+{
+ struct nvkm_device *device = ibus->device;
+
+ nvkm_wr32(device, 0x1200a8, 0x0);
+
+ /* init ring */
+ nvkm_wr32(device, 0x12004c, 0x4);
+ nvkm_wr32(device, 0x122204, 0x2);
+ nvkm_rd32(device, 0x122204);
+
+ /* timeout configuration */
+ nvkm_wr32(device, 0x009080, 0x800186a0);
+
+ return 0;
+}
+
+static const struct nvkm_subdev_func
+gp10b_ibus = {
+ .init = gp10b_ibus_init,
+ .intr = gk104_ibus_intr,
+};
+
+int
+gp10b_ibus_new(struct nvkm_device *device, int index,
+ struct nvkm_subdev **pibus)
+{
+ struct nvkm_subdev *ibus;
+ if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 9dec58ec3d9f..cd5adbec5e57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -94,7 +94,7 @@ struct gk20a_instmem {
struct nvkm_instmem base;
/* protects vaddr_* and gk20a_instobj::vaddr* */
- spinlock_t lock;
+ struct mutex lock;
/* CPU mappings LRU */
unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
const u64 size = nvkm_memory_size(memory);
- unsigned long flags;
nvkm_ltc_flush(ltc);
- spin_lock_irqsave(&imem->lock, flags);
+ mutex_lock(&imem->lock);
if (node->base.vaddr) {
if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
out:
node->use_cpt++;
- spin_unlock_irqrestore(&imem->lock, flags);
+ mutex_unlock(&imem->lock);
return node->base.vaddr;
}
@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
- unsigned long flags;
- spin_lock_irqsave(&imem->lock, flags);
+ mutex_lock(&imem->lock);
/* we should at least have one user to release... */
if (WARN_ON(node->use_cpt == 0))
@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
out:
- spin_unlock_irqrestore(&imem->lock, flags);
+ mutex_unlock(&imem->lock);
wmb();
nvkm_ltc_invalidate(ltc);
@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
struct nvkm_mm_node *r = node->base.mem.mem;
- unsigned long flags;
int i;
if (unlikely(!r))
goto out;
- spin_lock_irqsave(&imem->lock, flags);
+ mutex_lock(&imem->lock);
/* vaddr has already been recycled */
if (node->base.vaddr)
gk20a_instobj_iommu_recycle_vaddr(node);
- spin_unlock_irqrestore(&imem->lock, flags);
+ mutex_unlock(&imem->lock);
/* clear IOMMU bit to unmap pages */
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
@@ -571,7 +568,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
- spin_lock_init(&imem->lock);
+ mutex_init(&imem->lock);
*pimem = &imem->base;
/* do not allow more than 1MB of CPU-mapped instmem */
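The lock protecting the CPU-mapping LRU becomes a mutex because the acquire/release paths can sleep (IOMMU mapping, vmap, and vaddr recycling), which is illegal under a spinlock with interrupts disabled; presumably nothing here runs in atomic context, or the conversion would not be safe. A kernel-style sketch of the rule being applied (hypothetical names, compilable only in-tree):

#include <linux/mutex.h>
#include <linux/vmalloc.h>

/*
 * Operations that can sleep must not run under a spinlock with IRQs off,
 * but are fine under a mutex.
 */
struct toy_imem {
	struct mutex lock;	/* was: spinlock_t + irqsave flags */
};

static void *toy_acquire(struct toy_imem *imem, struct page **pages, int n)
{
	void *vaddr;

	mutex_lock(&imem->lock);			/* sleeping allowed */
	vaddr = vmap(pages, n, VM_MAP, PAGE_KERNEL);	/* may sleep */
	mutex_unlock(&imem->lock);
	return vaddr;
}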
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 12943f92c206..2befbe36dc28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/mc/gf100.o
nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
+nvkm-y += nvkm/subdev/mc/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index 4d22f4abd6de..7321ad3758c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -42,7 +42,7 @@ gp100_mc_intr_update(struct gp100_mc *mc)
}
}
-static void
+void
gp100_mc_intr_unarm(struct nvkm_mc *base)
{
struct gp100_mc *mc = gp100_mc(base);
@@ -53,7 +53,7 @@ gp100_mc_intr_unarm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-static void
+void
gp100_mc_intr_rearm(struct nvkm_mc *base)
{
struct gp100_mc *mc = gp100_mc(base);
@@ -64,7 +64,7 @@ gp100_mc_intr_rearm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-static void
+void
gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
{
struct gp100_mc *mc = gp100_mc(base);
@@ -87,13 +87,14 @@ gp100_mc = {
};
int
-gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mc **pmc)
{
struct gp100_mc *mc;
if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
+ nvkm_mc_ctor(func, device, index, &mc->base);
*pmc = &mc->base;
spin_lock_init(&mc->lock);
@@ -101,3 +102,9 @@ gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
mc->mask = 0x7fffffff;
return 0;
}
+
+int
+gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return gp100_mc_new_(&gp100_mc, device, index, pmc);
+}
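gp100_mc_new_() factors the allocation and constructor out so GP10B below can reuse the GP100 interrupt plumbing with its own func table, the usual parameterized-constructor refactor. A runnable toy of the shape:

#include <stdio.h>
#include <stdlib.h>

struct toy_mc_func { const char *name; };
struct toy_mc { const struct toy_mc_func *func; };

/* shared constructor: chip-specific entry points just pick a func table */
static int toy_mc_new_(const struct toy_mc_func *func, struct toy_mc **pmc)
{
	struct toy_mc *mc = calloc(1, sizeof(*mc));

	if (!mc)
		return -1;	/* -ENOMEM in the driver */
	mc->func = func;
	*pmc = mc;
	return 0;
}

static const struct toy_mc_func gp100 = { "gp100" };
static const struct toy_mc_func gp10b = { "gp10b" };

int main(void)
{
	struct toy_mc *mc;

	(void)gp100;
	if (!toy_mc_new_(&gp10b, &mc)) {
		printf("constructed %s mc\n", mc->func->name);
		free(mc);
	}
	return 0;
}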
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
new file mode 100644
index 000000000000..2283e3b74277
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+void
+gp10b_mc_init(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
+ nvkm_wr32(device, 0x00020c, 0xffffffff); /* everything out of ELPG */
+}
+
+static const struct nvkm_mc_func
+gp10b_mc = {
+ .init = gp10b_mc_init,
+ .intr = gk104_mc_intr,
+ .intr_unarm = gp100_mc_intr_unarm,
+ .intr_rearm = gp100_mc_intr_rearm,
+ .intr_mask = gp100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return gp100_mc_new_(&gp10b_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 4f0576a06d24..3be4126441e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -41,12 +41,18 @@ extern const struct nvkm_mc_map nv17_mc_reset[];
void nv44_mc_init(struct nvkm_mc *);
void nv50_mc_init(struct nvkm_mc *);
+void gk104_mc_init(struct nvkm_mc *);
void gf100_mc_intr_unarm(struct nvkm_mc *);
void gf100_mc_intr_rearm(struct nvkm_mc *);
void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
u32 gf100_mc_intr_stat(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
+void gp100_mc_intr_unarm(struct nvkm_mc *);
+void gp100_mc_intr_rearm(struct nvkm_mc *);
+void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32);
+int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
+ struct nvkm_mc **);
extern const struct nvkm_mc_map gk104_mc_intr[];
extern const struct nvkm_mc_map gk104_mc_reset[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index ac7f50ae53c6..e698f4836521 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o
+nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
index 93d804652d44..b615fc81aca4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -39,8 +39,7 @@ struct nvkm_acr_func {
int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
struct nvkm_gpuobj *, u64);
- int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
+ int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
};
/**
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 993a38eb3ed5..a721354249ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -26,8 +26,6 @@
#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>
@@ -241,6 +239,7 @@ struct ls_ucode_img_r352 {
*/
struct ls_ucode_img *
acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
+ const struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
@@ -253,7 +252,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
+ ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
if (ret) {
kfree(img->base.ucode_data);
@@ -462,12 +461,14 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
* will be copied into the WPR region by the HS firmware.
*/
static int
-acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
+acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
struct list_head imgs;
struct ls_ucode_img *img, *t;
unsigned long managed_falcons = acr->base.managed_falcons;
+ u64 wpr_addr = sb->wpr_addr;
+ u32 wpr_size = sb->wpr_size;
int managed_count = 0;
u32 image_wpr_size, ls_blob_size;
int falcon_id;
@@ -479,7 +480,7 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
struct ls_ucode_img *img;
- img = acr->func->ls_ucode_img_load(acr, falcon_id);
+ img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
if (IS_ERR(img)) {
if (acr->base.optional_falcons & BIT(falcon_id)) {
managed_falcons &= ~BIT(falcon_id);
@@ -704,7 +705,7 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
return 0;
/* Load and prepare the managed falcon's firmwares */
- ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size);
+ ret = acr_r352_prepare_ls_blob(acr, sb);
if (ret)
return ret;
@@ -882,7 +883,6 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = &sb->subdev;
unsigned long managed_falcons = acr->base.managed_falcons;
- u32 reg;
int falcon_id;
int ret;
@@ -917,54 +917,13 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
const struct acr_r352_ls_func *func =
acr->func->ls_func[falcon_id];
- if (func->post_run)
- func->post_run(&acr->base, sb);
- }
-
- /* Re-start ourselves if we are managed */
- if (!nvkm_secboot_is_managed(sb, acr->base.boot_falcon))
- return 0;
-
- /* Enable interrupts */
- nvkm_falcon_wr32(sb->boot_falcon, 0x10, 0xff);
- nvkm_mc_intr_mask(subdev->device, sb->boot_falcon->owner->index, true);
-
- /* Start LS firmware on boot falcon */
- nvkm_falcon_start(sb->boot_falcon);
-
- /*
- * There is a bug where the LS firmware sometimes require to be started
- * twice (this happens only on SEC). Detect and workaround that
- * condition.
- *
- * Once started, the falcon will end up in STOPPED condition (bit 5)
- * if successful, or in HALT condition (bit 4) if not.
- */
- nvkm_msec(subdev->device, 1,
- if ((reg = nvkm_rd32(subdev->device,
- sb->boot_falcon->addr + 0x100)
- & 0x30) != 0)
- break;
- );
- if (reg & BIT(4)) {
- nvkm_debug(subdev, "applying workaround for start bug...");
- nvkm_falcon_start(sb->boot_falcon);
- nvkm_msec(subdev->device, 1,
- if ((reg = nvkm_rd32(subdev->device,
- sb->boot_falcon->addr + 0x100)
- & 0x30) != 0)
- break;
- );
- if (reg & BIT(4)) {
- nvkm_error(subdev, "%s failed to start\n",
- nvkm_secboot_falcon_name[acr->base.boot_falcon]);
- return -EINVAL;
+ if (func->post_run) {
+ ret = func->post_run(&acr->base, sb);
+ if (ret)
+ return ret;
}
}
- nvkm_debug(subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->base.boot_falcon]);
-
return 0;
}
@@ -976,15 +935,16 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
*/
static int
acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon)
+ unsigned long falcon_mask)
{
+ int falcon;
int ret;
/*
* Perform secure boot each time we are called on FECS. Since only FECS
* and GPCCS are managed and started together, this ought to be safe.
*/
- if (falcon != NVKM_SECBOOT_FALCON_FECS)
+ if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
goto end;
ret = acr_r352_shutdown(acr, sb);
@@ -996,7 +956,9 @@ acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
return ret;
end:
- acr->falcon_state[falcon] = RESET;
+ for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
+ acr->falcon_state[falcon] = RESET;
+ }
return 0;
}
@@ -1009,11 +971,11 @@ end:
*/
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon)
+ unsigned long falcon_mask)
{
struct acr_r352 *acr = acr_r352(_acr);
struct nvkm_msgqueue *queue;
- const char *fname = nvkm_secboot_falcon_name[falcon];
+ int falcon;
bool wpr_already_set = sb->wpr_set;
int ret;
@@ -1026,7 +988,7 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
/* Redo secure boot entirely if it was already done */
if (wpr_already_set)
- return acr_r352_reset_nopmu(acr, sb, falcon);
+ return acr_r352_reset_nopmu(acr, sb, falcon_mask);
/* Else return the result of the initial invocation */
else
return ret;
@@ -1044,13 +1006,15 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
}
/* Otherwise just ask the LS firmware to reset the falcon */
- nvkm_debug(&sb->subdev, "resetting %s falcon\n", fname);
- ret = nvkm_msgqueue_acr_boot_falcon(queue, falcon);
+ for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
+ nvkm_debug(&sb->subdev, "resetting %s falcon\n",
+ nvkm_secboot_falcon_name[falcon]);
+ ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
if (ret) {
- nvkm_error(&sb->subdev, "cannot boot %s falcon\n", fname);
+ nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
return ret;
}
- nvkm_debug(&sb->subdev, "falcon %s reset\n", fname);
+ nvkm_debug(&sb->subdev, "falcon reset done\n");
return 0;
}
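
The reset entry points above now take an unsigned long bitmask so several LS falcons can be reset in one call, and for_each_set_bit() walks that mask. A minimal standalone sketch of the same idiom; the falcon IDs below are illustrative placeholders, not the real nvkm_secboot_falcon values:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* Illustrative placeholders; the real IDs come from the
     * nvkm_secboot_falcon enum in the nvkm headers. */
    enum { FALCON_FECS = 2, FALCON_GPCCS = 3, FALCON_END = 8 };

    int main(void)
    {
        unsigned long falcon_mask = BIT(FALCON_FECS) | BIT(FALCON_GPCCS);
        int falcon;

        /* Open-coded for_each_set_bit(falcon, &falcon_mask, FALCON_END) */
        for (falcon = 0; falcon < FALCON_END; falcon++)
            if (falcon_mask & BIT(falcon))
                printf("resetting falcon %d\n", falcon);
        return 0;
    }
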
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index 6e88520566c9..3d58ab871563 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -57,11 +57,11 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
* @lhdr_flags: LS flags
*/
struct acr_r352_ls_func {
- int (*load)(const struct nvkm_subdev *, struct ls_ucode_img *);
+ int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
- void (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
+ int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
u32 lhdr_flags;
};
@@ -82,6 +82,7 @@ struct acr_r352_func {
bool shadow_blob;
struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
+ const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
@@ -145,6 +146,7 @@ struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
enum nvkm_secboot_falcon, unsigned long);
struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
+ const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index f860713642f1..866877b88797 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -107,6 +107,7 @@ struct ls_ucode_img_r367 {
struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
+ const struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
@@ -119,7 +120,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
+ ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
if (ret) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
index ec6a71ca36be..8bdfb3e5cd1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
@@ -28,6 +28,7 @@
void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
+ const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 5c11e8c50964..ee29c6c11afd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -102,15 +102,15 @@ nvkm_secboot_falcon_name[] = {
* nvkm_secboot_reset() - reset specified falcon
*/
int
-nvkm_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
+nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
{
/* Unmanaged falcon? */
- if (!(BIT(falcon) & sb->acr->managed_falcons)) {
+ if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
return -EINVAL;
}
- return sb->acr->func->reset(sb->acr, sb, falcon);
+ return sb->acr->func->reset(sb->acr, sb, falcon_mask);
}
/**
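
The rejection test in nvkm_secboot_reset() relies on the identity that a mask X is a subset of M exactly when (X | M) == M, or equivalently (X & ~M) == 0. A quick standalone check with made-up mask values:

    #include <assert.h>

    int main(void)
    {
        unsigned long managed = 0x0c;   /* made-up managed-falcon mask */
        unsigned long subset  = 0x08;   /* only managed bits set */
        unsigned long stray   = 0x03;   /* contains unmanaged bits */

        assert((subset | managed) == managed);  /* accepted */
        assert((stray  | managed) != managed);  /* -EINVAL path */
        assert((stray  & ~managed) != 0);       /* same test, other form */
        return 0;
    }
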
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
index 6dc9fc384f24..c8ab3d76bdef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -41,4 +41,7 @@ void *gm200_secboot_dtor(struct nvkm_secboot *);
int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
struct nvkm_falcon *);
+/* Tegra-only */
+int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 29e6f73dfd7e..b10ed59a4911 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -23,28 +23,29 @@
#include "acr.h"
#include "gm200.h"
+#define TEGRA210_MC_BASE 0x70019000
+
#ifdef CONFIG_ARCH_TEGRA
-#define TEGRA_MC_BASE 0x70019000
#define MC_SECURITY_CARVEOUT2_CFG0 0xc58
#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c
#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60
#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64
#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1)
/**
- * sb_tegra_read_wpr() - read the WPR registers on Tegra
+ * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
*
* On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
* is reserved from system memory by the bootloader and irreversibly locked.
* This function reads the address and size of the pre-configured WPR region.
*/
-static int
-gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+int
+gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
{
struct nvkm_secboot *sb = &gsb->base;
void __iomem *mc;
u32 cfg;
- mc = ioremap(TEGRA_MC_BASE, 0xd00);
+ mc = ioremap(mc_base, 0xd00);
if (!mc) {
nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
return -ENOMEM;
@@ -70,8 +71,8 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
return 0;
}
#else
-static int
-gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+int
+gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
{
nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
return -EINVAL;
@@ -84,7 +85,7 @@ gm20b_secboot_oneinit(struct nvkm_secboot *sb)
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
- ret = gm20b_tegra_read_wpr(gsb);
+ ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE);
if (ret)
return ret;
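
For reference, the MMIO pattern behind gm20b_secboot_tegra_read_wpr(), reduced to a single register read. This is a sketch under the register layout shown above, and read_carveout_cfg() is a hypothetical helper, not driver API; note that ioremap() returns NULL on failure, so a plain error code is the right thing to hand back:

    #include <linux/io.h>
    #include <linux/errno.h>

    static int read_carveout_cfg(u32 mc_base, u32 *cfg)
    {
        /* mc_base is TEGRA210_MC_BASE or TEGRA186_MC_BASE */
        void __iomem *mc = ioremap(mc_base, 0xd00);

        if (!mc)
            return -ENOMEM;

        *cfg = ioread32(mc + 0xc58);    /* MC_SECURITY_CARVEOUT2_CFG0 */
        iounmap(mc);
        return 0;
    }
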
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
new file mode 100644
index 000000000000..632e9545e292
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr.h"
+#include "gm200.h"
+
+#define TEGRA186_MC_BASE 0x02c10000
+
+static int
+gp10b_secboot_oneinit(struct nvkm_secboot *sb)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int ret;
+
+ ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE);
+ if (ret)
+ return ret;
+
+ return gm200_secboot_oneinit(sb);
+}
+
+static const struct nvkm_secboot_func
+gp10b_secboot = {
+ .dtor = gm200_secboot_dtor,
+ .oneinit = gp10b_secboot_oneinit,
+ .fini = gm200_secboot_fini,
+ .run_blob = gm200_secboot_run_blob,
+};
+
+int
+gp10b_secboot_new(struct nvkm_device *device, int index,
+ struct nvkm_secboot **psb)
+{
+ int ret;
+ struct gm200_secboot *gsb;
+ struct nvkm_acr *acr;
+
+ acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_GPCCS) |
+ BIT(NVKM_SECBOOT_FALCON_PMU));
+ if (IS_ERR(acr))
+ return PTR_ERR(acr);
+
+ gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+ if (!gsb) {
+ *psb = NULL;
+ return -ENOMEM;
+ }
+ *psb = &gsb->base;
+
+ ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 4ff9138a2a83..9b7c402594e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -147,11 +147,11 @@ struct fw_bl_desc {
u32 data_size;
};
-int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_subdev *, struct ls_ucode_img *);
-void acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_subdev *, struct ls_ucode_img *);
-void acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
+int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
+int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index 40a6df77bb8a..d1cf02d22db1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -144,15 +144,13 @@ error:
}
int
-acr_ls_ucode_load_fecs(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(subdev, img, "fecs");
+ return ls_ucode_img_load_gr(&sb->subdev, img, "fecs");
}
int
-acr_ls_ucode_load_gpccs(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(subdev, img, "gpccs");
+ return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index ef0b298b70d7..ee989210725e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -28,6 +28,8 @@
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
/**
* acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
@@ -73,10 +75,11 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
return 0;
}
-static void
+static int
acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
struct nvkm_falcon *falcon, u32 addr_args)
{
+ struct nvkm_device *device = falcon->owner->device;
u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE;
u8 buf[cmdline_size];
@@ -85,65 +88,118 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0);
/* rearm the queue so it will wait for the init message */
nvkm_msgqueue_reinit(queue);
+
+ /* Enable interrupts */
+ nvkm_falcon_wr32(falcon, 0x10, 0xff);
+ nvkm_mc_intr_mask(device, falcon->owner->index, true);
+
+ /* Start LS firmware on boot falcon */
+ nvkm_falcon_start(falcon);
+
+ return 0;
}
int
-acr_ls_ucode_load_pmu(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- struct nvkm_pmu *pmu = subdev->device->pmu;
+ struct nvkm_pmu *pmu = sb->subdev.device->pmu;
int ret;
- ret = acr_ls_ucode_load_msgqueue(subdev, "pmu", img);
+ ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img);
if (ret)
return ret;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
- &pmu->queue);
+ sb, &pmu->queue);
if (ret)
return ret;
return 0;
}
-void
+int
acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
struct nvkm_device *device = sb->subdev.device;
struct nvkm_pmu *pmu = device->pmu;
u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
+ int ret;
+
+ ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
+ if (ret)
+ return ret;
- acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
+ nvkm_debug(&sb->subdev, "%s started\n",
+ nvkm_secboot_falcon_name[acr->boot_falcon]);
+
+ return 0;
}
int
-acr_ls_ucode_load_sec2(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- struct nvkm_sec2 *sec = subdev->device->sec2;
+ struct nvkm_sec2 *sec = sb->subdev.device->sec2;
int ret;
- ret = acr_ls_ucode_load_msgqueue(subdev, "sec2", img);
+ ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img);
if (ret)
return ret;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
- &sec->queue);
+ sb, &sec->queue);
if (ret)
return ret;
return 0;
}
-void
+int
acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
- struct nvkm_device *device = sb->subdev.device;
+ const struct nvkm_subdev *subdev = &sb->subdev;
+ struct nvkm_device *device = subdev->device;
struct nvkm_sec2 *sec = device->sec2;
/* on SEC arguments are always at the beginning of EMEM */
- u32 addr_args = 0x01000000;
+ const u32 addr_args = 0x01000000;
+ u32 reg;
+ int ret;
- acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
+ ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
+ if (ret)
+ return ret;
+
+ /*
+ * There is a bug where the LS firmware sometimes needs to be started
+ * twice (this happens only on SEC). Detect and work around that
+ * condition.
+ *
+ * Once started, the falcon will end up in STOPPED condition (bit 5)
+ * if successful, or in HALT condition (bit 4) if not.
+ */
+ nvkm_msec(device, 1,
+ if ((reg = nvkm_falcon_rd32(sb->boot_falcon, 0x100) & 0x30) != 0)
+ break;
+ );
+ if (reg & BIT(4)) {
+ nvkm_debug(subdev, "applying workaround for start bug...");
+ nvkm_falcon_start(sb->boot_falcon);
+ nvkm_msec(subdev->device, 1,
+ if ((reg = nvkm_rd32(subdev->device,
+ sb->boot_falcon->addr + 0x100)
+ & 0x30) != 0)
+ break;
+ );
+ if (reg & BIT(4)) {
+ nvkm_error(subdev, "%s failed to start\n",
+ nvkm_secboot_falcon_name[acr->boot_falcon]);
+ return -EINVAL;
+ }
+ }
+
+ nvkm_debug(&sb->subdev, "%s started\n",
+ nvkm_secboot_falcon_name[acr->boot_falcon]);
+
+ return 0;
}
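
The polls above mask the falcon status word with 0x30 and then inspect the individual bits; per the comment, bit 5 means the falcon stopped cleanly and bit 4 means it halted. A standalone decoder of that status word (the bit names are descriptive, not taken from hardware headers):

    #include <stdio.h>

    #define FALCON_HALTED   (1u << 4)   /* start failed, retry once */
    #define FALCON_STOPPED  (1u << 5)   /* LS firmware came up */

    static const char *falcon_state(unsigned int reg)
    {
        switch (reg & 0x30) {
        case 0:              return "still booting, keep polling";
        case FALCON_HALTED:  return "halted: apply the start-bug workaround";
        case FALCON_STOPPED: return "started";
        default:             return "halted and stopped at once (unexpected)";
        }
    }

    int main(void)
    {
        printf("0x20 -> %s\n", falcon_state(0x20));
        printf("0x10 -> %s\n", falcon_state(0x10));
        return 0;
    }
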
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 62aba976e744..5dc2106da2bc 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -7,6 +7,16 @@ config DRM_PANEL
menu "Display Panels"
depends on DRM && DRM_PANEL
+config DRM_PANEL_LVDS
+ tristate "Generic LVDS panel driver"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ This driver supports LVDS panels that don't require device-specific
+ handling of power supplies or control signals. It implements automatic
+ backlight handling if the panel is attached to a backlight controller.
+
config DRM_PANEL_SIMPLE
tristate "support for simple panels"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index a5c7ec0236e0..20b5060d1f47 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
new file mode 100644
index 000000000000..3216aa9a88d6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -0,0 +1,286 @@
+/*
+ * panel-lvds.c -- Generic LVDS panel driver
+ *
+ * Copyright (C) 2016 Laurent Pinchart
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+struct panel_lvds {
+ struct drm_panel panel;
+ struct device *dev;
+
+ const char *label;
+ unsigned int width;
+ unsigned int height;
+ struct videomode video_mode;
+ unsigned int bus_format;
+ bool data_mirror;
+
+ struct backlight_device *backlight;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_lvds, panel);
+}
+
+static int panel_lvds_disable(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->backlight) {
+ lvds->backlight->props.power = FB_BLANK_POWERDOWN;
+ lvds->backlight->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(lvds->backlight);
+ }
+
+ return 0;
+}
+
+static int panel_lvds_unprepare(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->enable_gpio)
+ gpiod_set_value_cansleep(lvds->enable_gpio, 0);
+
+ return 0;
+}
+
+static int panel_lvds_prepare(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->enable_gpio)
+ gpiod_set_value_cansleep(lvds->enable_gpio, 1);
+
+ return 0;
+}
+
+static int panel_lvds_enable(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->backlight) {
+ lvds->backlight->props.state &= ~BL_CORE_FBBLANK;
+ lvds->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(lvds->backlight);
+ }
+
+ return 0;
+}
+
+static int panel_lvds_get_modes(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+ struct drm_connector *connector = lvds->panel.connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(lvds->panel.drm);
+ if (!mode)
+ return 0;
+
+ drm_display_mode_from_videomode(&lvds->video_mode, mode);
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = lvds->width;
+ connector->display_info.height_mm = lvds->height;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &lvds->bus_format, 1);
+ connector->display_info.bus_flags = lvds->data_mirror
+ ? DRM_BUS_FLAG_DATA_LSB_TO_MSB
+ : DRM_BUS_FLAG_DATA_MSB_TO_LSB;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs panel_lvds_funcs = {
+ .disable = panel_lvds_disable,
+ .unprepare = panel_lvds_unprepare,
+ .prepare = panel_lvds_prepare,
+ .enable = panel_lvds_enable,
+ .get_modes = panel_lvds_get_modes,
+};
+
+static int panel_lvds_parse_dt(struct panel_lvds *lvds)
+{
+ struct device_node *np = lvds->dev->of_node;
+ struct display_timing timing;
+ const char *mapping;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &timing);
+ if (ret < 0)
+ return ret;
+
+ videomode_from_timing(&timing, &lvds->video_mode);
+
+ ret = of_property_read_u32(np, "width-mm", &lvds->width);
+ if (ret < 0) {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "width-mm");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(np, "height-mm", &lvds->height);
+ if (ret < 0) {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "height-mm");
+ return -ENODEV;
+ }
+
+ of_property_read_string(np, "label", &lvds->label);
+
+ ret = of_property_read_string(np, "data-mapping", &mapping);
+ if (ret < 0) {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "data-mapping");
+ return -ENODEV;
+ }
+
+ if (!strcmp(mapping, "jeida-18")) {
+ lvds->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
+ } else if (!strcmp(mapping, "jeida-24")) {
+ lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+ } else if (!strcmp(mapping, "vesa-24")) {
+ lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
+ } else {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "data-mapping");
+ return -EINVAL;
+ }
+
+ lvds->data_mirror = of_property_read_bool(np, "data-mirror");
+
+ return 0;
+}
+
+static int panel_lvds_probe(struct platform_device *pdev)
+{
+ struct panel_lvds *lvds;
+ struct device_node *np;
+ int ret;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+
+ lvds->dev = &pdev->dev;
+
+ ret = panel_lvds_parse_dt(lvds);
+ if (ret < 0)
+ return ret;
+
+ /* Get GPIOs and backlight controller. */
+ lvds->enable_gpio = devm_gpiod_get_optional(lvds->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lvds->enable_gpio)) {
+ ret = PTR_ERR(lvds->enable_gpio);
+ dev_err(lvds->dev, "failed to request %s GPIO: %d\n",
+ "enable", ret);
+ return ret;
+ }
+
+ lvds->reset_gpio = devm_gpiod_get_optional(lvds->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(lvds->reset_gpio)) {
+ ret = PTR_ERR(lvds->reset_gpio);
+ dev_err(lvds->dev, "failed to request %s GPIO: %d\n",
+ "reset", ret);
+ return ret;
+ }
+
+ np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
+ if (np) {
+ lvds->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!lvds->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ /*
+ * TODO: Handle all power supplies specified in the DT node in a generic
+ * way for panels that don't care about power supply ordering. LVDS
+ * panels that require a specific power sequence will need a dedicated
+ * driver.
+ */
+
+ /* Register the panel. */
+ drm_panel_init(&lvds->panel);
+ lvds->panel.dev = lvds->dev;
+ lvds->panel.funcs = &panel_lvds_funcs;
+
+ ret = drm_panel_add(&lvds->panel);
+ if (ret < 0)
+ goto error;
+
+ dev_set_drvdata(lvds->dev, lvds);
+ return 0;
+
+error:
+ put_device(&lvds->backlight->dev);
+ return ret;
+}
+
+static int panel_lvds_remove(struct platform_device *pdev)
+{
+ struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
+
+ drm_panel_detach(&lvds->panel);
+ drm_panel_remove(&lvds->panel);
+
+ panel_lvds_disable(&lvds->panel);
+
+ if (lvds->backlight)
+ put_device(&lvds->backlight->dev);
+
+ return 0;
+}
+
+static const struct of_device_id panel_lvds_of_table[] = {
+ { .compatible = "panel-lvds", },
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, panel_lvds_of_table);
+
+static struct platform_driver panel_lvds_driver = {
+ .probe = panel_lvds_probe,
+ .remove = panel_lvds_remove,
+ .driver = {
+ .name = "panel-lvds",
+ .of_match_table = panel_lvds_of_table,
+ },
+};
+
+module_platform_driver(panel_lvds_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("LVDS Panel Driver");
+MODULE_LICENSE("GPL");
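
On the consumer side, a display driver of this generation would resolve a panel-lvds device from a DT phandle and attach it to its connector roughly as below. bind_lvds_panel() is a hypothetical helper, and of_drm_find_panel() returns NULL when the panel driver has not probed yet, hence the probe deferral:

    #include <drm/drm_panel.h>
    #include <linux/errno.h>
    #include <linux/of.h>

    static int bind_lvds_panel(struct device_node *panel_np,
                               struct drm_connector *connector,
                               struct drm_panel **out)
    {
        struct drm_panel *panel = of_drm_find_panel(panel_np);

        if (!panel)
            return -EPROBE_DEFER;

        *out = panel;
        return drm_panel_attach(panel, connector);
    }
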
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7d1cab57c89e..0fdedee4509d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -393,6 +393,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &qxl_bo_move_notify,
};
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aefca0b03f38..c31e660e35db 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3295,7 +3295,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_trp = ((temp >> 8) & 0x7) + 1;
mem_tras = ((temp >> 11) & 0xf) + 4;
} else if (rdev->family == CHIP_RV350 ||
- rdev->family <= CHIP_RV380) {
+ rdev->family == CHIP_RV380) {
/* rv3x0 */
mem_trcd = (temp & 0x7) + 3;
mem_trp = ((temp >> 8) & 0x7) + 3;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2e400dc414e3..c1c8e2208a21 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -499,6 +499,7 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a8442f7196d6..df6b58c08544 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -164,6 +164,16 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].allowed_domains = domain;
}
+ /* Objects shared as dma-bufs cannot be moved to VRAM */
+ if (p->relocs[i].robj->prime_shared_count) {
+ p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
+ if (!p->relocs[i].allowed_domains) {
+ DRM_ERROR("BO associated with dma-buf cannot "
+ "be moved to VRAM\n");
+ return -EINVAL;
+ }
+ }
+
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 981385eb5389..17d3dafc8319 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1355,6 +1355,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* The handle refers to an imported dma-buf, which cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9b0b123ce079..dddb372de2b9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -120,6 +120,10 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
return r;
}
}
+ if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
+ /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 74b276060c20..bec2ec056de4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -352,6 +352,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
return 0;
}
+ if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
+ /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
+
radeon_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index f3609c97496b..7110d403322c 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,6 +77,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
@@ -91,6 +92,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
radeon_bo_unreserve(bo);
return ret;
}
@@ -105,6 +109,8 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
return;
radeon_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
radeon_bo_unreserve(bo);
}
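
Taken together, the radeon changes above maintain one invariant: prime_shared_count stays non-zero for as long as anyone might reach the pages through a dma-buf (imports start at 1, each prime pin adds one, each unpin removes one), and any non-zero value vetoes VRAM placement. A schematic model of that invariant, not driver code:

    #include <assert.h>
    #include <stdbool.h>

    struct bo_model {
        unsigned int prime_shared_count;
    };

    static bool may_move_to_vram(const struct bo_model *bo)
    {
        return bo->prime_shared_count == 0;
    }

    int main(void)
    {
        struct bo_model bo = { .prime_shared_count = 0 };

        bo.prime_shared_count++;        /* radeon_gem_prime_pin() */
        assert(!may_move_to_vram(&bo));
        bo.prime_shared_count--;        /* radeon_gem_prime_unpin() */
        assert(may_move_to_vram(&bo));
        return 0;
    }
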
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index aaa3e80fecb4..8b7623b5a624 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -873,6 +873,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 4c2fd056dd6d..8a50dab19e5c 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -11,15 +11,17 @@ config DRM_RCAR_DU
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
-config DRM_RCAR_HDMI
- bool "R-Car DU HDMI Encoder Support"
- depends on DRM_RCAR_DU
+config DRM_RCAR_DW_HDMI
+ tristate "R-Car DU Gen3 HDMI Encoder Support"
+ depends on DRM && OF
+ select DRM_DW_HDMI
help
- Enable support for external HDMI encoders.
+ Enable support for R-Car Gen3 internal HDMI encoder.
config DRM_RCAR_LVDS
bool "R-Car DU LVDS Encoder Support"
depends on DRM_RCAR_DU
+ select DRM_PANEL
help
Enable support for the R-Car Display Unit embedded LVDS encoders.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index d3b44651061a..2131e722de3b 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -4,13 +4,11 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_group.o \
rcar_du_kms.o \
rcar_du_lvdscon.o \
- rcar_du_plane.o \
- rcar_du_vgacon.o
-
-rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o
+ rcar_du_plane.o
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
+obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index edcbe2e3625d..4ed6f2340af0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -106,9 +106,62 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
* Hardware Setup
*/
+struct dpll_info {
+ unsigned int output;
+ unsigned int fdpll;
+ unsigned int n;
+ unsigned int m;
+};
+
+static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
+ struct dpll_info *dpll,
+ unsigned long input,
+ unsigned long target)
+{
+ unsigned long best_diff = (unsigned long)-1;
+ unsigned long diff;
+ unsigned int fdpll;
+ unsigned int m;
+ unsigned int n;
+
+ for (n = 39; n < 120; n++) {
+ for (m = 0; m < 4; m++) {
+ for (fdpll = 1; fdpll < 32; fdpll++) {
+ unsigned long output;
+
+ /* 1/2 (FRQSEL=1) for a 50% duty cycle */
+ output = input * (n + 1) / (m + 1)
+ / (fdpll + 1) / 2;
+
+ if (output >= 400000000)
+ continue;
+
+ diff = abs((long)output - (long)target);
+ if (best_diff > diff) {
+ best_diff = diff;
+ dpll->n = n;
+ dpll->m = m;
+ dpll->fdpll = fdpll;
+ dpll->output = output;
+ }
+
+ if (diff == 0)
+ goto done;
+ }
+ }
+ }
+
+done:
+ dev_dbg(rcrtc->group->dev->dev,
+ "output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
+ dpll->output, dpll->fdpll, dpll->n, dpll->m,
+ best_diff);
+}
+
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
unsigned long mode_clock = mode->clock * 1000;
unsigned long clk;
u32 value;
@@ -124,12 +177,18 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = div | ESCR_DCLKSEL_CLKS;
if (rcrtc->extclock) {
+ struct dpll_info dpll = { 0 };
unsigned long extclk;
unsigned long extrate;
unsigned long rate;
u32 extdiv;
extclk = clk_get_rate(rcrtc->extclock);
+ if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
+ rcar_du_dpll_divider(rcrtc, &dpll, extclk, mode_clock);
+ extclk = dpll.output;
+ }
+
extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
extdiv = clamp(extdiv, 1U, 64U) - 1;
@@ -140,7 +199,27 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
abs((long)rate - (long)mode_clock)) {
dev_dbg(rcrtc->group->dev->dev,
"crtc%u: using external clock\n", rcrtc->index);
- escr = extdiv | ESCR_DCLKSEL_DCLKIN;
+
+ if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
+ u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
+ | DPLLCR_FDPLL(dpll.fdpll)
+ | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ | DPLLCR_STBY;
+
+ if (rcrtc->index == 1)
+ dpllcr |= DPLLCR_PLCS1
+ | DPLLCR_INCS_DOTCLKIN1;
+ else
+ dpllcr |= DPLLCR_PLCS0
+ | DPLLCR_INCS_DOTCLKIN0;
+
+ rcar_du_group_write(rcrtc->group, DPLLCR,
+ dpllcr);
+
+ escr = ESCR_DCLKSEL_DCLKIN | 1;
+ } else {
+ escr = ESCR_DCLKSEL_DCLKIN | extdiv;
+ }
}
}
@@ -488,22 +567,29 @@ static void rcar_du_crtc_disable(struct drm_crtc *crtc)
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
rcrtc->outputs = 0;
}
static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_pending_vblank_event *event = crtc->state->event;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
unsigned long flags;
- if (event) {
+ if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
- rcrtc->event = event;
+ rcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
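
The rcar_du_dpll_divider() search above brute-forces (n, m, fdpll) over the hardware's ranges and keeps the best output below the 400 MHz ceiling. The same search as a standalone program, with made-up input and target clocks; 64-bit math is used so the intermediate product cannot overflow:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long input = 74250000;    /* assumed DOTCLKIN rate */
        unsigned long long target = 148500000;  /* assumed mode clock */
        unsigned long long best_diff = ~0ULL, best_output = 0;
        unsigned int best_n = 0, best_m = 0, best_fdpll = 0;
        unsigned int n, m, fdpll;

        for (n = 39; n < 120; n++)
        for (m = 0; m < 4; m++)
        for (fdpll = 1; fdpll < 32; fdpll++) {
            /* 1/2 (FRQSEL=1) for a 50% duty cycle */
            unsigned long long output =
                input * (n + 1) / (m + 1) / (fdpll + 1) / 2;
            unsigned long long diff;

            if (output >= 400000000)
                continue;

            diff = output > target ? output - target : target - output;
            if (diff < best_diff) {
                best_diff = diff;
                best_output = output;
                best_n = n; best_m = m; best_fdpll = fdpll;
            }
        }
        printf("output:%llu fdpll:%u n:%u m:%u diff:%llu\n",
               best_output, best_fdpll, best_n, best_m, best_diff);
        return 0;
    }
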
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index a7194812997e..15871fae7445 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -1,7 +1,7 @@
/*
* rcar_du_crtc.h -- R-Car Display Unit CRTCs
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -61,6 +61,8 @@ enum rcar_du_output {
RCAR_DU_OUTPUT_DPAD1,
RCAR_DU_OUTPUT_LVDS0,
RCAR_DU_OUTPUT_LVDS1,
+ RCAR_DU_OUTPUT_HDMI0,
+ RCAR_DU_OUTPUT_HDMI1,
RCAR_DU_OUTPUT_TCON,
RCAR_DU_OUTPUT_MAX,
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 62a3b3e32153..d6a0255181cc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -44,12 +44,10 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1) | BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 1,
},
},
@@ -68,17 +66,14 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2) | BIT(1) | BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS1] = {
.possible_crtcs = BIT(2) | BIT(1),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 2,
},
},
@@ -97,12 +92,10 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(1) | BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 1,
},
},
@@ -118,12 +111,10 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
/* R8A7792 has two RGB outputs. */
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 1,
},
},
@@ -141,12 +132,10 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 1,
},
},
@@ -160,21 +149,28 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
| RCAR_DU_FEATURE_VSP1_SOURCE,
.num_crtcs = 4,
.routes = {
- /* R8A7795 has one RGB output, one LVDS output and two
- * (currently unsupported) HDMI outputs.
+ /* R8A7795 has one RGB output, two HDMI outputs and one
+ * LVDS output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(3),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_HDMI1] = {
+ .possible_crtcs = BIT(2),
+ .port = 2,
+ },
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 3,
},
},
.num_lvds = 1,
+ .dpll_ch = BIT(1) | BIT(2),
};
static const struct rcar_du_device_info rcar_du_r8a7796_info = {
@@ -189,12 +185,10 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 2,
},
},
@@ -318,10 +312,8 @@ static int rcar_du_probe(struct platform_device *pdev)
if (rcdu == NULL)
return -ENOMEM;
- init_waitqueue_head(&rcdu->commit.wait);
-
rcdu->dev = &pdev->dev;
- rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
+ rcdu->info = of_device_get_match_data(rcdu->dev);
platform_set_drvdata(pdev, rcdu);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index c843c3134498..f8cd79488ece 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -38,7 +38,6 @@ struct rcar_du_lvdsenc;
/*
* struct rcar_du_output_routing - Output routing specification
* @possible_crtcs: bitmask of possible CRTCs for the output
- * @encoder_type: DRM type of the internal encoder associated with the output
* @port: device tree port number corresponding to this output route
*
* The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
@@ -47,7 +46,6 @@ struct rcar_du_lvdsenc;
*/
struct rcar_du_output_routing {
unsigned int possible_crtcs;
- unsigned int encoder_type;
unsigned int port;
};
@@ -67,6 +65,7 @@ struct rcar_du_device_info {
unsigned int num_crtcs;
struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
unsigned int num_lvds;
+ unsigned int dpll_ch;
};
#define RCAR_DU_MAX_CRTCS 4
@@ -98,11 +97,6 @@ struct rcar_du_device {
unsigned int vspd1_sink;
struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
-
- struct {
- wait_queue_head_t wait;
- u32 pending;
- } commit;
};
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index ab8645c57e2d..3e048dd98b64 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -16,14 +16,13 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_hdmienc.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
#include "rcar_du_lvdsenc.h"
-#include "rcar_du_vgacon.h"
/* -----------------------------------------------------------------------------
* Encoder
@@ -33,6 +32,11 @@ static void rcar_du_encoder_disable(struct drm_encoder *encoder)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ if (renc->connector && renc->connector->panel) {
+ drm_panel_disable(renc->connector->panel);
+ drm_panel_unprepare(renc->connector->panel);
+ }
+
if (renc->lvds)
rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false);
}
@@ -43,6 +47,11 @@ static void rcar_du_encoder_enable(struct drm_encoder *encoder)
if (renc->lvds)
rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true);
+
+ if (renc->connector && renc->connector->panel) {
+ drm_panel_prepare(renc->connector->panel);
+ drm_panel_enable(renc->connector->panel);
+ }
}
static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
@@ -52,30 +61,36 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
const struct drm_display_mode *mode = &crtc_state->mode;
- const struct drm_display_mode *panel_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_device *dev = encoder->dev;
- /* DAC encoders have currently no restriction on the mode. */
- if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
- return 0;
-
- if (list_empty(&connector->modes)) {
- dev_dbg(dev->dev, "encoder: empty modes list\n");
- return -EINVAL;
+ /*
+ * Only panel-related encoder types require validation here; everything
+ * else is handled by the bridge drivers.
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ const struct drm_display_mode *panel_mode;
+
+ if (list_empty(&connector->modes)) {
+ dev_dbg(dev->dev, "encoder: empty modes list\n");
+ return -EINVAL;
+ }
+
+ panel_mode = list_first_entry(&connector->modes,
+ struct drm_display_mode, head);
+
+ /* We're not allowed to modify the resolution. */
+ if (mode->hdisplay != panel_mode->hdisplay ||
+ mode->vdisplay != panel_mode->vdisplay)
+ return -EINVAL;
+
+ /*
+ * The flat panel mode is fixed, just copy it to the adjusted
+ * mode.
+ */
+ drm_mode_copy(adjusted_mode, panel_mode);
}
- panel_mode = list_first_entry(&connector->modes,
- struct drm_display_mode, head);
-
- /* We're not allowed to modify the resolution. */
- if (mode->hdisplay != panel_mode->hdisplay ||
- mode->vdisplay != panel_mode->vdisplay)
- return -EINVAL;
-
- /* The flat panel mode is fixed, just copy it to the adjusted mode. */
- drm_mode_copy(adjusted_mode, panel_mode);
-
if (renc->lvds)
rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode);
@@ -83,16 +98,54 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
}
static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ struct drm_display_info *info = &conn_state->connector->display_info;
+ enum rcar_lvds_mode mode;
+
+ rcar_du_crtc_route_output(crtc_state->crtc, renc->output);
+
+ if (!renc->lvds) {
+ /*
+ * The DU driver creates connectors only for the outputs of the
+ * internal LVDS encoders.
+ */
+ renc->connector = NULL;
+ return;
+ }
+
+ renc->connector = to_rcar_connector(conn_state->connector);
+
+ if (!info->num_bus_formats || !info->bus_formats) {
+ dev_err(encoder->dev->dev, "no LVDS bus format reported\n");
+ return;
+ }
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ mode = RCAR_LVDS_MODE_JEIDA;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ mode = RCAR_LVDS_MODE_VESA;
+ break;
+ default:
+ dev_err(encoder->dev->dev,
+ "unsupported LVDS bus format 0x%04x\n",
+ info->bus_formats[0]);
+ return;
+ }
+
+ if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
+ mode |= RCAR_LVDS_MODE_MIRROR;
- rcar_du_crtc_route_output(encoder->crtc, renc->output);
+ rcar_du_lvdsenc_set_mode(renc->lvds, mode);
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .mode_set = rcar_du_encoder_mode_set,
+ .atomic_mode_set = rcar_du_encoder_mode_set,
.disable = rcar_du_encoder_disable,
.enable = rcar_du_encoder_enable,
.atomic_check = rcar_du_encoder_atomic_check,
@@ -103,14 +156,13 @@ static const struct drm_encoder_funcs encoder_funcs = {
};
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
- enum rcar_du_encoder_type type,
enum rcar_du_output output,
struct device_node *enc_node,
struct device_node *con_node)
{
struct rcar_du_encoder *renc;
struct drm_encoder *encoder;
- unsigned int encoder_type;
+ struct drm_bridge *bridge = NULL;
int ret;
renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
@@ -133,52 +185,51 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
break;
}
- switch (type) {
- case RCAR_DU_ENCODER_VGA:
- encoder_type = DRM_MODE_ENCODER_DAC;
- break;
- case RCAR_DU_ENCODER_LVDS:
- encoder_type = DRM_MODE_ENCODER_LVDS;
- break;
- case RCAR_DU_ENCODER_HDMI:
- encoder_type = DRM_MODE_ENCODER_TMDS;
- break;
- case RCAR_DU_ENCODER_NONE:
- default:
- /* No external encoder, use the internal encoder type. */
- encoder_type = rcdu->info->routes[output].encoder_type;
- break;
- }
+ if (enc_node) {
+ dev_dbg(rcdu->dev, "initializing encoder %s for output %u\n",
+ of_node_full_name(enc_node), output);
- if (type == RCAR_DU_ENCODER_HDMI) {
- ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
- if (ret < 0)
+ /* Locate the DRM bridge from the encoder DT node. */
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge) {
+ ret = -EPROBE_DEFER;
goto done;
+ }
} else {
- ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- encoder_type, NULL);
- if (ret < 0)
- goto done;
-
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+ dev_dbg(rcdu->dev,
+ "initializing internal encoder for output %u\n",
+ output);
}
- switch (encoder_type) {
- case DRM_MODE_ENCODER_LVDS:
- ret = rcar_du_lvds_connector_init(rcdu, renc, con_node);
- break;
-
- case DRM_MODE_ENCODER_DAC:
- ret = rcar_du_vga_connector_init(rcdu, renc);
- break;
-
- case DRM_MODE_ENCODER_TMDS:
- /* connector managed by the bridge driver */
- break;
-
- default:
- ret = -EINVAL;
- break;
+ ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret < 0)
+ goto done;
+
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+ if (bridge) {
+ /*
+ * Attach the bridge to the encoder. The bridge will create the
+ * connector.
+ */
+ ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (ret) {
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ } else {
+ /* There's no bridge, create the connector manually. */
+ switch (output) {
+ case RCAR_DU_OUTPUT_LVDS0:
+ case RCAR_DU_OUTPUT_LVDS1:
+ ret = rcar_du_lvds_connector_init(rcdu, renc, con_node);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
}
done:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index a050a3699857..5422fa4df272 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -17,22 +17,14 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+struct drm_panel;
struct rcar_du_device;
-struct rcar_du_hdmienc;
struct rcar_du_lvdsenc;
-enum rcar_du_encoder_type {
- RCAR_DU_ENCODER_UNUSED = 0,
- RCAR_DU_ENCODER_NONE,
- RCAR_DU_ENCODER_VGA,
- RCAR_DU_ENCODER_LVDS,
- RCAR_DU_ENCODER_HDMI,
-};
-
struct rcar_du_encoder {
struct drm_encoder base;
enum rcar_du_output output;
- struct rcar_du_hdmienc *hdmi;
+ struct rcar_du_connector *connector;
struct rcar_du_lvdsenc *lvds;
};
@@ -44,13 +36,13 @@ struct rcar_du_encoder {
struct rcar_du_connector {
struct drm_connector connector;
struct rcar_du_encoder *encoder;
+ struct drm_panel *panel;
};
#define to_rcar_connector(c) \
container_of(c, struct rcar_du_connector, connector)
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
- enum rcar_du_encoder_type type,
enum rcar_du_output output,
struct device_node *enc_node,
struct device_node *con_node);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
deleted file mode 100644
index c4c5d1abcff8..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * R-Car Display Unit HDMI Encoder
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/slab.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_hdmienc.h"
-#include "rcar_du_lvdsenc.h"
-
-struct rcar_du_hdmienc {
- struct rcar_du_encoder *renc;
- bool enabled;
-};
-
-#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
-
-static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- if (hdmienc->renc->lvds)
- rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
- false);
-
- hdmienc->enabled = false;
-}
-
-static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- if (hdmienc->renc->lvds)
- rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
- true);
-
- hdmienc->enabled = true;
-}
-
-static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
-
- if (hdmienc->renc->lvds)
- rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
- adjusted_mode);
-
- return 0;
-}
-
-
-static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .mode_set = rcar_du_hdmienc_mode_set,
- .disable = rcar_du_hdmienc_disable,
- .enable = rcar_du_hdmienc_enable,
- .atomic_check = rcar_du_hdmienc_atomic_check,
-};
-
-static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- if (hdmienc->enabled)
- rcar_du_hdmienc_disable(encoder);
-
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = rcar_du_hdmienc_cleanup,
-};
-
-int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc, struct device_node *np)
-{
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct drm_bridge *bridge;
- struct rcar_du_hdmienc *hdmienc;
- int ret;
-
- hdmienc = devm_kzalloc(rcdu->dev, sizeof(*hdmienc), GFP_KERNEL);
- if (hdmienc == NULL)
- return -ENOMEM;
-
- /* Locate the DRM bridge from the HDMI encoder DT node. */
- bridge = of_drm_find_bridge(np);
- if (!bridge)
- return -EPROBE_DEFER;
-
- ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret < 0)
- return ret;
-
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
- renc->hdmi = hdmienc;
- hdmienc->renc = renc;
-
- /* Link the bridge to the encoder. */
- ret = drm_bridge_attach(encoder, bridge, NULL);
- if (ret) {
- drm_encoder_cleanup(encoder);
- return ret;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
deleted file mode 100644
index 2ff0128ac8e1..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * R-Car Display Unit HDMI Encoder
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_HDMIENC_H__
-#define __RCAR_DU_HDMIENC_H__
-
-#include <linux/module.h>
-
-struct device_node;
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
-int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc, struct device_node *np);
-#else
-static inline int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc,
- struct device_node *np)
-{
- return -ENOSYS;
-}
-#endif
-
-#endif /* __RCAR_DU_HDMIENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index ff61f6032f2c..f4125c8ca902 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -249,18 +249,9 @@ static int rcar_du_atomic_check(struct drm_device *dev,
return rcar_du_atomic_check_planes(dev, state);
}
-struct rcar_du_commit {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_atomic_state *state;
- u32 crtcs;
-};
-
-static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
+static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = commit->dev;
- struct rcar_du_device *rcdu = dev->dev_private;
- struct drm_atomic_state *old_state = commit->state;
+ struct drm_device *dev = old_state->dev;
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -268,114 +259,31 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
-
- drm_atomic_state_put(old_state);
-
- /* Complete the commit, wake up any waiter. */
- spin_lock(&rcdu->commit.wait.lock);
- rcdu->commit.pending &= ~commit->crtcs;
- wake_up_all_locked(&rcdu->commit.wait);
- spin_unlock(&rcdu->commit.wait.lock);
-
- kfree(commit);
-}
-
-static void rcar_du_atomic_work(struct work_struct *work)
-{
- struct rcar_du_commit *commit =
- container_of(work, struct rcar_du_commit, work);
-
- rcar_du_atomic_complete(commit);
-}
-
-static int rcar_du_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
- struct rcar_du_commit *commit;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- unsigned int i;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* Allocate the commit object. */
- commit = kzalloc(sizeof(*commit), GFP_KERNEL);
- if (commit == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- INIT_WORK(&commit->work, rcar_du_atomic_work);
- commit->dev = dev;
- commit->state = state;
-
- /* Wait until all affected CRTCs have completed previous commits and
- * mark them as pending.
- */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
- commit->crtcs |= drm_crtc_mask(crtc);
-
- spin_lock(&rcdu->commit.wait.lock);
- ret = wait_event_interruptible_locked(rcdu->commit.wait,
- !(rcdu->commit.pending & commit->crtcs));
- if (ret == 0)
- rcdu->commit.pending |= commit->crtcs;
- spin_unlock(&rcdu->commit.wait.lock);
-
- if (ret) {
- kfree(commit);
- goto error;
- }
-
- /* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(state, true);
-
- drm_atomic_state_get(state);
- if (nonblock)
- schedule_work(&commit->work);
- else
- rcar_du_atomic_complete(commit);
-
- return 0;
-
-error:
- drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
}
/* -----------------------------------------------------------------------------
* Initialization
*/
+static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
+ .atomic_commit_tail = rcar_du_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
.output_poll_changed = rcar_du_output_poll_changed,
.atomic_check = rcar_du_atomic_check,
- .atomic_commit = rcar_du_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct of_endpoint *ep)
{
- static const struct {
- const char *compatible;
- enum rcar_du_encoder_type type;
- } encoders[] = {
- { "adi,adv7123", RCAR_DU_ENCODER_VGA },
- { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
- { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
- };
-
- enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
struct device_node *connector = NULL;
struct device_node *encoder = NULL;
struct device_node *ep_node = NULL;
@@ -394,6 +302,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
return -ENODEV;
}
+ if (!of_device_is_available(entity)) {
+ dev_dbg(rcdu->dev,
+ "connected entity %s is disabled, skipping\n",
+ entity->full_name);
+ return -ENODEV;
+ }
+
entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
for_each_endpoint_of_node(entity, ep_node) {
@@ -422,30 +337,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
of_node_put(entity_ep_node);
- if (encoder) {
- /*
- * If an encoder has been found, get its type based on its
- * compatible string.
- */
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
- if (of_device_is_compatible(encoder,
- encoders[i].compatible)) {
- enc_type = encoders[i].type;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(encoders)) {
- dev_warn(rcdu->dev,
- "unknown encoder type for %s, skipping\n",
- encoder->full_name);
- of_node_put(encoder);
- of_node_put(connector);
- return -EINVAL;
- }
- } else {
+ if (!encoder) {
/*
* If no encoder has been found the entity must be the
* connector.
@@ -453,7 +345,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
connector = entity;
}
- ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
+ ret = rcar_du_encoder_init(rcdu, output, encoder, connector);
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
"failed to initialize encoder %s on output %u (%d), skipping\n",
@@ -561,6 +453,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
dev->mode_config.max_width = 4095;
dev->mode_config.max_height = 2047;
dev->mode_config.funcs = &rcar_du_mode_config_funcs;
+ dev->mode_config.helper_private = &rcar_du_mode_config_helper;
rcdu->num_crtcs = rcdu->info->num_crtcs;
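
The rcar_du_kms.c hunks above retire the driver's private nonblocking commit machinery (commit object, workqueue, per-CRTC pending mask) in favour of the generic drm_atomic_helper_commit(), overriding only the commit tail. A minimal sketch of the tail contract — hardware-done must be signalled before any blocking waits so the next commit can start; illustrative only, not the driver's exact code:

    static void example_commit_tail(struct drm_atomic_state *old_state)
    {
            struct drm_device *dev = old_state->dev;

            /* Program the new state into the hardware. */
            drm_atomic_helper_commit_modeset_disables(dev, old_state);
            drm_atomic_helper_commit_planes(dev, old_state,
                                            DRM_PLANE_COMMIT_ACTIVE_ONLY);
            drm_atomic_helper_commit_modeset_enables(dev, old_state);

            /* Unblock the next commit before the blocking waits below. */
            drm_atomic_helper_commit_hw_done(old_state);

            drm_atomic_helper_wait_for_vblanks(dev, old_state);
            drm_atomic_helper_cleanup_planes(dev, old_state);
    }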
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 3bcfd161c53f..ee91481131ad 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
@@ -25,47 +26,30 @@
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
-struct rcar_du_lvds_connector {
- struct rcar_du_connector connector;
-
- struct {
- unsigned int width_mm; /* Panel width in mm */
- unsigned int height_mm; /* Panel height in mm */
- struct videomode mode;
- } panel;
-};
-
-#define to_rcar_lvds_connector(c) \
- container_of(c, struct rcar_du_lvds_connector, connector.connector)
-
static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
{
- struct rcar_du_lvds_connector *lvdscon =
- to_rcar_lvds_connector(connector);
- struct drm_display_mode *mode;
-
- mode = drm_mode_create(connector->dev);
- if (mode == NULL)
- return 0;
+ struct rcar_du_connector *rcon = to_rcar_connector(connector);
- mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
-
- drm_display_mode_from_videomode(&lvdscon->panel.mode, mode);
-
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_panel_get_modes(rcon->panel);
}
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_lvds_connector_get_modes,
};
+static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
+{
+ struct rcar_du_connector *rcon = to_rcar_connector(connector);
+
+ drm_panel_detach(rcon->panel);
+ drm_connector_cleanup(connector);
+}
+
static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
+ .destroy = rcar_du_lvds_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -75,27 +59,19 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
const struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct rcar_du_lvds_connector *lvdscon;
+ struct rcar_du_connector *rcon;
struct drm_connector *connector;
- struct display_timing timing;
int ret;
- lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
- if (lvdscon == NULL)
+ rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
+ if (rcon == NULL)
return -ENOMEM;
- ret = of_get_display_timing(np, "panel-timing", &timing);
- if (ret < 0)
- return ret;
-
- videomode_from_timing(&timing, &lvdscon->panel.mode);
+ connector = &rcon->connector;
- of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
- of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
-
- connector = &lvdscon->connector.connector;
- connector->display_info.width_mm = lvdscon->panel.width_mm;
- connector->display_info.height_mm = lvdscon->panel.height_mm;
+ rcon->panel = of_drm_find_panel(np);
+ if (!rcon->panel)
+ return -EPROBE_DEFER;
ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -112,7 +88,11 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- lvdscon->connector.encoder = renc;
+ ret = drm_panel_attach(rcon->panel, connector);
+ if (ret < 0)
+ return ret;
+
+ rcon->encoder = renc;
return 0;
}
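
With the hand-rolled "panel-timing" parsing gone, the LVDS connector delegates entirely to the drm_panel API. Condensed from the hunks above (in this kernel generation of_drm_find_panel() returns NULL rather than an error pointer, hence the probe deferral):

    /* Probe: the panel driver may not have bound yet. */
    rcon->panel = of_drm_find_panel(np);
    if (!rcon->panel)
            return -EPROBE_DEFER;

    /* After drm_connector_init(): tie the panel to the connector. */
    ret = drm_panel_attach(rcon->panel, connector);

    /* .get_modes() now simply forwards to the panel. */
    return drm_panel_get_modes(rcon->panel);

    /* .destroy(): detach before tearing the connector down. */
    drm_panel_detach(rcon->panel);
    drm_connector_cleanup(connector);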
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index e3a4985f6f3f..1661f6201210 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -31,6 +31,7 @@ struct rcar_du_lvdsenc {
bool enabled;
enum rcar_lvds_input input;
+ enum rcar_lvds_mode mode;
};
static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
@@ -61,7 +62,7 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
/* Set the LVDS mode, select the input, enable LVDS operation and turn
 * bias circuitry on.
*/
- lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
+ lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
if (rcrtc->index == 2)
lvdcr0 |= LVDCR0_DUSEL;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
@@ -114,7 +115,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
* Turn the PLL on, set it to LVDS normal mode, wait for the startup
* delay and turn the output on.
*/
- lvdcr0 = LVDCR0_PLLON;
+ lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
lvdcr0 |= LVDCR0_PWD;
@@ -211,6 +212,12 @@ void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
mode->clock = clamp(mode->clock, 25175, 148500);
}
+void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
+ enum rcar_lvds_mode mode)
+{
+ lvds->mode = mode;
+}
+
static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
struct platform_device *pdev)
{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index dfdba746edf4..7218ac89333e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -26,8 +26,17 @@ enum rcar_lvds_input {
RCAR_LVDS_INPUT_DU2,
};
+/* Keep in sync with the LVDCR0.LVMD hardware register values. */
+enum rcar_lvds_mode {
+ RCAR_LVDS_MODE_JEIDA = 0,
+ RCAR_LVDS_MODE_MIRROR = 1,
+ RCAR_LVDS_MODE_VESA = 4,
+};
+
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
+void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
+ enum rcar_lvds_mode mode);
int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
struct drm_crtc *crtc, bool enable);
void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
@@ -37,6 +46,10 @@ static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
{
return 0;
}
+static inline void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
+ enum rcar_lvds_mode mode)
+{
+}
static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
struct drm_crtc *crtc, bool enable)
{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index fedb0161e234..d5bae99d3cfe 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -277,6 +277,29 @@
#define DEFR10_TSEL_H3_TCON1 (0 << 1) /* DEFR102 register only (DU2/DU3) */
#define DEFR10_DEFE10 (1 << 0)
+#define DPLLCR 0x20044
+#define DPLLCR_CODE (0x95 << 24)
+#define DPLLCR_PLCS1 (1 << 23)
+/*
+ * PLCS0 is bit 21, but H3 ES1.x requires bit 20 to be set as well. As bit 20
+ * isn't implemented by other SoCs in the Gen3 family, it can safely be set
+ * unconditionally.
+ */
+#define DPLLCR_PLCS0 (3 << 20)
+#define DPLLCR_CLKE (1 << 18)
+#define DPLLCR_FDPLL(n) ((n) << 12)
+#define DPLLCR_N(n) ((n) << 5)
+#define DPLLCR_M(n) ((n) << 3)
+#define DPLLCR_STBY (1 << 2)
+#define DPLLCR_INCS_DOTCLKIN0 (0 << 0)
+#define DPLLCR_INCS_DOTCLKIN1 (1 << 1)
+
+#define DPLLC2R 0x20048
+#define DPLLC2R_CODE (0x95 << 24)
+#define DPLLC2R_SELC (1 << 12)
+#define DPLLC2R_M(n) ((n) << 8)
+#define DPLLC2R_FDPLL(n) ((n) << 0)
+
/* -----------------------------------------------------------------------------
* Display Timing Generation Registers
*/
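
The new DPLLCR/DPLLC2R definitions describe the Gen3 display PLL control registers. A hypothetical illustration of how the field macros compose into a single register write — the divider values here are invented and the rcar_du_write() accessor is assumed; real values come from a rate calculation against the datasheet:

    u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
               | DPLLCR_FDPLL(1) | DPLLCR_N(85) | DPLLCR_M(1)
               | DPLLCR_INCS_DOTCLKIN0;

    rcar_du_write(rcdu, DPLLCR, dpllcr);    /* assumed register accessor */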
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
deleted file mode 100644
index 8d6125c1c0f9..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_kms.h"
-#include "rcar_du_vgacon.h"
-
-static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
-{
- return 0;
-}
-
-static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = rcar_du_vga_connector_get_modes,
-};
-
-static enum drm_connector_status
-rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .detect = rcar_du_vga_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
-{
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct rcar_du_connector *rcon;
- struct drm_connector *connector;
- int ret;
-
- rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
- if (rcon == NULL)
- return -ENOMEM;
-
- connector = &rcon->connector;
- connector->display_info.width_mm = 0;
- connector->display_info.height_mm = 0;
- connector->interlace_allowed = true;
-
- ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
- if (ret < 0)
- return ret;
-
- drm_connector_helper_add(connector, &connector_helper_funcs);
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_object_property_set_value(&connector->base,
- rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
-
- ret = drm_mode_connector_attach_encoder(connector, encoder);
- if (ret < 0)
- return ret;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
deleted file mode 100644
index 112f50316e01..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_VGACON_H__
-#define __RCAR_DU_VGACON_H__
-
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc);
-
-#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index 510dcc9c6816..f1d0f1824528 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -68,7 +68,7 @@ void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
#else
-static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return 0; };
+static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return -ENXIO; };
static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
new file mode 100644
index 000000000000..7539626b8ebd
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
@@ -0,0 +1,100 @@
+/*
+ * R-Car Gen3 HDMI PHY
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/bridge/dw_hdmi.h>
+
+#define RCAR_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */
+#define RCAR_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */
+#define RCAR_HDMI_PHY_PLLDIVCTRL 0x11 /* PLL dividers */
+
+struct rcar_hdmi_phy_params {
+ unsigned long mpixelclock;
+ u16 opmode_div; /* Mode of operation and PLL dividers */
+ u16 curr_gmp; /* PLL current and Gmp (conductance) */
+ u16 div; /* PLL dividers */
+};
+
+static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = {
+ { 35500000, 0x0003, 0x0344, 0x0328 },
+ { 44900000, 0x0003, 0x0285, 0x0128 },
+ { 71000000, 0x0002, 0x1184, 0x0314 },
+ { 90000000, 0x0002, 0x1144, 0x0114 },
+ { 140250000, 0x0001, 0x20c4, 0x030a },
+ { 182750000, 0x0001, 0x2084, 0x010a },
+ { 281250000, 0x0000, 0x0084, 0x0305 },
+ { 297000000, 0x0000, 0x0084, 0x0105 },
+ { ~0UL, 0x0000, 0x0000, 0x0000 },
+};
+
+static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
+ const struct dw_hdmi_plat_data *pdata,
+ unsigned long mpixelclock)
+{
+ const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params;
+
+ for (; params && params->mpixelclock != ~0UL; ++params) {
+ if (mpixelclock <= params->mpixelclock)
+ break;
+ }
+
+ if (params->mpixelclock == ~0UL)
+ return -EINVAL;
+
+ dw_hdmi_phy_i2c_write(hdmi, params->opmode_div,
+ RCAR_HDMI_PHY_OPMODE_PLLCFG);
+ dw_hdmi_phy_i2c_write(hdmi, params->curr_gmp,
+ RCAR_HDMI_PHY_PLLCURRGMPCTRL);
+ dw_hdmi_phy_i2c_write(hdmi, params->div, RCAR_HDMI_PHY_PLLDIVCTRL);
+
+ return 0;
+}
+
+static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = {
+ .configure_phy = rcar_hdmi_phy_configure,
+};
+
+static int rcar_dw_hdmi_probe(struct platform_device *pdev)
+{
+ return dw_hdmi_probe(pdev, &rcar_dw_hdmi_plat_data);
+}
+
+static int rcar_dw_hdmi_remove(struct platform_device *pdev)
+{
+ dw_hdmi_remove(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_dw_hdmi_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-hdmi" },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table);
+
+static struct platform_driver rcar_dw_hdmi_platform_driver = {
+ .probe = rcar_dw_hdmi_probe,
+ .remove = rcar_dw_hdmi_remove,
+ .driver = {
+ .name = "rcar-dw-hdmi",
+ .of_match_table = rcar_dw_hdmi_of_table,
+ },
+};
+
+module_platform_driver(rcar_dw_hdmi_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 HDMI Encoder Driver");
+MODULE_LICENSE("GPL");
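
rcar_hdmi_phy_configure() walks a table sorted by ascending pixel clock and takes the first entry whose mpixelclock covers the requested rate, with an all-ones sentinel marking "unsupported". The same search as a self-contained sketch, compilable outside the kernel (table trimmed for brevity):

    #include <stdio.h>

    struct params {
            unsigned long mpixelclock;
            unsigned short opmode_div, curr_gmp, div;
    };

    /* Sorted by ascending clock, terminated by a ~0UL sentinel. */
    static const struct params table[] = {
            {  35500000, 0x0003, 0x0344, 0x0328 },
            {  71000000, 0x0002, 0x1184, 0x0314 },
            { 297000000, 0x0000, 0x0084, 0x0105 },
            { ~0UL, 0, 0, 0 },
    };

    static const struct params *lookup(unsigned long clock)
    {
            const struct params *p = table;

            /* First entry covering the requested clock wins. */
            while (p->mpixelclock != ~0UL && clock > p->mpixelclock)
                    p++;
            return p->mpixelclock == ~0UL ? NULL : p;
    }

    int main(void)
    {
            const struct params *p = lookup(148500000);

            if (p)
                    printf("opmode_div=0x%04x curr_gmp=0x%04x div=0x%04x\n",
                           p->opmode_div, p->curr_gmp, p->div);
            return 0;
    }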
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 412240a3ba90..e44626a2e698 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,37 +1020,44 @@ out_unlock:
return ret;
}
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+static bool ttm_bo_places_compat(const struct ttm_place *places,
+ unsigned num_placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
- int i;
-
- for (i = 0; i < placement->num_placement; i++) {
- const struct ttm_place *heap = &placement->placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
+ unsigned i;
- *new_flags = heap->flags;
- if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (*new_flags & mem->placement & TTM_PL_MASK_MEM))
- return true;
- }
+ for (i = 0; i < num_placement; i++) {
+ const struct ttm_place *heap = &places[i];
- for (i = 0; i < placement->num_busy_placement; i++) {
- const struct ttm_place *heap = &placement->busy_placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
+ if (mem->mm_node && (mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
+ (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
+ return false;
+}
+
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
+{
+ if (ttm_bo_places_compat(placement->placement, placement->num_placement,
+ mem, new_flags))
+ return true;
+
+ if ((placement->busy_placement != placement->placement ||
+ placement->num_busy_placement > placement->num_placement) &&
+ ttm_bo_places_compat(placement->busy_placement,
+ placement->num_busy_placement,
+ mem, new_flags))
+ return true;
return false;
}
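
The ttm_bo_mem_compat() rework factors the duplicated placement/busy-placement loops into one helper, skips the busy list when it merely aliases the primary list, and refuses to treat a non-contiguous allocation as satisfying a TTM_PL_FLAG_CONTIGUOUS request. The "skip the second list if it aliases the first" pattern as a self-contained sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool places_compat(const int *places, size_t n, int mem)
    {
            for (size_t i = 0; i < n; i++)
                    if (places[i] == mem)
                            return true;
            return false;
    }

    /* Same shape as the new ttm_bo_mem_compat(): only walk the busy
     * list when it isn't an alias of the primary list. */
    static bool mem_compat(const int *placement, size_t num,
                           const int *busy, size_t num_busy, int mem)
    {
            if (places_compat(placement, num, mem))
                    return true;
            if ((busy != placement || num_busy > num) &&
                places_compat(busy, num_busy, mem))
                    return true;
            return false;
    }

    int main(void)
    {
            int primary[] = { 1, 2 }, busy[] = { 1, 2, 3 };

            printf("%d\n", mem_compat(primary, 2, busy, 3, 3)); /* 1 */
            return 0;
    }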
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 35ffb3754feb..9f53df95f35c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -231,7 +231,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
- pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
+ pfn = bdev->driver->io_mem_pfn(bo, page_offset);
else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
@@ -324,6 +324,14 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
+ + page_offset;
+}
+EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
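
ttm_bo_vm_fault() now obtains the page frame number through the driver's io_mem_pfn hook, and the exported ttm_bo_default_io_mem_pfn() preserves the old inline arithmetic so drivers can wire it up verbatim (virtio-gpu and vmwgfx do exactly that below). The arithmetic as a self-contained illustration — PAGE_SHIFT of 12, i.e. 4 KiB pages, is an assumption:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    /* Same computation as ttm_bo_default_io_mem_pfn(): PFN of the
     * first page of the BO's aperture window, plus the page offset
     * into the object. */
    static unsigned long io_mem_pfn(unsigned long bus_base,
                                    unsigned long bus_offset,
                                    unsigned long page_offset)
    {
            return ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
    }

    int main(void)
    {
            /* Aperture at 0xd0000000, object at +1 MiB, faulting page 3. */
            printf("pfn=0x%lx\n", io_mem_pfn(0xd0000000UL, 0x100000UL, 3));
            return 0;
    }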
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01..26a7ad0f4789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
if (unlikely(ret != 0))
goto out_err0;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
}
rcu_read_unlock();
+ if (require_existed)
+ return -EPERM;
+
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
ttm_ref_object_release(&ref->kref);
}
+ spin_unlock(&tfile->lock);
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
- spin_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
- spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
- spin_unlock(&tdev->object_lock);
kfree(tdev);
}
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 70ec8ca8d9b1..4e8e27d50922 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -431,6 +431,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 4c7f24a67a2e..35bf781e418e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -859,4 +859,5 @@ struct ttm_bo_driver vmw_bo_driver = {
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b399f03a988d..6b2708b4eafe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -726,12 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- pr_err("Wait invalid fence object handle 0x%08lx\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
@@ -770,12 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- pr_err("Fence signaled invalid fence object handle 0x%08lx\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fman_from_fence(fence);
@@ -1022,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
@@ -1035,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup_for_ref(dev_priv->tdev,
- arg->handle);
-
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ vmw_fence_obj_lookup(tfile, arg->handle);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
- bool existed;
-
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
+ TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1095,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
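
vmw_fence_obj_lookup() folds the NULL check and the refcount_release type check into one helper that returns an error pointer, collapsing every caller to IS_ERR()/PTR_ERR(). The idiom re-implemented as a self-contained sketch — the kernel encodes errnos in the top 4095 pointer values the same way:

    #include <stdio.h>

    #define EINVAL 22

    static inline void *err_ptr(long err)     { return (void *)err; }
    static inline int is_err(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-4095;
    }
    static inline long ptr_err(const void *p) { return (long)p; }

    static int object = 42;

    /* Return the object on success, an encoded errno on failure. */
    static void *lookup(int handle)
    {
            if (handle != 42)
                    return err_ptr(-EINVAL);
            return &object;
    }

    int main(void)
    {
            void *base = lookup(7);

            if (is_err(base)) {
                    printf("lookup failed: %ld\n", ptr_err(base));
                    return 1;
            }
            return 0;
    }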
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->has_dx;
break;
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index fa1037ec8e5f..7d591f653dfa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -546,7 +546,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed);
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
@@ -730,7 +730,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL);
+ TTM_REF_USAGE, NULL, false);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 41b9d20d6ae7..7681341fe32b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
@@ -890,17 +893,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
uint32_t handle;
struct ttm_base_object *base;
int ret;
+ bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv))) {
- DRM_ERROR("Render client refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
@@ -928,17 +930,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
/*
* Make sure the surface creator has the same
- * authenticating master.
+ * authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
- user_srf->master != file_priv->master) {
- DRM_ERROR("Trying to reference surface outside of"
- " master domain.\n");
- ret = -EACCES;
- goto out_bad_resource;
- }
+ user_srf->master != file_priv->master)
+ require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+ require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
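
Two hardening changes meet in vmwgfx_surface.c: per-face mip counts are bounded before being summed (and a zero total rejected) so unchecked user input can no longer size a later allocation, and render clients or foreign masters now require the reference to already exist instead of being refused outright. The validate-then-sum pattern as a self-contained sketch:

    #include <stdio.h>

    #define MAX_FACES      6
    #define MAX_MIP_LEVELS 24

    /* Bound each element before accumulating and reject an empty
     * result, mirroring the vmw_surface_define_ioctl() checks. */
    static int count_sizes(const unsigned int mip_levels[MAX_FACES],
                           unsigned int *num_sizes)
    {
            unsigned int total = 0;

            for (int i = 0; i < MAX_FACES; i++) {
                    if (mip_levels[i] > MAX_MIP_LEVELS)
                            return -1;      /* per-face bound */
                    total += mip_levels[i];
            }
            if (total == 0 || total > MAX_FACES * MAX_MIP_LEVELS)
                    return -1;              /* global bounds */
            *num_sizes = total;
            return 0;
    }

    int main(void)
    {
            unsigned int levels[MAX_FACES] = { 10, 10, 10, 0, 0, 0 }, n = 0;

            printf("%s n=%u\n", count_sizes(levels, &n) ? "reject" : "ok", n);
            return 0;
    }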
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 1ab9bceee755..8cdf9e4ae772 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -2,4 +2,8 @@ obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
- ipu-pre.o ipu-prg.o ipu-smfc.o ipu-vdi.o
+ ipu-smfc.o ipu-vdi.o
+
+ifdef CONFIG_DRM
+ imx-ipu-v3-objs += ipu-pre.o ipu-prg.o
+endif
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 7aefccec31b1..16d556816b5f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1401,7 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
ipu->id = of_alias_get_id(np, "ipu");
- if (of_device_is_compatible(np, "fsl,imx6qp-ipu")) {
+ if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
+ IS_ENABLED(CONFIG_DRM)) {
ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
"fsl,prg", ipu->id);
if (!ipu->prg_priv)
@@ -1538,8 +1539,10 @@ static struct platform_driver imx_ipu_driver = {
};
static struct platform_driver * const drivers[] = {
+#if IS_ENABLED(CONFIG_DRM)
&ipu_pre_drv,
&ipu_prg_drv,
+#endif
&imx_ipu_driver,
};
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index ca5759c0c318..43a6cb078193 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
name = "accel_3d";
channel_spec = accel_3d_channels;
channel_size = sizeof(accel_3d_channels);
+ indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
} else {
name = "gravity";
channel_spec = gravity_channels;
channel_size = sizeof(gravity_channels);
+ indio_dev->num_channels = ARRAY_SIZE(gravity_channels);
}
ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
&accel_state->common_attributes);
@@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
goto error_free_dev_mem;
}
- indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
indio_dev->dev.parent = &pdev->dev;
indio_dev->info = &accel_3d_info;
indio_dev->name = name;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index d6c372bb433b..c17596f7ed2c 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
if (ret < 0)
break;
-
+ ret = IIO_VAL_INT;
*val = data;
break;
case IIO_CHAN_INFO_CALIBBIAS:
@@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
st->core.calib[i] =
st->core.resp->sensor_offset.offset[i];
-
+ ret = IIO_VAL_INT;
*val = st->core.calib[idx];
break;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 7afdac42ed42..01e02b9926d4 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
{
struct hid_sensor_hub_attribute_info timestamp;
+ s32 value;
+ int ret;
hid_sensor_get_reporting_interval(hsdev, usage_id, st);
@@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
st->sensitivity.index, st->sensitivity.report_id,
timestamp.index, timestamp.report_id);
+ ret = sensor_hub_get_feature(hsdev,
+ st->power_state.report_id,
+ st->power_state.index, sizeof(value), &value);
+ if (ret < 0)
+ return ret;
+ if (value < 0)
+ return -EINVAL;
+
return 0;
}
EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index f7fcfa886f72..821919dd245b 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -27,6 +27,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
@@ -52,6 +53,9 @@
#define BMG160_DEF_BW 100
#define BMG160_REG_PMU_BW_RES BIT(7)
+#define BMG160_GYRO_REG_RESET 0x14
+#define BMG160_GYRO_RESET_VAL 0xb6
+
#define BMG160_REG_INT_MAP_0 0x17
#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
int ret;
unsigned int val;
+ /*
+ * Reset chip to get it in a known good state. A delay of 30ms after
+ * reset is required according to the datasheet.
+ */
+ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
+ BMG160_GYRO_RESET_VAL);
+ usleep_range(30000, 30700);
+
ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
if (ret < 0) {
dev_err(dev, "Error reading reg_chip_id\n");
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d18ded45bedd..3ff91e02fee3 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
- tmp = (s64)vals[0] * 1000000000LL >> vals[1];
- tmp1 = do_div(tmp, 1000000000LL);
- tmp0 = tmp;
- return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
+ tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
+ tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
+ return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_INT_MULTIPLE:
{
int i;
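
An IIO_VAL_FRACTIONAL_LOG2 channel encodes its reading as vals[0] / 2^vals[1]; the old code pushed that through do_div(), which is unsigned and mangled negative readings. The corrected math as a self-contained sketch, using a sign-safe arithmetic shift like the kernel's shift_right():

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Arithmetic right shift that is well defined for negatives. */
    static int64_t shift_right(int64_t v, unsigned int s)
    {
            return v < 0 ? -((-v) >> s) : v >> s;
    }

    /* Format val / 2^val2 to nine decimals; signed division keeps the
     * integer and fractional parts consistent for negative inputs. */
    static void format_fractional_log2(int val, int val2)
    {
            int64_t tmp = shift_right((int64_t)val * 1000000000LL, val2);
            long long whole = tmp / 1000000000LL;
            int rem = (int)(tmp % 1000000000LL);

            printf("%lld.%09u\n", whole, (unsigned int)abs(rem));
    }

    int main(void)
    {
            format_fractional_log2(-3, 1);  /* -1.500000000 */
            format_fractional_log2(5, 2);   /*  1.250000000 */
            return 0;
    }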
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5f2680855552..fd0edca0e656 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = true,
+ .bootime = 2,
},
};
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 1dfd1085a04f..9ca691d6c13b 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
sizeof(avmb1_carddef))))
return -EFAULT;
cdef.cardtype = AVM_CARDTYPE_B1;
+ cdef.cardnr = 0;
} else {
if ((retval = copy_from_user(&cdef, data,
sizeof(avmb1_extcarddef))))
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index e4c2c1a1e993..6735c8d6a445 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
*result = true;
r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
- from_cblock(begin), &cmd->dirty_cursor);
+ from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
if (r) {
DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
return r;
@@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
return 0;
}
+ begin = to_cblock(from_cblock(begin) + 1);
+ if (begin == end)
+ break;
+
r = dm_bitset_cursor_next(&cmd->dirty_cursor);
if (r) {
DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
dm_bitset_cursor_end(&cmd->dirty_cursor);
return r;
}
-
- begin = to_cblock(from_cblock(begin) + 1);
}
dm_bitset_cursor_end(&cmd->dirty_cursor);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f8564d63982f..1e217ba84d09 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti)
return r;
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
to_bytes(rs->requested_bitmap_chunk_sectors), 0);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 28955b94d2b2..0b081d170087 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
+ blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
return BLK_MQ_RQ_QUEUE_BUSY;
}
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0f0eb8a3d922..78f36012eaca 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
block = fec_buffer_rs_block(v, fio, n, i);
res = fec_decode_rs8(v, fio, block, &par[offset], neras);
if (res < 0) {
- dm_bufio_release(buf);
-
r = res;
goto error;
}
@@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
done:
r = corrected;
error:
+ dm_bufio_release(buf);
+
if (r < 0 && neras)
DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
v->data_dev->name, (unsigned long long)rsb, r);
@@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
&is_zero) == 0) {
/* skip known zero blocks entirely */
if (is_zero)
- continue;
+ goto done;
/*
* skip if we have already found the theoretical
@@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
if (!verity_fec_is_enabled(v))
return -EOPNOTSUPP;
+ if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+ DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+ return -EIO;
+ }
+
+ fio->level++;
+
if (type == DM_VERITY_BLOCK_TYPE_METADATA)
block += v->data_blocks;
@@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
if (r < 0) {
r = fec_decode_rsb(v, io, fio, rsb, offset, true);
if (r < 0)
- return r;
+ goto done;
}
if (dest)
@@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
r = verity_for_bv_block(v, io, iter, fec_bv_copy);
}
+done:
+ fio->level--;
return r;
}
@@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
memset(fio->bufs, 0, sizeof(fio->bufs));
fio->nbufs = 0;
fio->output = NULL;
+ fio->level = 0;
}
/*
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 7fa0298b995e..bb31ce87a933 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -27,6 +27,9 @@
#define DM_VERITY_FEC_BUF_MAX \
(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION 4
+
#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
#define DM_VERITY_OPT_FEC_START "fec_start"
@@ -58,6 +61,7 @@ struct dm_verity_fec_io {
unsigned nbufs; /* number of buffers allocated */
u8 *output; /* buffer for corrected output */
size_t output_pos;
+ unsigned level; /* recursion level */
};
#ifdef CONFIG_DM_VERITY_FEC
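
verity_fec_decode() can recurse: correcting one block may require reading hash blocks that themselves fail verification and need correction. The fix tracks a per-io depth and refuses past DM_VERITY_FEC_MAX_RECURSION instead of exhausting the stack. The guard pattern as a self-contained sketch:

    #include <stdio.h>

    #define MAX_RECURSION 4

    /* Bump a per-context level on entry, refuse past the cap, and
     * drop the level again on every exit path. */
    static int decode(unsigned int *level)
    {
            int r;

            if (*level >= MAX_RECURSION)
                    return -1;      /* too deep: fail, don't overflow */
            (*level)++;

            /* Recurse while a (simulated) dependency needs decoding. */
            r = *level < 2 ? decode(level) : 0;

            (*level)--;
            return r;
    }

    int main(void)
    {
            unsigned int level = 0;

            printf("decode -> %d (level back to %u)\n",
                   decode(&level), level);
            return 0;
    }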
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index d05fbfdce5e5..5d6c40d86775 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -100,11 +100,6 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
goto err_exit;
ndev->mtu = new_mtu;
- if (netif_running(ndev)) {
- aq_ndev_close(ndev);
- aq_ndev_open(ndev);
- }
-
err_exit:
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ee78444bfb88..cdb02991f249 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
dx_buff->mss = skb_shinfo(skb)->gso_size;
dx_buff->is_txc = 1U;
+ dx_buff->is_ipv6 =
+ (ip_hdr(skb)->version == 6) ? 1U : 0U;
+
dx = aq_ring_next_dx(ring, dx);
dx_buff = &ring->buff_ring[dx];
++ret;
@@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
1U : 0U;
- dx_buff->is_tcp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
- dx_buff->is_udp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+
+ if (ip_hdr(skb)->version == 4) {
+ dx_buff->is_tcp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
+ 1U : 0U;
+ dx_buff->is_udp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
+ 1U : 0U;
+ } else if (ip_hdr(skb)->version == 6) {
+ dx_buff->is_tcp_cso =
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
+ 1U : 0U;
+ dx_buff->is_udp_cso =
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
+ 1U : 0U;
+ }
}
for (; nr_frags--; ++frag_count) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0358e6072d45..3a8a4aa13687 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
+ spin_lock_init(&self->header.lock);
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 257254645068..eecd6d1c4d73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s {
u8 len_l2;
u8 len_l3;
u8 len_l4;
- u8 rsvd2;
+ u8 is_ipv6:1;
+ u8 rsvd2:7;
u32 len_pkt;
};
};
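
The descriptor change steals one bit from the reserved byte for is_ipv6; both bit-fields share the original u8 storage unit, so the __packed layout is unchanged. A self-contained check:

    #include <stdio.h>

    struct before { unsigned char rsvd2; };
    struct after  { unsigned char is_ipv6:1; unsigned char rsvd2:7; };

    int main(void)
    {
            /* Splitting a u8 into 1+7 bit-fields keeps the same
             * storage unit, so both structs are one byte. */
            printf("before=%zu after=%zu\n",
                   sizeof(struct before), sizeof(struct after));
            return 0;
    }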
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index a2b746a2dd50..4ee15ff06a44 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff->len_l3 +
buff->len_l2);
is_gso = true;
+
+ if (buff->is_ipv6)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
} else {
buff_pa_len = buff->len;
@@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
if (unlikely(buff->is_eop)) {
txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+ is_gso = false;
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index cab2931dab9a..42150708191d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff->len_l3 +
buff->len_l2);
is_gso = true;
+
+ if (buff->is_ipv6)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
} else {
buff_pa_len = buff->len;
@@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
if (unlikely(buff->is_eop)) {
txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+ is_gso = false;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0a23034bbe3f..352beff796ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
-#define HW_INTERRUT_ASSERT_SET_0 \
+#define HW_INTERRUPT_ASSERT_SET_0 \
(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
@@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_1 \
+#define HW_INTERRUPT_ASSERT_SET_1 \
(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
@@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_2 \
+#define HW_INTERRUPT_ASSERT_SET_2 \
(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ac76fc251d26..a851f95c307a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
bnx2x_release_phy_lock(bp);
}
- if (attn & HW_INTERRUT_ASSERT_SET_0) {
+ if (attn & HW_INTERRUPT_ASSERT_SET_0) {
val = REG_RD(bp, reg_offset);
- val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
REG_WR(bp, reg_offset, val);
BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
- (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
bnx2x_panic();
}
}
@@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
BNX2X_ERR("FATAL error from DORQ\n");
}
- if (attn & HW_INTERRUT_ASSERT_SET_1) {
+ if (attn & HW_INTERRUPT_ASSERT_SET_1) {
int port = BP_PORT(bp);
int reg_offset;
@@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
val = REG_RD(bp, reg_offset);
- val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
REG_WR(bp, reg_offset, val);
BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
- (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
bnx2x_panic();
}
}
@@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
}
}
- if (attn & HW_INTERRUT_ASSERT_SET_2) {
+ if (attn & HW_INTERRUPT_ASSERT_SET_2) {
int port = BP_PORT(bp);
int reg_offset;
@@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
val = REG_RD(bp, reg_offset);
- val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
REG_WR(bp, reg_offset, val);
BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
- (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
bnx2x_panic();
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 32de4589d16a..1f1e54ba0ecb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
for (j = 0; j < max_idx; j++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+ dma_addr_t mapping = rx_buf->mapping;
void *data = rx_buf->data;
if (!data)
continue;
- dma_unmap_single(&pdev->dev, rx_buf->mapping,
- bp->rx_buf_use_size, bp->rx_dir);
-
rx_buf->data = NULL;
- if (BNXT_RX_PAGE_MODE(bp))
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ mapping -= bp->rx_dma_offset;
+ dma_unmap_page(&pdev->dev, mapping,
+ PAGE_SIZE, bp->rx_dir);
__free_page(data);
- else
+ } else {
+ dma_unmap_single(&pdev->dev, mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir);
kfree(data);
+ }
}
for (j = 0; j < max_agg_idx; j++) {
@@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
return 0;
}
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+}
+
static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
@@ -4732,7 +4749,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
- rc, i);
+ i, rc);
return rc;
}
}
@@ -5006,6 +5023,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
+ bnxt_init_cp_rings(bp);
bnxt_init_rx_rings(bp);
bnxt_init_tx_rings(bp);
bnxt_init_ring_grps(bp, irq_re_init);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9e59663a6ead..0f6811860ad5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1930,13 +1930,13 @@ static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
struct bfi_ioc_ctrl_req enable_req;
- struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = htons(ioc->clscode);
- do_gettimeofday(&tv);
- enable_req.tv_sec = ntohl(tv.tv_sec);
+ enable_req.rsvd = htons(0);
+ /* overflow in 2106 */
+ enable_req.tv_sec = ntohl(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
+ disable_req.clscode = htons(ioc->clscode);
+ disable_req.rsvd = htons(0);
+ /* overflow in 2106 */
+ disable_req.tv_sec = ntohl(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
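
The bna change above swaps do_gettimeofday() for ktime_get_real_seconds(), which returns a 64-bit time64_t; the wire format still truncates the value to 32 bits, hence the "/* overflow in 2106 */" markers. A minimal userspace sketch (names local to the example) of why unsigned 32-bit second counters wrap in 2106 while signed ones wrap in 2038:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* 2^31 - 1 seconds after the 1970 epoch: signed wrap in 2038. */
                int64_t s_signed = INT32_MAX;
                /* 2^32 - 1 seconds after the epoch: unsigned wrap in 2106. */
                int64_t s_unsigned = UINT32_MAX;

                printf("signed 32-bit seconds last ~%lld years (2038 problem)\n",
                       (long long)(s_signed / 86400 / 365));
                printf("unsigned 32-bit seconds last ~%lld years (2106 problem)\n",
                       (long long)(s_unsigned / 86400 / 365));
                return 0;
        }
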
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 30e855004c57..02dd5246dfae 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4939,8 +4939,9 @@ static int
__be_cmd_set_logical_link_config(struct be_adapter *adapter,
int link_state, int version, u8 domain)
{
- struct be_mcc_wrb *wrb;
struct be_cmd_req_set_ll_link *req;
+ struct be_mcc_wrb *wrb;
+ u32 link_config = 0;
int status;
mutex_lock(&adapter->mcc_lock);
@@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
link_state == IFLA_VF_LINK_STATE_AUTO)
- req->link_config |= PLINK_ENABLE;
+ link_config |= PLINK_ENABLE;
if (link_state == IFLA_VF_LINK_STATE_AUTO)
- req->link_config |= PLINK_TRACK;
+ link_config |= PLINK_TRACK;
+
+ req->link_config = cpu_to_le32(link_config);
status = be_mcc_notify_wait(adapter);
err:
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 992ebe973d25..f819843e2bae 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -189,11 +189,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
nps_enet_tx_handler(ndev);
work_done = nps_enet_rx_handler(ndev);
- if (work_done < budget) {
+ if ((work_done < budget) && napi_complete_done(napi, work_done)) {
u32 buf_int_enable_value = 0;
- napi_complete_done(napi, work_done);
-
/* set tx_done and rx_rdy bits */
buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
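
The nps_enet hunk above relies on napi_complete_done() returning a bool: it returns false when NAPI polling must continue (for example under busy polling), so interrupts may only be re-enabled when it returns true. A minimal kernel-style sketch of the idiom, assuming hypothetical driver helpers my_rx_poll() and my_unmask_irqs():

        /* Kernel-context sketch, not compilable stand-alone; my_rx_poll()
         * and my_unmask_irqs() are hypothetical driver helpers.
         */
        static int my_napi_poll(struct napi_struct *napi, int budget)
        {
                int work_done = my_rx_poll(napi, budget);

                /* napi_complete_done() returns false when polling must
                 * continue, so only re-arm interrupts when NAPI really
                 * completed. */
                if (work_done < budget && napi_complete_done(napi, work_done))
                        my_unmask_irqs(napi);

                return work_done;
        }
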
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 928b0df2b8e0..ade6b3e4ed13 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -28,8 +28,10 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <net/ip.h>
#include <net/ncsi.h>
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3239d27143b9..bdd8cdd732fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
else
*link_status = 0;
- ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
- if (!ret)
- *link_status = *link_status && sfp_prsnt;
+ if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
+ ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
+ &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
+ }
mac_cb->link = *link_status;
}
@@ -855,7 +858,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
of_node_put(np);
np = of_parse_phandle(to_of_node(mac_cb->fw_port),
- "serdes-syscon", 0);
+ "serdes-syscon", 0);
syscon = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR_OR_NULL(syscon)) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 90dbda792614..403ea9db6dbd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key(
mac_key->high.bits.mac_3 = addr[3];
mac_key->low.bits.mac_4 = addr[4];
mac_key->low.bits.mac_5 = addr[5];
+ mac_key->low.bits.port_vlan = 0;
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
@@ -2924,10 +2925,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
/* find the tcam entry index for promisc */
entry_index = dsaf_promisc_tcam_entry(port);
+ memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
+ memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
+
/* config key mask */
if (enable) {
- memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
- memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a2c22d084ce9..e13aa064a8e9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
return 0;
}
+int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE,
+ argv4.package.count = 1,
+ argv4.package.elements = &obj_args,
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_GET_SFP_STAT_FUNC, &argv4);
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return -ENODEV;
+
+ *sfp_prsnt = obj->integer.value;
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
/**
* hns_mac_config_sds_loopback - set loop back for serdes
* @mac_cb: mac control block
@@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
- misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
} else {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2175cced402f..e9af89ad039c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
/* Quiesce the device without resetting the hardware */
e1000e_down(adapter, false);
e1000_free_irq(adapter);
- e1000e_reset_interrupt_capability(adapter);
}
+ e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e8a8351c8ea9..82a95cc2c8ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4438,8 +4438,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_enable(&q_vector->napi);
+ }
}
/**
@@ -4453,8 +4457,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_disable(&q_vector->napi);
+ }
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 55957246c0e8..b5d5519542e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
- struct netdev_lag_upper_info *lag_upper_info;
+ struct netdev_lag_upper_info *lag_upper_info = NULL;
bool is_bonded;
int bond_status = 0;
int num_slaves = 0;
@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
if (!netif_is_lag_master(upper))
return 0;
- lag_upper_info = info->upper_info;
+ if (info->linking)
+ lag_upper_info = info->upper_info;
/* The event may still be of interest if the slave does not belong to
* us, but is enslaved to a master which has one or more of our netdevs
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 06c9f4100cb9..6ad44be08b33 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -25,6 +25,7 @@
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
+#include <linux/circ_buf.h>
#include "moxart_ether.h"
@@ -278,6 +279,13 @@ rx_next:
return rx;
}
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
static void moxart_tx_finished(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
tx_tail = TX_NEXT(tx_tail);
}
priv->tx_tail = tx_tail;
+ if (netif_queue_stopped(ndev) &&
+ moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+ netif_wake_queue(ndev);
}
static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void *desc;
unsigned int len;
- unsigned int tx_head = priv->tx_head;
+ unsigned int tx_head;
u32 txdes1;
int ret = NETDEV_TX_BUSY;
+ spin_lock_irq(&priv->txlock);
+
+ tx_head = priv->tx_head;
desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
- spin_lock_irq(&priv->txlock);
+ if (moxart_tx_queue_space(ndev) == 1)
+ netif_stop_queue(ndev);
+
if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n");
priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 93a9563ac7c6..afc32ec998c0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -59,6 +59,7 @@
#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
#define TX_BUF_SIZE 1600
#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD 16
#define RX_DESC_NUM 64
#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
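
The moxart changes gate netif_stop_queue()/netif_wake_queue() on CIRC_SPACE(), which for a power-of-two ring reports the number of free slots while keeping one slot empty so that head == tail means "empty". A standalone sketch, with the two macros reproduced from their well-known linux/circ_buf.h definitions:

        #include <stdio.h>

        /* As in include/linux/circ_buf.h; size must be a power of two. */
        #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
        #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

        int main(void)
        {
                unsigned int head = 5, tail = 60, size = 64; /* head has wrapped */

                printf("used  = %u\n", CIRC_CNT(head, tail, size));   /* 9 */
                printf("space = %u\n", CIRC_SPACE(head, tail, size)); /* 54 */
                return 0;
        }

Stopping the queue when space drops to 1 and waking it only once space reaches TX_WAKE_THRESHOLD gives the driver hysteresis, so the queue does not thrash between stopped and awake on every completed descriptor.
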
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9179a99563af..a41377e26c07 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3275,9 +3275,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
+ unregister_netdev(nn->netdev);
+
if (nn->xdp_prog)
bpf_prog_put(nn->xdp_prog);
if (nn->bpf_offload_xdp)
nfp_net_xdp_offload(nn, NULL);
- unregister_netdev(nn->netdev);
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7cd76b6b5cb9..2ae852454780 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
{
bool want[OFDPA_CTRL_MAX] = { 0, };
bool prev_ctrls[OFDPA_CTRL_MAX];
- u8 uninitialized_var(prev_state);
+ u8 prev_state;
int err;
int i;
- if (switchdev_trans_ph_prepare(trans)) {
- memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
- prev_state = ofdpa_port->stp_state;
- }
-
- if (ofdpa_port->stp_state == state)
+ prev_state = ofdpa_port->stp_state;
+ if (prev_state == state)
return 0;
+ memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
ofdpa_port->stp_state = state;
switch (state) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9f3d9c67e3fe..fa674a8bda0c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave)
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
u32 slave_port;
+ struct phy_device *phy;
struct cpsw_common *cpsw = priv->cpsw;
soft_reset_slave(slave);
@@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
if (slave->data->phy_node) {
- slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+ phy = of_phy_connect(priv->ndev, slave->data->phy_node,
&cpsw_adjust_link, 0, slave->data->phy_if);
- if (!slave->phy) {
+ if (!phy) {
dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
slave->data->phy_node->full_name,
slave->slave_num);
return;
}
} else {
- slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+ phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
- if (IS_ERR(slave->phy)) {
+ if (IS_ERR(phy)) {
dev_err(priv->dev,
"phy \"%s\" not found on slave %d, err %ld\n",
slave->data->phy_id, slave->slave_num,
- PTR_ERR(slave->phy));
- slave->phy = NULL;
+ PTR_ERR(phy));
return;
}
}
+ slave->phy = phy;
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
}
cpsw_intr_enable(cpsw);
+ netif_trans_update(ndev);
+ netif_tx_wake_all_queues(ndev);
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ffedad2a360a..15b920086251 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
memset(rd, 0, sizeof(*rd));
rd->hw = hwmap + i;
rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
- if (rd->buf == NULL ||
- !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+ if (rd->buf)
+ busaddr = pci_map_single(pdev, rd->buf, len, dir);
+ if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
if (rd->buf) {
net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
__func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
rd = r->rd + j;
busaddr = rd_get_addr(rd);
rd_set_addr_status(rd, 0, 0);
- if (busaddr)
- pci_unmap_single(pdev, busaddr, len, dir);
+ pci_unmap_single(pdev, busaddr, len, dir);
kfree(rd->buf);
rd->buf = NULL;
}
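
The vlsi_ir fix above replaces the "mapping failed iff the cookie is 0" assumption with pci_dma_mapping_error(); the DMA API defines failure only through that check, and 0 can be a valid bus address on some platforms. A kernel-style sketch of the equivalent modern dma_map_single() idiom (map_one() is a hypothetical wrapper):

        /* Kernel-context sketch; dev/buf/len/dir are supplied by the caller. */
        static int map_one(struct device *dev, void *buf, size_t len,
                           enum dma_data_direction dir, dma_addr_t *out)
        {
                dma_addr_t addr = dma_map_single(dev, buf, len, dir);

                /* Failure is signalled only through dma_mapping_error();
                 * comparing the cookie against 0 is not a valid check. */
                if (dma_mapping_error(dev, addr))
                        return -ENOMEM;

                *out = addr;
                return 0;
        }
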
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 6b988f77da08..61941e29daae 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -84,3 +84,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info,
return 0;
}
+EXPORT_SYMBOL(mdiobus_register_board_info);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1be69d8bc909..a2bfc82e95d7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -681,7 +681,7 @@ void phy_stop_machine(struct phy_device *phydev)
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f5552aaaa77a..f3ae88fdf332 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -532,6 +532,7 @@ static const struct driver_info wwan_info = {
#define LENOVO_VENDOR_ID 0x17ef
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
+#define MICROSOFT_VENDOR_ID 0x045e
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -761,6 +762,20 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0b1b9188625d..07f788c49d57 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -517,6 +517,7 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
+#define VENDOR_ID_MICROSOFT 0x045e
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
#define VENDOR_ID_NVIDIA 0x0955
@@ -1294,6 +1295,7 @@ static void intr_callback(struct urb *urb)
}
} else {
if (netif_carrier_ok(tp->netdev)) {
+ netif_stop_queue(tp->netdev);
set_bit(RTL8152_LINK_CHG, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
}
@@ -3169,6 +3171,9 @@ static void set_carrier(struct r8152 *tp)
napi_enable(&tp->napi);
netif_wake_queue(netdev);
netif_info(tp, link, netdev, "carrier on\n");
+ } else if (netif_queue_stopped(netdev) &&
+ skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
+ netif_wake_queue(netdev);
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3702,8 +3707,18 @@ static int rtl8152_resume(struct usb_interface *intf)
tp->rtl_ops.autosuspend_en(tp, false);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
- if (netif_carrier_ok(tp->netdev))
- rtl_start_rx(tp);
+
+ if (netif_carrier_ok(tp->netdev)) {
+ if (rtl8152_get_speed(tp) & LINK_STATUS) {
+ rtl_start_rx(tp);
+ } else {
+ netif_carrier_off(tp->netdev);
+ tp->rtl_ops.disable(tp);
+ netif_info(tp, link, tp->netdev,
+ "linking down\n");
+ }
+ }
+
napi_enable(&tp->napi);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
smp_mb__after_atomic();
@@ -4507,6 +4522,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index de19c7c92bc6..85d949e03f79 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_p2p_info *p2p = &cfg->p2p;
struct brcmf_cfg80211_vif *vif;
+ enum nl80211_iftype iftype;
bool wait_for_disable = false;
int err;
brcmf_dbg(TRACE, "delete P2P vif\n");
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+ iftype = vif->wdev.iftype;
brcmf_cfg80211_arm_vif_event(cfg, vif);
- switch (vif->wdev.iftype) {
+ switch (iftype) {
case NL80211_IFTYPE_P2P_CLIENT:
if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
wait_for_disable = true;
@@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
BRCMF_P2P_DISABLE_TIMEOUT);
err = 0;
- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
brcmf_vif_clear_mgmt_ies(vif);
err = brcmf_p2p_release_p2p_if(vif);
}
@@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
brcmf_remove_interface(vif->ifp, true);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE)
p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
return err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a260cd503200..077bfd8f4c0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (ret)
return ret;
+ if (count == 0)
+ return 0;
iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 99132ea16ede..c5734e1a02d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
qmask |= BIT(vif->hw_queue[ac]);
}
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
qmask |= BIT(vif->cab_queue);
return qmask;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6927caecd48e..486dcceed17a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2401,7 +2401,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
return;
rcu_read_lock();
- sta = mvm->fw_id_to_mac_id[notif->sta_id];
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
if (WARN_ON(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return;
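
The mac80211.c hunk above matters because fw_id_to_mac_id[] is an RCU-protected pointer array: a plain load lacks the annotation and ordering guarantees that rcu_dereference() provides inside rcu_read_lock(). A minimal kernel-style sketch of the read side (struct item and tbl[] are stand-ins):

        /* Kernel-context sketch; tbl[] stands in for an RCU-managed pointer
         * array updated elsewhere with rcu_assign_pointer() and freed only
         * after synchronize_rcu() or via kfree_rcu().
         */
        struct item {
                int id;
        };

        static struct item __rcu *tbl[16];

        static int lookup_id(unsigned int idx)
        {
                struct item *it;
                int id = -1;

                rcu_read_lock();
                it = rcu_dereference(tbl[idx]); /* not a plain tbl[idx] load */
                if (it)
                        id = it->id;    /* dereference only under the lock */
                rcu_read_unlock();

                return id;
        }
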
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index b51a2853cc80..9d28db7f56aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1806,7 +1806,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int queue;
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
@@ -1837,7 +1838,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* enabled-cab_queue to the mask)
*/
if (iwl_mvm_is_dqa_supported(mvm) &&
- vif->type == NL80211_IFTYPE_AP) {
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvmvif->bcast_sta.sta_id,
@@ -1862,7 +1864,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 3f37075f4cde..1ba0a6f55503 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -506,6 +506,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
/*
* Handle legacy hostapd as well, where station may be added
* only after assoc. Take care of the case where we send a
@@ -517,7 +518,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
- WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
+ WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+ "fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
@@ -584,7 +586,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
iwl_mvm_vif_from_mac80211(info.control.vif);
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP) {
+ info.control.vif->type == NL80211_IFTYPE_AP ||
+ info.control.vif->type == NL80211_IFTYPE_ADHOC) {
sta_id = mvmvif->bcast_sta.sta_id;
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index caea350f05aa..bdc379178e87 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
unsigned long flags;
struct rtl_c2hcmd *c2hcmd;
- c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
+ c2hcmd = kmalloc(sizeof(*c2hcmd),
+ in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!c2hcmd)
goto label_err;
- c2hcmd->val = kmalloc(len, GFP_KERNEL);
+ c2hcmd->val = kmalloc(len,
+ in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!c2hcmd->val)
goto label_err2;
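
rtl_c2hcmd_enqueue() can be reached from interrupt context, where GFP_KERNEL must not be used because it may sleep; the patch selects GFP_ATOMIC there instead. A kernel-style sketch of that selection (ctx_aware_alloc() is a hypothetical wrapper; passing explicit gfp flags from callers is usually preferred over probing in_interrupt()):

        /* Kernel-context sketch; ctx_aware_alloc() is a hypothetical wrapper. */
        static void *ctx_aware_alloc(size_t len)
        {
                /* GFP_KERNEL may sleep and must never be used from hard or
                 * soft IRQ context; GFP_ATOMIC never sleeps but can fail
                 * under memory pressure. */
                gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

                return kmalloc(len, gfp);
        }
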
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b3b57fef446..9583a5f58a1d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.nr = segments - 1;
+ cmnd->dsm.nr = cpu_to_le32(segments - 1);
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
req->special_vec.bv_page = virt_to_page(range);
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a7bcff45f437..76450b0c55f1 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
u16 status;
WARN_ON(req == NULL || slog == NULL);
- if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
+ if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
status = nvmet_get_smart_log_all(req, slog);
else
status = nvmet_get_smart_log_nsid(req, slog);
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4195115c7e54..6b0baa9caab9 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
- nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) <<
+ nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
(req->ns->blksize_shift - 9)) + 1;
if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
@@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_execute_dsm;
- req->data_len = le32_to_cpu(cmd->dsm.nr + 1) *
+ req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
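
The NVMe fixes above are endianness bugs of two different shapes: dsm.nr was stored without cpu_to_le32(), and the target added 1 inside le32_to_cpu() instead of outside, which corrupts the value on big-endian hosts. A standalone sketch of the second failure mode, using a hand-rolled byte swap to stand in for le32_to_cpu() on a big-endian CPU:

        #include <stdint.h>
        #include <stdio.h>

        /* Stand-in for le32_to_cpu() on a big-endian host: a byte swap. */
        static uint32_t le32_to_cpu_be(uint32_t v)
        {
                return (v >> 24) | ((v >> 8) & 0xff00) |
                       ((v << 8) & 0xff0000) | (v << 24);
        }

        int main(void)
        {
                uint32_t wire = le32_to_cpu_be(7); /* 7 in LE wire order */

                printf("correct: %u\n", le32_to_cpu_be(wire) + 1); /* 8 */
                printf("broken:  %u\n", le32_to_cpu_be(wire + 1)); /* 16777223 */
                return 0;
        }
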
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index dfb8a69afc28..d2d2ba5b8a68 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -89,6 +89,7 @@ config PCI_HISI
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
+ select PCI_HOST_COMMON
help
Say Y here if you want PCIe controller support on HiSilicon
Hip05 and Hip06 SoCs
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index fcd3ef845883..6d23683c0892 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
return 0;
}
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
artpec6_pcie->pci = pci;
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index b6c832ba39dd..f20d494922ab 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -86,6 +86,9 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
return 0;
}
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index b89c373555c5..6e031b522529 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -375,7 +375,6 @@ static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
index -= node * PEM_MAX_DOM_IN_NODE;
res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
FIELD_PREP(PEM_INDX_MASK, index);
- res_pem->end = res_pem->start + SZ_16M - 1;
res_pem->flags = IORESOURCE_MEM;
}
@@ -399,8 +398,15 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
*/
if (ret) {
thunder_pem_legacy_fw(root, res_pem);
- /* Reserve PEM-specific resources and PCI configuration space */
+ /*
+ * Reserve the 64K PEM-specific resources. The full 16M range
+ * size is required for the thunder_pem_init() call.
+ */
+ res_pem->end = res_pem->start + SZ_64K - 1;
thunder_pem_reserve_range(dev, root->segment, res_pem);
+ res_pem->end = res_pem->start + SZ_16M - 1;
+
+ /* Reserve PCI configuration space as well. */
thunder_pem_reserve_range(dev, root->segment, &cfg->res);
}
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index d69046537b75..32822b0d9cd0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2010,29 +2010,57 @@ out_err:
return ERR_PTR(ret);
}
-static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
+static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
{
pctldev->p = create_pinctrl(pctldev->dev, pctldev);
- if (!IS_ERR(pctldev->p)) {
- kref_get(&pctldev->p->users);
- pctldev->hog_default =
- pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(pctldev->hog_default)) {
- dev_dbg(pctldev->dev,
- "failed to lookup the default state\n");
- } else {
- if (pinctrl_select_state(pctldev->p,
- pctldev->hog_default))
- dev_err(pctldev->dev,
- "failed to select default state\n");
- }
+ if (PTR_ERR(pctldev->p) == -ENODEV) {
+ dev_dbg(pctldev->dev, "no hogs found\n");
- pctldev->hog_sleep =
- pinctrl_lookup_state(pctldev->p,
- PINCTRL_STATE_SLEEP);
- if (IS_ERR(pctldev->hog_sleep))
- dev_dbg(pctldev->dev,
- "failed to lookup the sleep state\n");
+ return 0;
+ }
+
+ if (IS_ERR(pctldev->p)) {
+ dev_err(pctldev->dev, "error claiming hogs: %li\n",
+ PTR_ERR(pctldev->p));
+
+ return PTR_ERR(pctldev->p);
+ }
+
+ kref_get(&pctldev->p->users);
+ pctldev->hog_default =
+ pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(pctldev->hog_default)) {
+ dev_dbg(pctldev->dev,
+ "failed to lookup the default state\n");
+ } else {
+ if (pinctrl_select_state(pctldev->p,
+ pctldev->hog_default))
+ dev_err(pctldev->dev,
+ "failed to select default state\n");
+ }
+
+ pctldev->hog_sleep =
+ pinctrl_lookup_state(pctldev->p,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(pctldev->hog_sleep))
+ dev_dbg(pctldev->dev,
+ "failed to lookup the sleep state\n");
+
+ return 0;
+}
+
+int pinctrl_enable(struct pinctrl_dev *pctldev)
+{
+ int error;
+
+ error = pinctrl_claim_hogs(pctldev);
+ if (error) {
+ dev_err(pctldev->dev, "could not claim hogs: %i\n",
+ error);
+ mutex_destroy(&pctldev->mutex);
+ kfree(pctldev);
+
+ return error;
}
mutex_lock(&pinctrldev_list_mutex);
@@ -2043,6 +2071,7 @@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
return 0;
}
+EXPORT_SYMBOL_GPL(pinctrl_enable);
/**
* pinctrl_register() - register a pin controller device
@@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
if (IS_ERR(pctldev))
return pctldev;
- error = pinctrl_create_and_start(pctldev);
- if (error) {
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
-
+ error = pinctrl_enable(pctldev);
+ if (error)
return ERR_PTR(error);
- }
return pctldev;
}
EXPORT_SYMBOL_GPL(pinctrl_register);
+/**
+ * pinctrl_register_and_init() - register and init pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ * @pctldev: pin controller device
+ *
+ * Note that pinctrl_enable() still needs to be manually called after
+ * this once the driver is ready.
+ */
int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev)
{
struct pinctrl_dev *p;
- int error;
p = pinctrl_init_controller(pctldesc, dev, driver_data);
if (IS_ERR(p))
@@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
*/
*pctldev = p;
- error = pinctrl_create_and_start(p);
- if (error) {
- mutex_destroy(&p->mutex);
- kfree(p);
- *pctldev = NULL;
-
- return error;
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
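
The pinctrl rework above splits registration in two: pinctrl_register_and_init() prepares the controller, and pinctrl_enable() later adds it to the global list and claims hogs, which may call back into a driver that must already be fully set up. A probe sketch under that split, with struct my_priv and my_fill_desc() as hypothetical stand-ins:

        /* Kernel-context probe sketch; struct my_priv and my_fill_desc()
         * are hypothetical stand-ins for a driver's state and setup.
         */
        struct my_priv {
                struct pinctrl_desc desc;
                struct pinctrl_dev *pctl;
        };

        static int my_pinctrl_probe(struct platform_device *pdev)
        {
                struct my_priv *priv;
                int ret;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                my_fill_desc(&priv->desc);      /* hypothetical helper */

                ret = devm_pinctrl_register_and_init(&pdev->dev, &priv->desc,
                                                     priv, &priv->pctl);
                if (ret)
                        return ret;

                /* Finish any setup that hog handling could call back into. */

                /* Only now make the controller live and claim hogs. */
                return pinctrl_enable(priv->pctl);
        }
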
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a7ace9e1ad81..74bd90dfd7b1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
- return 0;
+ return pinctrl_enable(ipctl->pctl);
free:
imx_free_resources(ipctl);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 8b2d45e85bae..9c267dcda094 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev)
dev_info(pcs->dev, "%i pins at pa %p size %u\n",
pcs->desc.npins, pcs->base, pcs->size);
- return 0;
+ return pinctrl_enable(pcs->pctl);
free:
pcs_free_resources(pcs);
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 08150a321be6..a70157f0acf4 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
pmx->pctl_desc.pins = pmx->pins;
pmx->pctl_desc.npins = pfc->info->nr_pins;
- return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
- &pmx->pctl);
+ ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
+ &pmx->pctl);
+ if (ret) {
+ dev_err(pfc->dev, "could not register: %i\n", ret);
+
+ return ret;
+ }
+
+ return pinctrl_enable(pmx->pctl);
}
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 717e3404900c..362c50918c13 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iod);
+ return pinctrl_enable(iod->pctl);
+
exit_out:
of_node_put(np);
return ret;
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 40f1136f5568..058db724b5a2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
rc = -EIO;
goto out;
}
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN(
+ "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
@@ -761,9 +767,10 @@ out:
}
/*
- * Fetch just the mkvp value via query_crypto_facility from adapter.
+ * Fetch the current and old mkvp values via
+ * query_crypto_facility from adapter.
*/
-static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
+static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
{
int rc, found = 0;
size_t rlen, vlen;
@@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
rc = query_crypto_facility(cardnr, domain, "STATICSA",
rarray, &rlen, varray, &vlen);
if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
- if (rarray[64] == '2') {
+ if (rarray[8*8] == '2') {
/* current master key state is valid */
- *mkvp = *((u64 *)(varray + 184));
+ mkvp[0] = *((u64 *)(varray + 184));
+ mkvp[1] = *((u64 *)(varray + 172));
found = 1;
}
}
@@ -796,14 +804,14 @@ struct mkvp_info {
struct list_head list;
u16 cardnr;
u16 domain;
- u64 mkvp;
+ u64 mkvp[2];
};
/* a list with mkvp_info entries */
static LIST_HEAD(mkvp_list);
static DEFINE_SPINLOCK(mkvp_list_lock);
-static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
+static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
{
int rc = -ENOENT;
struct mkvp_info *ptr;
@@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
list_for_each_entry(ptr, &mkvp_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
- *mkvp = ptr->mkvp;
+ memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
rc = 0;
break;
}
@@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
return rc;
}
-static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
+static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
{
int found = 0;
struct mkvp_info *ptr;
@@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
list_for_each_entry(ptr, &mkvp_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
- ptr->mkvp = mkvp;
+ memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
found = 1;
break;
}
@@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
}
ptr->cardnr = cardnr;
ptr->domain = domain;
- ptr->mkvp = mkvp;
+ memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
list_add(&ptr->list, &mkvp_list);
}
spin_unlock_bh(&mkvp_list_lock);
@@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey,
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
struct zcrypt_device_matrix *device_matrix;
u16 card, dom;
- u64 mkvp;
- int i, rc;
+ u64 mkvp[2];
+ int i, rc, oi = -1;
/* mkvp must not be zero */
if (t->mkvp == 0)
@@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
device_matrix->device[i].functions & 0x04) {
/* an enabled CCA Coprocessor card */
/* try cached mkvp */
- if (mkvp_cache_fetch(card, dom, &mkvp) == 0 &&
- t->mkvp == mkvp) {
+ if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
+ t->mkvp == mkvp[0]) {
if (!verify)
break;
/* verify: fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, &mkvp) == 0) {
+ if (fetch_mkvp(card, dom, mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp)
+ if (t->mkvp == mkvp[0])
break;
}
}
@@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey,
card = AP_QID_CARD(device_matrix->device[i].qid);
dom = AP_QID_QUEUE(device_matrix->device[i].qid);
/* fresh fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, &mkvp) == 0) {
+ if (fetch_mkvp(card, dom, mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp)
+ if (t->mkvp == mkvp[0])
break;
+ if (t->mkvp == mkvp[1] && oi < 0)
+ oi = i;
}
}
+ if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+ /* old mkvp matched, use this card then */
+ card = AP_QID_CARD(device_matrix->device[oi].qid);
+ dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+ }
}
- if (i < MAX_ZDEV_ENTRIES) {
+ if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
if (pcardnr)
*pcardnr = card;
if (pdomain)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e7addea8741b..d9561e39c3b2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
-int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
+int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
+ int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int, int, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 315d8a2db7c0..9a5f99ccb122 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
* @card: qeth card structure, to check max. elems.
* @skb: SKB address
* @extra_elems: extra elems needed, to check against max.
+ * @data_offset: range starts at skb->data + data_offset
*
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
* skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
* Note: extra_elems is not included in the returned result.
*/
int qeth_get_elements_no(struct qeth_card *card,
- struct sk_buff *skb, int extra_elems)
+ struct sk_buff *skb, int extra_elems, int data_offset)
{
int elements = qeth_get_elements_for_range(
- (addr_t)skb->data,
+ (addr_t)skb->data + data_offset,
(addr_t)skb->data + skb_headlen(skb)) +
qeth_get_elements_for_frags(skb);
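
With the qeth change above, element counting starts at skb->data + data_offset; each QDIO buffer element covers one page, so the element count for a contiguous range is the number of pages it touches. A standalone sketch of that arithmetic, assuming 4K pages:

        #include <stdio.h>

        #define PAGE_SHIFT 12

        /* Number of 4K pages touched by [start, end): the qeth element
         * count for one contiguous byte range. */
        static unsigned long pages_spanned(unsigned long start,
                                           unsigned long end)
        {
                if (start >= end)
                        return 0;
                return ((end - 1) >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        }

        int main(void)
        {
                /* 100 bytes straddling a page boundary touch two pages... */
                printf("%lu\n", pages_spanned(4090, 4190));      /* 2 */
                /* ...while skipping a 14-byte (ETH_HLEN) header can drop
                 * a whole element. */
                printf("%lu\n", pages_spanned(4090 + 14, 4190)); /* 1 */
                return 0;
        }

Skipping the header this way is exactly what the new data_offset parameter accounts for: without it, the count could be off by one element whenever the skipped bytes were the only ones on their page.
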
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bea483307618..af4e6a639fec 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* chaining we can not send long frag lists
*/
if ((card->info.type != QETH_CARD_TYPE_IQD) &&
- !qeth_get_elements_no(card, new_skb, 0)) {
+ !qeth_get_elements_no(card, new_skb, 0, 0)) {
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
@@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- elements = qeth_get_elements_no(card, new_skb, elements_needed);
+ elements = qeth_get_elements_no(card, new_skb, elements_needed,
+ (data_offset > 0) ? data_offset : 0);
if (!elements) {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06d0addcc058..653f0fb76573 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
char daddr[16];
struct af_iucv_trans_hdr *iucv_hdr;
- skb_pull(skb, 14);
- card->dev->header_ops->create(skb, card->dev, 0,
- card->dev->dev_addr, card->dev->dev_addr,
- card->dev->addr_len);
- skb_pull(skb, 14);
- iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
hdr->hdr.l3.ext_flags = 0;
- hdr->hdr.l3.length = skb->len;
+ hdr->hdr.l3.length = skb->len - ETH_HLEN;
hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+ iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
memset(daddr, 0, sizeof(daddr));
daddr[0] = 0xfe;
daddr[1] = 0x80;
@@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
!skb_is_nonlinear(skb)) {
new_skb = skb;
- if (new_skb->protocol == ETH_P_AF_IUCV)
- data_offset = 0;
- else
- data_offset = ETH_HLEN;
+ data_offset = ETH_HLEN;
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;
@@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if ((card->info.type != QETH_CARD_TYPE_IQD) &&
((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
- (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+ (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
@@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
elements = use_tso ?
qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
- qeth_get_elements_no(card, new_skb, hdr_elements);
+ qeth_get_elements_no(card, new_skb, hdr_elements,
+ (data_offset > 0) ? data_offset : 0);
if (!elements) {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 19125d72f322..e5a2d590a104 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q)
scsi_starved_list_run(sdev->host);
if (q->mq_ops)
- blk_mq_start_stopped_hw_queues(q, false);
+ blk_mq_run_hw_queues(q, false);
else
blk_run_queue(q);
}
@@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error,
!list_empty(&sdev->host->starved_list))
kblockd_schedule_work(&sdev->requeue_work);
else
- blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_run_hw_queues(q, true);
} else {
unsigned long flags;
@@ -1974,7 +1974,7 @@ out:
case BLK_MQ_RQ_QUEUE_BUSY:
if (atomic_read(&sdev->device_busy) == 0 &&
!scsi_device_blocked(sdev))
- blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
break;
case BLK_MQ_RQ_QUEUE_ERROR:
/*
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 7cbad0d45b9c..6ba270e0494d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
ret = PTR_ERR(vmfile);
goto out;
}
+ vmfile->f_mode |= FMODE_LSEEK;
asma->file = vmfile;
}
get_file(asma->file);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1f4733b80c87..f3b089b7c0b6 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type,
return xenbus_command_reply(u, XS_ERROR, "ENOENT");
rc = xenbus_dev_request_and_reply(&u->u.msg, u);
- if (rc)
+ if (rc && trans) {
+ list_del(&trans->list);
kfree(trans);
+ }
out:
return rc;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 15e1db8738ae..dd3f5fabfdf6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -972,6 +972,86 @@ out:
return rc;
}
+ssize_t cifs_file_copychunk_range(unsigned int xid,
+ struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff,
+ size_t len, unsigned int flags)
+{
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *target_inode = file_inode(dst_file);
+ struct cifsFileInfo *smb_file_src;
+ struct cifsFileInfo *smb_file_target;
+ struct cifs_tcon *src_tcon;
+ struct cifs_tcon *target_tcon;
+ ssize_t rc;
+
+ cifs_dbg(FYI, "copychunk range\n");
+
+ if (src_inode == target_inode) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!src_file->private_data || !dst_file->private_data) {
+ rc = -EBADF;
+ cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+ goto out;
+ }
+
+ rc = -EXDEV;
+ smb_file_target = dst_file->private_data;
+ smb_file_src = src_file->private_data;
+ src_tcon = tlink_tcon(smb_file_src->tlink);
+ target_tcon = tlink_tcon(smb_file_target->tlink);
+
+ if (src_tcon->ses != target_tcon->ses) {
+ cifs_dbg(VFS, "source and target of copy not on same server\n");
+ goto out;
+ }
+
+ /*
+ * Note: the cifs case is easier than btrfs, since the server is
+ * responsible for checking for proper open modes and file type;
+ * if it wants, the server could even support a copy range where
+ * source = target.
+ */
+ lock_two_nondirectories(target_inode, src_inode);
+
+ cifs_dbg(FYI, "about to flush pages\n");
+ /* should we flush the first and last pages first? */
+ truncate_inode_pages(&target_inode->i_data, 0);
+
+ if (target_tcon->ses->server->ops->copychunk_range)
+ rc = target_tcon->ses->server->ops->copychunk_range(xid,
+ smb_file_src, smb_file_target, off, len, destoff);
+ else
+ rc = -EOPNOTSUPP;
+
+ /* force revalidate of size and timestamps of target file now
+ * that target is updated on the server
+ */
+ CIFS_I(target_inode)->time = 0;
+ /* although unlocking in the reverse order from locking is not
+ * strictly necessary here it is a little cleaner to be consistent
+ */
+ unlock_two_nondirectories(src_inode, target_inode);
+
+out:
+ return rc;
+}
+
+static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff,
+ size_t len, unsigned int flags)
+{
+ unsigned int xid = get_xid();
+ ssize_t rc;
+
+ rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
+ len, flags);
+ free_xid(xid);
+ return rc;
+}
+
const struct file_operations cifs_file_ops = {
.read_iter = cifs_loose_read_iter,
.write_iter = cifs_file_write_iter,
@@ -984,6 +1064,7 @@ const struct file_operations cifs_file_ops = {
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
@@ -1001,6 +1082,7 @@ const struct file_operations cifs_file_strict_ops = {
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
@@ -1018,6 +1100,7 @@ const struct file_operations cifs_file_direct_ops = {
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.llseek = cifs_llseek,
.setlease = cifs_setlease,
@@ -1035,6 +1118,7 @@ const struct file_operations cifs_file_nobrl_ops = {
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
@@ -1051,6 +1135,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
@@ -1067,6 +1152,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.llseek = cifs_llseek,
.setlease = cifs_setlease,
@@ -1078,6 +1164,7 @@ const struct file_operations cifs_dir_ops = {
.release = cifs_closedir,
.read = generic_read_dir,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.llseek = generic_file_llseek,
};
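
Wiring up .copy_file_range means a userspace copy_file_range(2) call on these files can be offloaded to the server as an SMB copychunk rather than going through a client-side read/write loop. A standalone usage sketch, assuming the glibc >= 2.27 wrapper:

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/types.h>
        #include <unistd.h>

        int main(int argc, char **argv)
        {
                int in, out;
                loff_t off_in = 0, off_out = 0;
                ssize_t n;

                if (argc != 3) {
                        fprintf(stderr, "usage: %s SRC DST\n", argv[0]);
                        return 1;
                }

                in = open(argv[1], O_RDONLY);
                out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
                if (in < 0 || out < 0) {
                        perror("open");
                        return 1;
                }

                /* On CIFS this may be offloaded server-side as a copychunk. */
                while ((n = copy_file_range(in, &off_in, out, &off_out,
                                            1 << 20, 0)) > 0)
                        ;
                if (n < 0)
                        perror("copy_file_range");

                close(in);
                close(out);
                return n < 0;
        }
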
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index da717fee3026..30bf89b1fd9a 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -139,6 +139,11 @@ extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
# define cifs_listxattr NULL
#endif
+extern ssize_t cifs_file_copychunk_range(unsigned int xid,
+ struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff,
+ size_t len, unsigned int flags);
+
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_CIFS_NFSD_EXPORT
extern const struct export_operations cifs_export_ops;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d42dd3288647..d07f13a63369 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -243,6 +243,7 @@ struct smb_version_operations {
/* verify the message */
int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+ int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
void (*downgrade_oplock)(struct TCP_Server_Info *,
struct cifsInodeInfo *, bool);
/* process transaction2 response */
@@ -407,9 +408,10 @@ struct smb_version_operations {
char * (*create_lease_buf)(u8 *, u8);
/* parse lease context buffer and return oplock/epoch info */
__u8 (*parse_lease_buf)(void *, unsigned int *);
- int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
- struct cifsFileInfo *target_file, u64 src_off, u64 len,
- u64 dest_off);
+ ssize_t (*copychunk_range)(const unsigned int,
+ struct cifsFileInfo *src_file,
+ struct cifsFileInfo *target_file,
+ u64 src_off, u64 len, u64 dest_off);
int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
struct cifsFileInfo *target_file, u64 src_off, u64 len,
u64 dest_off);
@@ -1343,6 +1345,7 @@ struct mid_q_entry {
void *callback_data; /* general purpose pointer for callback */
void *resp_buf; /* pointer to received SMB header */
int mid_state; /* wish this were enum but can not pass to wait_event */
+ unsigned int mid_flags;
__le16 command; /* smb command code */
bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
@@ -1350,6 +1353,12 @@ struct mid_q_entry {
bool decrypted:1; /* decrypted entry */
};
+struct close_cancelled_open {
+ struct cifs_fid fid;
+ struct cifs_tcon *tcon;
+ struct work_struct work;
+};
+
/* Make code in transport.c a little cleaner by moving
update of optional stats into function below */
#ifdef CONFIG_CIFS_STATS2
@@ -1481,6 +1490,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define MID_RESPONSE_MALFORMED 0x10
#define MID_SHUTDOWN 0x20
+/* Flags */
+#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
+
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
#define CIFS_SMALL_BUFFER 1
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 97e5d236d265..ec5e5e514fdd 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -79,7 +79,8 @@ extern void cifs_delete_mid(struct mid_q_entry *mid);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
-extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
+extern int cifs_discard_remaining_data(struct TCP_Server_Info *server,
+ char *buf);
extern int cifs_call_async(struct TCP_Server_Info *server,
struct smb_rqst *rqst,
mid_receive_t *receive, mid_callback_t *callback,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 066950671929..967b92631807 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1400,9 +1400,9 @@ openRetry:
* current bigbuf.
*/
int
-cifs_discard_remaining_data(struct TCP_Server_Info *server)
+cifs_discard_remaining_data(struct TCP_Server_Info *server, char *buf)
{
- unsigned int rfclen = get_rfc1002_length(server->smallbuf);
+ unsigned int rfclen = get_rfc1002_length(buf);
int remaining = rfclen + 4 - server->total_read;
while (remaining > 0) {
@@ -1426,7 +1426,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
int length;
struct cifs_readdata *rdata = mid->callback_data;
- length = cifs_discard_remaining_data(server);
+ length = cifs_discard_remaining_data(server, mid->resp_buf);
dequeue_mid(mid, rdata->result);
return length;
}
@@ -1459,7 +1459,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
if (server->ops->is_status_pending &&
server->ops->is_status_pending(buf, server, 0)) {
- cifs_discard_remaining_data(server);
+ cifs_discard_remaining_data(server, buf);
return -1;
}
@@ -1519,6 +1519,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
rdata->iov[0].iov_base, server->total_read);
+ mid->resp_buf = server->smallbuf;
+ server->smallbuf = NULL;
+
/* how much data is in the response? */
data_len = server->ops->read_data_length(buf);
if (data_offset + data_len > buflen) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9ae695ae3ed7..0c7596cef4b8 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -904,10 +904,19 @@ cifs_demultiplex_thread(void *p)
server->lstrp = jiffies;
if (mid_entry != NULL) {
+ if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+ mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+ server->ops->handle_cancelled_mid)
+ server->ops->handle_cancelled_mid(
+ mid_entry->resp_buf,
+ server);
+
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
- } else if (!server->ops->is_oplock_break ||
- !server->ops->is_oplock_break(buf, server)) {
+ } else if (server->ops->is_oplock_break &&
+ server->ops->is_oplock_break(buf, server)) {
+ cifs_dbg(FYI, "Received oplock break\n");
+ } else {
cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", buf,
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 001528781b6b..265c45fe4ea5 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -34,71 +34,14 @@
#include "cifs_ioctl.h"
#include <linux/btrfs.h>
-static int cifs_file_clone_range(unsigned int xid, struct file *src_file,
- struct file *dst_file)
-{
- struct inode *src_inode = file_inode(src_file);
- struct inode *target_inode = file_inode(dst_file);
- struct cifsFileInfo *smb_file_src;
- struct cifsFileInfo *smb_file_target;
- struct cifs_tcon *src_tcon;
- struct cifs_tcon *target_tcon;
- int rc;
-
- cifs_dbg(FYI, "ioctl clone range\n");
-
- if (!src_file->private_data || !dst_file->private_data) {
- rc = -EBADF;
- cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
- goto out;
- }
-
- rc = -EXDEV;
- smb_file_target = dst_file->private_data;
- smb_file_src = src_file->private_data;
- src_tcon = tlink_tcon(smb_file_src->tlink);
- target_tcon = tlink_tcon(smb_file_target->tlink);
-
- if (src_tcon->ses != target_tcon->ses) {
- cifs_dbg(VFS, "source and target of copy not on same server\n");
- goto out;
- }
-
- /*
- * Note: cifs case is easier than btrfs since server responsible for
- * checks for proper open modes and file type and if it wants
- * server could even support copy of range where source = target
- */
- lock_two_nondirectories(target_inode, src_inode);
-
- cifs_dbg(FYI, "about to flush pages\n");
- /* should we flush first and last page first */
- truncate_inode_pages(&target_inode->i_data, 0);
-
- if (target_tcon->ses->server->ops->clone_range)
- rc = target_tcon->ses->server->ops->clone_range(xid,
- smb_file_src, smb_file_target, 0, src_inode->i_size, 0);
- else
- rc = -EOPNOTSUPP;
-
- /* force revalidate of size and timestamps of target file now
- that target is updated on the server */
- CIFS_I(target_inode)->time = 0;
- /* although unlocking in the reverse order from locking is not
- strictly necessary here it is a little cleaner to be consistent */
- unlock_two_nondirectories(src_inode, target_inode);
-out:
- return rc;
-}
-
-static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
unsigned long srcfd)
{
int rc;
struct fd src_file;
struct inode *src_inode;
- cifs_dbg(FYI, "ioctl clone range\n");
+ cifs_dbg(FYI, "ioctl copychunk range\n");
/* the destination must be opened for writing */
if (!(dst_file->f_mode & FMODE_WRITE)) {
cifs_dbg(FYI, "file target not open for write\n");
@@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
if (S_ISDIR(src_inode->i_mode))
goto out_fput;
- rc = cifs_file_clone_range(xid, src_file.file, dst_file);
+ rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
+ src_inode->i_size, 0);
out_fput:
fdput(src_file);
@@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
}
break;
case CIFS_IOC_COPYCHUNK_FILE:
- rc = cifs_ioctl_clone(xid, filep, arg);
+ rc = cifs_ioctl_copychunk(xid, filep, arg);
break;
case CIFS_IOC_SET_INTEGRITY:
if (pSMBFile == NULL)
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index fd516ea8b8f8..1a04b3a5beb1 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
return false;
}
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+ struct close_cancelled_open *cancelled = container_of(work,
+ struct close_cancelled_open, work);
+
+ cifs_dbg(VFS, "Close unmatched open\n");
+
+ SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+ cancelled->fid.volatile_fid);
+ cifs_put_tcon(cancelled->tcon);
+ kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+ struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer);
+ struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+ struct cifs_tcon *tcon;
+ struct close_cancelled_open *cancelled;
+
+ if (sync_hdr->Command != SMB2_CREATE ||
+ sync_hdr->Status != STATUS_SUCCESS)
+ return 0;
+
+ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+ tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
+ sync_hdr->TreeId);
+ if (!tcon) {
+ kfree(cancelled);
+ return -ENOENT;
+ }
+
+ cancelled->fid.persistent_fid = rsp->PersistentFileId;
+ cancelled->fid.volatile_fid = rsp->VolatileFileId;
+ cancelled->tcon = tcon;
+ INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+ queue_work(cifsiod_wq, &cancelled->work);
+
+ return 0;
+}
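
The two functions added above are the deferred-close half of the cancelled-mid fix: smb2_handle_cancelled_mid() packages the fid and a referenced tcon into a heap-allocated close_cancelled_open, embeds a work_struct in it, and the worker recovers the wrapper with container_of() before issuing SMB2_close. A minimal userspace sketch of that embed-and-recover pattern; the work_item type and the direct call that stands in for queue_work() are illustrative, not kernel API:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel types; names are illustrative only. */
struct work_item {
	void (*fn)(struct work_item *w);
};

struct close_request {
	unsigned long persistent_fid;
	unsigned long volatile_fid;
	struct work_item work;	/* embedded, like work_struct */
};

/* container_of: recover the enclosing struct from the embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void close_fid_worker(struct work_item *w)
{
	struct close_request *req =
		container_of(w, struct close_request, work);

	printf("closing fid %lu/%lu\n",
	       req->persistent_fid, req->volatile_fid);
	free(req);		/* the worker owns and frees the wrapper */
}

int main(void)
{
	struct close_request *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->persistent_fid = 4;
	req->volatile_fid = 7;
	req->work.fn = close_fid_worker;

	/* A real workqueue would run this later on another thread. */
	req->work.fn(&req->work);
	return 0;
}

The kernel version also pins the tcon (tc_count is bumped under cifs_tcp_ses_lock in smb2_find_smb_sess_tcon_unlocked) before queueing, so the tree connection stays alive until the deferred close drops it with cifs_put_tcon().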
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0231108d9387..7b12a727947e 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -21,6 +21,7 @@
#include <linux/vfs.h>
#include <linux/falloc.h>
#include <linux/scatterlist.h>
+#include <linux/uuid.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
@@ -592,8 +593,8 @@ req_res_key_exit:
return rc;
}
-static int
-smb2_clone_range(const unsigned int xid,
+static ssize_t
+smb2_copychunk_range(const unsigned int xid,
struct cifsFileInfo *srcfile,
struct cifsFileInfo *trgtfile, u64 src_off,
u64 len, u64 dest_off)
@@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid,
struct cifs_tcon *tcon;
int chunks_copied = 0;
bool chunk_sizes_updated = false;
+ ssize_t bytes_written, total_bytes_written = 0;
pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
if (pcchunk == NULL)
return -ENOMEM;
- cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+ cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
/* Request a key from the server to identify the source of the copy */
rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
srcfile->fid.persistent_fid,
@@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid,
}
chunks_copied++;
- src_off += le32_to_cpu(retbuf->TotalBytesWritten);
- dest_off += le32_to_cpu(retbuf->TotalBytesWritten);
- len -= le32_to_cpu(retbuf->TotalBytesWritten);
+ bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
+ src_off += bytes_written;
+ dest_off += bytes_written;
+ len -= bytes_written;
+ total_bytes_written += bytes_written;
- cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n",
+ cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
le32_to_cpu(retbuf->ChunksWritten),
le32_to_cpu(retbuf->ChunkBytesWritten),
- le32_to_cpu(retbuf->TotalBytesWritten));
+ bytes_written);
} else if (rc == -EINVAL) {
if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
goto cchunk_out;
@@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid,
cchunk_out:
kfree(pcchunk);
kfree(retbuf);
- return rc;
+ if (rc)
+ return rc;
+ else
+ return total_bytes_written;
}
static int
@@ -2188,7 +2195,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
if (rc)
goto free_pages;
- rc = cifs_discard_remaining_data(server);
+ rc = cifs_discard_remaining_data(server, buf);
if (rc)
goto free_pages;
@@ -2214,7 +2221,7 @@ free_pages:
kfree(pages);
return rc;
discard_data:
- cifs_discard_remaining_data(server);
+ cifs_discard_remaining_data(server, buf);
goto free_pages;
}
@@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = {
.clear_stats = smb2_clear_stats,
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
@@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = {
.set_oplock_level = smb2_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.get_dfs_refer = smb2_get_dfs_refer,
@@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = {
.clear_stats = smb2_clear_stats,
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
@@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = {
.set_oplock_level = smb21_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.enum_snapshots = smb3_enum_snapshots,
@@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = {
.print_stats = smb2_print_stats,
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
@@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = {
.set_oplock_level = smb3_set_oplock_level,
.create_lease_buf = smb3_create_lease_buf,
.parse_lease_buf = smb3_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.duplicate_extents = smb2_duplicate_extents,
.validate_negotiate = smb3_validate_negotiate,
.wp_retry_size = smb2_wp_retry_size,
@@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = {
.print_stats = smb2_print_stats,
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
@@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = {
.set_oplock_level = smb3_set_oplock_level,
.create_lease_buf = smb3_create_lease_buf,
.parse_lease_buf = smb3_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.duplicate_extents = smb2_duplicate_extents,
/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
.wp_retry_size = smb2_wp_retry_size,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7446496850a3..66fa1b941cdf 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1185,6 +1185,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
return -EINVAL;
}
+ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+ if (tcon)
+ tcon->tid = 0;
+
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 69e35873b1de..6853454fc871 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
struct smb_rqst *rqst);
extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+ __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+ __u64 ses_id, __u32 tid);
extern int smb2_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server);
extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+ struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 7c3bb1bd7eed..506b67fc93d9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
return 0;
}
-struct cifs_ses *
-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+static struct cifs_ses *
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
{
struct cifs_ses *ses;
- spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->Suid != ses_id)
continue;
- spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
+
+ return NULL;
+}
+
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+ struct cifs_ses *ses;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ ses = smb2_find_smb_ses_unlocked(server, ses_id);
spin_unlock(&cifs_tcp_ses_lock);
+ return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+{
+ struct cifs_tcon *tcon;
+
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid != tid)
+ continue;
+ ++tcon->tc_count;
+ return tcon;
+ }
+
return NULL;
}
+/*
+ * Obtain the tcon corresponding to the tid in the
+ * given cifs_ses.
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ ses = smb2_find_smb_ses_unlocked(server, ses_id);
+ if (!ses) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return NULL;
+ }
+ tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ return tcon;
+}
+
int
smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
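
smb2transport.c now follows the common locked-wrapper convention: a `_unlocked` helper assumes cifs_tcp_ses_lock is held, and the exported function takes the lock around it, letting smb2_find_smb_tcon resolve session and tcon in one critical section. A hedged pthread sketch of the same split, with invented types:

#include <pthread.h>
#include <stddef.h>

struct session { unsigned long id; struct session *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *sessions;	/* list protected by table_lock */

/* Caller must hold table_lock. */
static struct session *find_session_unlocked(unsigned long id)
{
	struct session *s;

	for (s = sessions; s; s = s->next)
		if (s->id == id)
			return s;
	return NULL;
}

/* Public wrapper: takes the lock once around the lookup. The returned
 * pointer must be pinned by other means, as in the kernel code. */
struct session *find_session(unsigned long id)
{
	struct session *s;

	pthread_mutex_lock(&table_lock);
	s = find_session_unlocked(id);
	pthread_mutex_unlock(&table_lock);
	return s;
}

smb2_find_smb_tcon composes both `_unlocked` lookups inside a single lock/unlock pair instead of dropping the lock between the session and tcon steps.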
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 526f0533cb4e..f6e13a977fc8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
+ cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
send_cancel(ses->server, rqst, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+ midQ->mid_flags |= MID_WAIT_CANCELLED;
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
add_credits(ses->server, 1, optype);
diff --git a/fs/dax.c b/fs/dax.c
index de622d4282a6..85abd741253d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -373,6 +373,22 @@ restart:
}
spin_lock_irq(&mapping->tree_lock);
+ if (!entry) {
+ /*
+ * We needed to drop the page_tree lock while calling
+ * radix_tree_preload() and we didn't have an entry to
+ * lock. See if another thread inserted an entry at
+ * our index during this time.
+ */
+ entry = __radix_tree_lookup(&mapping->page_tree, index,
+ NULL, &slot);
+ if (entry) {
+ radix_tree_preload_end();
+ spin_unlock_irq(&mapping->tree_lock);
+ goto restart;
+ }
+ }
+
if (pmd_downgrade) {
radix_tree_delete(&mapping->page_tree, index);
mapping->nrexceptional--;
@@ -388,19 +404,12 @@ restart:
if (err) {
spin_unlock_irq(&mapping->tree_lock);
/*
- * Someone already created the entry? This is a
- * normal failure when inserting PMDs in a range
- * that already contains PTEs. In that case we want
- * to return -EEXIST immediately.
- */
- if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
- goto restart;
- /*
- * Our insertion of a DAX PMD entry failed, most
- * likely because it collided with a PTE sized entry
- * at a different index in the PMD range. We haven't
- * inserted anything into the radix tree and have no
- * waiters to wake.
+ * Our insertion of a DAX entry failed, most likely
+ * because we were inserting a PMD entry and it
+ * collided with a PTE sized entry at a different
+ * index in the PMD range. We haven't inserted
+ * anything into the radix tree and have no waiters to
+ * wake.
*/
return ERR_PTR(err);
}
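
The dax.c hunk closes a lost-insert race: tree_lock had to be dropped around radix_tree_preload(), so on re-acquire the code looks the index up again and restarts if another thread got there first. The same recheck-under-lock idiom in a self-contained userspace form, where a single slot stands in for the radix tree index:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;		/* stand-in for one radix tree index */

void *get_or_insert_entry(void)
{
	void *entry, *new;

restart:
	pthread_mutex_lock(&tree_lock);
	entry = slot;
	if (entry) {
		pthread_mutex_unlock(&tree_lock);
		return entry;	/* somebody else's entry; use it */
	}
	pthread_mutex_unlock(&tree_lock);

	/* Allocation may sleep, so it happens outside the lock. */
	new = malloc(64);
	if (!new)
		return NULL;

	pthread_mutex_lock(&tree_lock);
	if (slot) {
		/* Lost the race while unlocked: undo and retry. */
		pthread_mutex_unlock(&tree_lock);
		free(new);
		goto restart;
	}
	slot = new;
	pthread_mutex_unlock(&tree_lock);
	return new;
}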
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f493af666591..fb69ee2388db 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2466,6 +2466,7 @@ extern int ext4_setattr(struct dentry *, struct iattr *);
extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
extern void ext4_evict_inode(struct inode *);
extern void ext4_clear_inode(struct inode *);
+extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8210c1f43556..cefa9835f275 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = {
const struct inode_operations ext4_file_inode_operations = {
.setattr = ext4_setattr,
- .getattr = ext4_getattr,
+ .getattr = ext4_file_getattr,
.listxattr = ext4_listxattr,
.get_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4247d8d25687..b9ffa9f4191f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5390,11 +5390,46 @@ err_out:
int ext4_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
- struct inode *inode;
- unsigned long long delalloc_blocks;
+ struct inode *inode = d_inode(path->dentry);
+ struct ext4_inode *raw_inode;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int flags;
+
+ if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = ei->i_crtime.tv_sec;
+ stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
+ }
+
+ flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
+ if (flags & EXT4_APPEND_FL)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (flags & EXT4_COMPR_FL)
+ stat->attributes |= STATX_ATTR_COMPRESSED;
+ if (flags & EXT4_ENCRYPT_FL)
+ stat->attributes |= STATX_ATTR_ENCRYPTED;
+ if (flags & EXT4_IMMUTABLE_FL)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ if (flags & EXT4_NODUMP_FL)
+ stat->attributes |= STATX_ATTR_NODUMP;
+
+ stat->attributes_mask |= (STATX_ATTR_APPEND |
+ STATX_ATTR_COMPRESSED |
+ STATX_ATTR_ENCRYPTED |
+ STATX_ATTR_IMMUTABLE |
+ STATX_ATTR_NODUMP);
- inode = d_inode(path->dentry);
generic_fillattr(inode, stat);
+ return 0;
+}
+
+int ext4_file_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags)
+{
+ struct inode *inode = d_inode(path->dentry);
+ u64 delalloc_blocks;
+
+ ext4_getattr(path, stat, request_mask, query_flags);
/*
* If there is inline data in the inode, the inode will normally not
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6ad612c576fc..07e5e1405771 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = {
.tmpfile = ext4_tmpfile,
.rename = ext4_rename2,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
.get_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
@@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = {
const struct inode_operations ext4_special_inode_operations = {
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
.get_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 73b184d161fc..5c8fc53cb0e5 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -85,17 +85,20 @@ errout:
const struct inode_operations ext4_encrypted_symlink_inode_operations = {
.get_link = ext4_encrypted_get_link,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
};
const struct inode_operations ext4_symlink_inode_operations = {
.get_link = page_get_link,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
};
const struct inode_operations ext4_fast_symlink_inode_operations = {
.get_link = simple_get_link,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
};
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 67c24351a67f..cd261c8de53a 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
if (!new_op)
return -ENOMEM;
new_op->upcall.req.features.features = 0;
- ret = service_operation(new_op, "orangefs_features", 0);
- orangefs_features = new_op->downcall.resp.features.features;
+ ret = service_operation(new_op, "orangefs_features",
+ ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
+ if (!ret)
+ orangefs_features =
+ new_op->downcall.resp.features.features;
+ else
+ orangefs_features = 0;
op_release(new_op);
} else {
orangefs_features = 0;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 8f91ec66baa3..d04ea4349909 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
if ((table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
+ (table->proc_handler == proc_douintvec) ||
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/fs/stat.c b/fs/stat.c
index fa0be59340cc..c6c963b2546b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr);
int vfs_statx_fd(unsigned int fd, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
- struct fd f = fdget_raw(fd);
+ struct fd f;
int error = -EBADF;
+ if (query_flags & ~KSTAT_QUERY_FLAGS)
+ return -EINVAL;
+
+ f = fdget_raw(fd);
if (f.file) {
error = vfs_getattr(&f.file->f_path, stat,
request_mask, query_flags);
@@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd);
* Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
* at the given name from being referenced.
*
- * The caller must have preset stat->request_mask as for vfs_getattr(). The
- * flags are also used to load up stat->query_flags.
- *
* 0 will be returned on success, and a -ve error code if unsuccessful.
*/
int vfs_statx(int dfd, const char __user *filename, int flags,
@@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
-static inline int __put_timestamp(struct timespec *kts,
- struct statx_timestamp __user *uts)
-{
- return (__put_user(kts->tv_sec, &uts->tv_sec ) ||
- __put_user(kts->tv_nsec, &uts->tv_nsec ) ||
- __put_user(0, &uts->__reserved ));
-}
-
-/*
- * Set the statx results.
- */
-static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
+static noinline_for_stack int
+cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
- uid_t uid = from_kuid_munged(current_user_ns(), stat->uid);
- gid_t gid = from_kgid_munged(current_user_ns(), stat->gid);
-
- if (__put_user(stat->result_mask, &buffer->stx_mask ) ||
- __put_user(stat->mode, &buffer->stx_mode ) ||
- __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) ||
- __put_user(stat->nlink, &buffer->stx_nlink ) ||
- __put_user(uid, &buffer->stx_uid ) ||
- __put_user(gid, &buffer->stx_gid ) ||
- __put_user(stat->attributes, &buffer->stx_attributes ) ||
- __put_user(stat->blksize, &buffer->stx_blksize ) ||
- __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) ||
- __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) ||
- __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) ||
- __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) ||
- __put_timestamp(&stat->atime, &buffer->stx_atime ) ||
- __put_timestamp(&stat->btime, &buffer->stx_btime ) ||
- __put_timestamp(&stat->ctime, &buffer->stx_ctime ) ||
- __put_timestamp(&stat->mtime, &buffer->stx_mtime ) ||
- __put_user(stat->ino, &buffer->stx_ino ) ||
- __put_user(stat->size, &buffer->stx_size ) ||
- __put_user(stat->blocks, &buffer->stx_blocks ) ||
- __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) ||
- __clear_user(&buffer->__spare2, sizeof(buffer->__spare2)))
- return -EFAULT;
-
- return 0;
+ struct statx tmp;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ tmp.stx_mask = stat->result_mask;
+ tmp.stx_blksize = stat->blksize;
+ tmp.stx_attributes = stat->attributes;
+ tmp.stx_nlink = stat->nlink;
+ tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
+ tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
+ tmp.stx_mode = stat->mode;
+ tmp.stx_ino = stat->ino;
+ tmp.stx_size = stat->size;
+ tmp.stx_blocks = stat->blocks;
+ tmp.stx_attributes_mask = stat->attributes_mask;
+ tmp.stx_atime.tv_sec = stat->atime.tv_sec;
+ tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
+ tmp.stx_btime.tv_sec = stat->btime.tv_sec;
+ tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
+ tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
+ tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
+ tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
+ tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
+ tmp.stx_rdev_major = MAJOR(stat->rdev);
+ tmp.stx_rdev_minor = MINOR(stat->rdev);
+ tmp.stx_dev_major = MAJOR(stat->dev);
+ tmp.stx_dev_minor = MINOR(stat->dev);
+
+ return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
/**
@@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx,
struct kstat stat;
int error;
+ if (mask & STATX__RESERVED)
+ return -EINVAL;
if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
- return -EFAULT;
if (filename)
error = vfs_statx(dfd, filename, flags, &stat, mask);
@@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx,
error = vfs_statx_fd(dfd, &stat, mask, flags);
if (error)
return error;
- return statx_set_result(&stat, buffer);
+
+ return cp_statx(&stat, buffer);
}
/* The caller here is responsible for sufficient locking (i.e. inode->i_lock) */
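
cp_statx() above replaces roughly twenty __put_user() calls with one zeroed stack struct and a single copy_to_user(), which removes the need for the access_ok() precheck in the syscall and guarantees that padding and reserved fields reach userspace as zeroes. A userspace analogue, with memcpy standing in for copy_to_user and an invented struct layout:

#include <string.h>
#include <stdint.h>

struct statx_like {
	uint32_t mask;
	uint16_t mode;		/* 2 bytes of padding follow here */
	uint64_t size;
	uint64_t reserved[4];
};

/* Fill a zeroed temporary, then copy it out in one shot. */
int fill_stat_buffer(void *ubuf, uint32_t mask, uint16_t mode, uint64_t size)
{
	struct statx_like tmp;

	memset(&tmp, 0, sizeof(tmp));	/* padding + reserved become 0 */
	tmp.mask = mask;
	tmp.mode = mode;
	tmp.size = size;

	memcpy(ubuf, &tmp, sizeof(tmp));	/* kernel: copy_to_user() */
	return 0;
}

Without the memset, the padding bytes after mode (and any field the kernel chose not to fill) would carry uninitialized stack data out to the caller.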
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b803213d1307..39c75a86c67f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
struct kobject *kobj = of->kn->parent->priv;
- size_t len;
+ ssize_t len;
/*
* If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
if (WARN_ON_ONCE(buf != of->prealloc_buf))
return 0;
len = ops->show(kobj, of->kn->priv, buf);
+ if (len < 0)
+ return len;
if (pos) {
if (len <= pos)
return 0;
len -= pos;
memmove(buf, buf + pos, len);
}
- return min(count, len);
+ return min_t(ssize_t, count, len);
}
/* kernfs write callback for regular sysfs files */
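
The sysfs_kf_read() change is a signedness fix: ops->show() may return a negative errno, and storing it in a size_t turned it into a huge length that passed the `len <= pos` test and fed min(count, len). A small program showing the difference (values invented):

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t ret = -22;		/* ops->show() failed: -EINVAL */
	size_t as_unsigned = ret;	/* the old size_t len */

	/* Stored unsigned, the error looks like a huge length. */
	printf("as_unsigned = %zu\n", as_unsigned);

	/* Kept signed, it can be detected and returned early. */
	if (ret < 0)
		printf("propagate errno %zd\n", ret);
	return 0;
}

min_t(ssize_t, count, len) in the fixed code forces that final comparison into the signed domain as well.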
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1d227b0fcf49..f7555fc25877 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
* protocols: aa:... bb:...
*/
seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
- pending, total, UFFD_API, UFFD_API_FEATURES,
+ pending, total, UFFD_API, ctx->features,
UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index eb00bc133bca..39f8604f764e 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,8 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
- int size);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 96b45cd6c63f..e84af093b2ab 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -632,36 +632,49 @@ xfs_dir2_sf_check(
/* Verify the consistency of an inline directory. */
int
xfs_dir2_sf_verify(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *sfp,
- int size)
+ struct xfs_inode *ip)
{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dir2_sf_hdr *sfp;
struct xfs_dir2_sf_entry *sfep;
struct xfs_dir2_sf_entry *next_sfep;
char *endp;
const struct xfs_dir_ops *dops;
+ struct xfs_ifork *ifp;
xfs_ino_t ino;
int i;
int i8count;
int offset;
+ int size;
+ int error;
__uint8_t filetype;
+ ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+ /*
+ * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+ * so we can only trust the mountpoint to have the right pointer.
+ */
dops = xfs_dir_get_ops(mp, NULL);
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+ size = ifp->if_bytes;
+
/*
* Give up if the directory is way too short.
*/
- XFS_WANT_CORRUPTED_RETURN(mp, size >
- offsetof(struct xfs_dir2_sf_hdr, parent));
- XFS_WANT_CORRUPTED_RETURN(mp, size >=
- xfs_dir2_sf_hdr_size(sfp->i8count));
+ if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+ size < xfs_dir2_sf_hdr_size(sfp->i8count))
+ return -EFSCORRUPTED;
endp = (char *)sfp + size;
/* Check .. entry */
ino = dops->sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
- XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+ error = xfs_dir_ino_validate(mp, ino);
+ if (error)
+ return error;
offset = dops->data_first_offset;
/* Check all reported entries */
@@ -672,12 +685,12 @@ xfs_dir2_sf_verify(
* Check the fixed-offset parts of the structure are
* within the data buffer.
*/
- XFS_WANT_CORRUPTED_RETURN(mp,
- ((char *)sfep + sizeof(*sfep)) < endp);
+ if (((char *)sfep + sizeof(*sfep)) >= endp)
+ return -EFSCORRUPTED;
/* Don't allow names with known bad length. */
- XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
- XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+ if (sfep->namelen == 0)
+ return -EFSCORRUPTED;
/*
* Check that the variable-length part of the structure is
@@ -685,33 +698,39 @@ xfs_dir2_sf_verify(
* name component, so nextentry is an acceptable test.
*/
next_sfep = dops->sf_nextentry(sfp, sfep);
- XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+ if (endp < (char *)next_sfep)
+ return -EFSCORRUPTED;
/* Check that the offsets always increase. */
- XFS_WANT_CORRUPTED_RETURN(mp,
- xfs_dir2_sf_get_offset(sfep) >= offset);
+ if (xfs_dir2_sf_get_offset(sfep) < offset)
+ return -EFSCORRUPTED;
/* Check the inode number. */
ino = dops->sf_get_ino(sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
- XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+ error = xfs_dir_ino_validate(mp, ino);
+ if (error)
+ return error;
/* Check the file type. */
filetype = dops->sf_get_ftype(sfep);
- XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+ if (filetype >= XFS_DIR3_FT_MAX)
+ return -EFSCORRUPTED;
offset = xfs_dir2_sf_get_offset(sfep) +
dops->data_entsize(sfep->namelen);
sfep = next_sfep;
}
- XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
- XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+ if (i8count != sfp->i8count)
+ return -EFSCORRUPTED;
+ if ((void *)sfep != (void *)endp)
+ return -EFSCORRUPTED;
/* Make sure this whole thing ought to be in local format. */
- XFS_WANT_CORRUPTED_RETURN(mp, offset +
- (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
- (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+ if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+ (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+ return -EFSCORRUPTED;
return 0;
}
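
Rewritten this way, xfs_dir2_sf_verify() treats the inline directory as untrusted input: each fixed-size entry header is bounds-checked against endp before it is used to size the variable-length part, and the walk must finish exactly at the end of the fork, otherwise -EFSCORRUPTED is returned instead of tripping an assert. A self-contained sketch of the same walk over an invented packed (namelen, name) record format:

#include <stddef.h>

#define EFSCORRUPTED 117	/* EUCLEAN, as XFS defines it */

/* One record: a 1-byte length, then that many name bytes. */
int verify_records(const unsigned char *buf, size_t size, int count)
{
	const unsigned char *p = buf;
	const unsigned char *endp = buf + size;
	int i;

	for (i = 0; i < count; i++) {
		/* The fixed part (the length byte) must fit first. */
		if (p + 1 > endp)
			return -EFSCORRUPTED;
		/* Reject known-bad lengths before trusting them. */
		if (p[0] == 0)
			return -EFSCORRUPTED;
		/* Then the variable part must also fit. */
		if (p + 1 + p[0] > endp)
			return -EFSCORRUPTED;
		p += 1 + p[0];
	}
	/* The walk must land exactly on the end of the buffer. */
	if (p != endp)
		return -EFSCORRUPTED;
	return 0;
}

Checking the fixed part before trusting the length, and the variable part before advancing, mirrors the sfep/next_sfep ordering in the hunk above.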
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 9653e964eda4..8a37efe04de3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -212,6 +212,16 @@ xfs_iformat_fork(
if (error)
return error;
+ /* Check inline dir contents. */
+ if (S_ISDIR(VFS_I(ip)->i_mode) &&
+ dip->di_format == XFS_DINODE_FMT_LOCAL) {
+ error = xfs_dir2_sf_verify(ip);
+ if (error) {
+ xfs_idestroy_fork(ip, XFS_DATA_FORK);
+ return error;
+ }
+ }
+
if (xfs_is_reflink_inode(ip)) {
ASSERT(ip->i_cowfp == NULL);
xfs_ifork_init_cow(ip);
@@ -322,8 +332,6 @@ xfs_iformat_local(
int whichfork,
int size)
{
- int error;
-
/*
* If the size is unreasonable, then something
* is wrong and we just bail out rather than crash in
@@ -339,14 +347,6 @@ xfs_iformat_local(
return -EFSCORRUPTED;
}
- if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
- error = xfs_dir2_sf_verify(ip->i_mount,
- (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
- size);
- if (error)
- return error;
- }
-
xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
return 0;
}
@@ -867,7 +867,7 @@ xfs_iextents_copy(
* In these cases, the format always takes precedence, because the
* format indicates the current state of the fork.
*/
-int
+void
xfs_iflush_fork(
xfs_inode_t *ip,
xfs_dinode_t *dip,
@@ -877,7 +877,6 @@ xfs_iflush_fork(
char *cp;
xfs_ifork_t *ifp;
xfs_mount_t *mp;
- int error;
static const short brootflag[2] =
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
static const short dataflag[2] =
@@ -886,7 +885,7 @@ xfs_iflush_fork(
{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
if (!iip)
- return 0;
+ return;
ifp = XFS_IFORK_PTR(ip, whichfork);
/*
* This can happen if we gave up in iformat in an error path,
@@ -894,19 +893,12 @@ xfs_iflush_fork(
*/
if (!ifp) {
ASSERT(whichfork == XFS_ATTR_FORK);
- return 0;
+ return;
}
cp = XFS_DFORK_PTR(dip, whichfork);
mp = ip->i_mount;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
case XFS_DINODE_FMT_LOCAL:
- if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
- error = xfs_dir2_sf_verify(mp,
- (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
- ifp->if_bytes);
- if (error)
- return error;
- }
if ((iip->ili_fields & dataflag[whichfork]) &&
(ifp->if_bytes > 0)) {
ASSERT(ifp->if_u1.if_data != NULL);
@@ -959,7 +951,6 @@ xfs_iflush_fork(
ASSERT(0);
break;
}
- return 0;
}
/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 132dc59fdde6..7fb8365326d1 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
struct xfs_inode_log_item *, int);
void xfs_idestroy_fork(struct xfs_inode *, int);
void xfs_idata_realloc(struct xfs_inode *, int, int);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 8b75dcea5966..828532ce0adc 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1311,8 +1311,16 @@ xfs_free_file_space(
/*
* Now that we've unmapped all full blocks we'll have to zero out any
* partial block at the beginning and/or end. xfs_zero_range is
- * smart enough to skip any holes, including those we just created.
+ * smart enough to skip any holes, including those we just created,
+ * but we must take care not to zero beyond EOF and enlarge i_size.
*/
+
+ if (offset >= XFS_ISIZE(ip))
+ return 0;
+
+ if (offset + len > XFS_ISIZE(ip))
+ len = XFS_ISIZE(ip) - offset;
+
return xfs_zero_range(ip, offset, len, NULL);
}
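
The xfs_free_file_space() tail now clamps the zeroing range so a hole punch can never write past EOF and inflate i_size. The arithmetic in isolation, with XFS_ISIZE replaced by a plain parameter:

#include <stdint.h>

/* Clamp a zeroing request [offset, offset + len) to stay below isize. */
uint64_t clamp_zero_range(uint64_t offset, uint64_t len, uint64_t isize)
{
	if (offset >= isize)
		return 0;		/* nothing to zero past EOF */
	if (offset + len > isize)
		len = isize - offset;	/* stop exactly at EOF */
	return len;
}

For example, with isize = 100, offset = 90 and len = 50, only 10 bytes are zeroed; with offset = 120 nothing is.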
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c7fe2c2123ab..7605d8396596 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
kmem_zone_t *xfs_inode_zone;
@@ -3475,7 +3476,6 @@ xfs_iflush_int(
struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_dinode *dip;
struct xfs_mount *mp = ip->i_mount;
- int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
@@ -3547,6 +3547,12 @@ xfs_iflush_int(
if (ip->i_d.di_version < 3)
ip->i_d.di_flushiter++;
+ /* Check the inline directory data. */
+ if (S_ISDIR(VFS_I(ip)->i_mode) &&
+ ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+ xfs_dir2_sf_verify(ip))
+ goto corrupt_out;
+
/*
* Copy the dirty parts of the inode into the on-disk inode. We always
* copy out the core of the inode, because if the inode is dirty at all
@@ -3558,14 +3564,9 @@ xfs_iflush_int(
if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
ip->i_d.di_flushiter = 0;
- error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
- if (error)
- return error;
- if (XFS_IFORK_Q(ip)) {
- error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
- if (error)
- return error;
- }
+ xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+ if (XFS_IFORK_Q(ip))
+ xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
xfs_inobp_check(mp, bp);
/*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 229cc6a6d8ef..ebfc13350f9a 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -516,6 +516,20 @@ xfs_vn_getattr(
stat->blocks =
XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
+ if (ip->i_d.di_version == 3) {
+ if (request_mask & STATX_BTIME) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
+ stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
+ }
+ }
+
+ if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
+ stat->attributes |= STATX_ATTR_NODUMP;
switch (inode->i_mode & S_IFMT) {
case S_IFBLK:
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 2a6d9b1558e0..26d67ce3c18d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -583,7 +583,7 @@ xfs_inumbers(
return error;
bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
- buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+ buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
do {
struct xfs_inobt_rec_incore r;
int stat;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7cdfe167074f..143db9c523e2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -261,9 +261,9 @@
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
- __start_ro_after_init = .; \
+ VMLINUX_SYMBOL(__start_ro_after_init) = .; \
*(.data..ro_after_init) \
- __end_ro_after_init = .;
+ VMLINUX_SYMBOL(__end_ro_after_init) = .;
#endif
/*
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index e2ee74c20b8f..4eeda120e46d 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -225,6 +225,10 @@ struct drm_display_info {
#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2)
/* drive data on neg. edge */
#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3)
+/* data is transmitted MSB to LSB on the bus */
+#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4)
+/* data is transmitted LSB to MSB on the bus */
+#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5)
/**
* @bus_flags: Additional information (like pixel signal polarity) for
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0b1ce05e2c2e..fa07be197945 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -711,6 +711,17 @@ extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
struct ttm_buffer_object *bo);
/**
* ttm_bo_default_io_mem_pfn - get a pfn for a page offset
+ *
+ * @bo: the BO we need to look up the pfn for
+ * @page_offset: offset inside the BO to look up.
+ *
+ * Calculate the PFN for iomem based mappings during page fault
+ */
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
+/**
* ttm_bo_mmap - mmap out of the ttm device address space.
*
* @filp: filp as input from the mmap method.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3641c6128ac2..6bbd34d25a8d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -462,6 +462,15 @@ struct ttm_bo_driver {
struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
};
/**
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f98f0e1..1487011fe057 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
* @ref_type: The type of reference.
* @existed: Upon completion, indicates that an identical reference object
* already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
*
* Checks that the base object is shareable and adds a ref object to it.
*
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed);
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed);
extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base);
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 932be0c8086e..e88a8e39767b 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -63,6 +63,7 @@
#define TTM_PL_FLAG_CACHED (1 << 16)
#define TTM_PL_FLAG_UNCACHED (1 << 17)
#define TTM_PL_FLAG_WC (1 << 18)
+#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b72dd2ad5f44..c0b3d999c266 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
+void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..9382c5da7a2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
atomic_t nr_active;
+ struct delayed_work delayed_run_work;
struct delayed_work delay_work;
struct hlist_node cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5a7da607ca04..7548f332121a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -610,7 +610,6 @@ struct request_queue {
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
-#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index aebecc4ed088..22d39e8d4de1 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
extern int elevator_change(struct request_queue *, const char *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index eafc965b3eb8..dc30f3d057eb 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
#define GICH_MISR_EOI (1 << 0)
#define GICH_MISR_U (1 << 1)
+#define GICV_PMR_PRIORITY_SHIFT 3
+#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT)
+
#ifndef __ASSEMBLY__
#include <linux/irqdomain.h>
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 7a01c94496f1..3eef9fb9968a 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -35,10 +35,11 @@
* Max bus-specific overhead incurred by request/responses.
* I2C requires 1 additional byte for requests.
* I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
*/
#define EC_PROTO_VERSION_UNKNOWN 0
#define EC_MAX_REQUEST_OVERHEAD 1
-#define EC_MAX_RESPONSE_OVERHEAD 2
+#define EC_MAX_RESPONSE_OVERHEAD 32
/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c43d435d4225..9061780b141f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -64,26 +64,26 @@ enum {
* RDMA_QPTYPE field
*/
enum {
- NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */
- NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */
+ NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
+ NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
};
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
* RDMA_QPTYPE field
*/
enum {
- NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */
- NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */
- NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */
- NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */
- NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */
+ NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
+ NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
+ NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
+ NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
+ NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
};
/* RDMA Connection Management Service Type codes for Discovery Log Page
* entry TSAS RDMA_CMS field
*/
enum {
- NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */
+ NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};
#define NVMF_AQ_DEPTH 32
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 8ce2d87a238b..5e45385c5bdc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -145,8 +145,9 @@ struct pinctrl_desc {
extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev);
+extern int pinctrl_enable(struct pinctrl_dev *pctldev);
-/* Please use pinctrl_register_and_init() instead */
+/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index c76e524fb34b..64b6b3aece21 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -26,6 +26,7 @@ struct kstat {
unsigned int nlink;
uint32_t blksize; /* Preferred I/O size */
u64 attributes;
+ u64 attributes_mask;
#define KSTAT_ATTR_FS_IOC_FLAGS \
(STATX_ATTR_COMPRESSED | \
STATX_ATTR_IMMUTABLE | \
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1f71ee5ab518..069582ee5d7f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
return frag;
}
-static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
+static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
{
-
- sctp_assoc_sync_pmtu(sk, asoc);
+ sctp_assoc_sync_pmtu(asoc);
asoc->pmtu_pending = 0;
}
@@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
*/
static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
- if (t->dst && (!dst_check(t->dst, t->dst_cookie) ||
- t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
- SCTP_DEFAULT_MINSEGMENT)))
+ if (t->dst && !dst_check(t->dst, t->dst_cookie))
sctp_transport_dst_release(t);
return t->dst;
}
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
+ SCTP_DEFAULT_MINSEGMENT);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
#endif /* __net_sctp_h__ */
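
The new sctp_transport_pmtu_check() separates route validity (the dst_check() in sctp_transport_dst_check) from path-MTU freshness: it recomputes the floored MTU, refreshes the cached t->pathmtu on mismatch, and tells the caller whether the cache was already current. A userspace analogue of that compare-and-refresh helper; the floor constant and the MTU source are stand-ins:

#include <stdbool.h>
#include <stdint.h>

#define DEFAULT_MINSEGMENT 512	/* floor, as SCTP applies one */

struct transport {
	uint32_t pathmtu;	/* cached path MTU */
};

static uint32_t current_dst_mtu(void)
{
	return 1500;		/* stand-in for dst_mtu(t->dst) */
}

/* Returns true if the cached MTU is still valid, false after a refresh. */
bool transport_pmtu_check(struct transport *t)
{
	uint32_t pmtu = current_dst_mtu();

	if (pmtu < DEFAULT_MINSEGMENT)
		pmtu = DEFAULT_MINSEGMENT;

	if (t->pathmtu == pmtu)
		return true;

	t->pathmtu = pmtu;	/* refresh the cache */
	return false;
}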
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 592decebac75..138f8615acf0 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -377,7 +377,8 @@ typedef struct sctp_sender_hb_info {
__u64 hb_nonce;
} sctp_sender_hb_info_t;
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp);
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
void sctp_stream_free(struct sctp_stream *stream);
void sctp_stream_clear(struct sctp_stream *stream);
@@ -499,7 +500,6 @@ struct sctp_datamsg {
/* Did the message fail to send? */
int send_error;
u8 send_failed:1,
- force_delay:1,
can_delay; /* should this message be Nagle delayed */
};
@@ -952,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
void sctp_transport_burst_limited(struct sctp_transport *);
void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
-void sctp_transport_reset(struct sctp_transport *);
-void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32);
+void sctp_transport_reset(struct sctp_transport *t);
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1878,6 +1878,7 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
+ force_delay:1,
prsctp_enable:1,
reconf_enable:1;
@@ -1953,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old,
__u32 sctp_association_get_next_tsn(struct sctp_association *);
-void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
void sctp_assoc_set_primary(struct sctp_association *,
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 2584c1cca42f..76f6f78a352b 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -154,6 +154,12 @@ struct drm_etnaviv_gem_submit_bo {
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
+#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
+#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
+#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
+#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
+ ETNA_SUBMIT_FENCE_FD_IN | \
+ ETNA_SUBMIT_FENCE_FD_OUT)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
@@ -167,6 +173,8 @@ struct drm_etnaviv_gem_submit {
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 relocs; /* in, ptr to array of submit_reloc's */
__u64 stream; /* in, ptr to cmdstream */
+ __u32 flags; /* in, mask of ETNA_SUBMIT_x */
+ __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 51a6b86e3700..d538897b8e08 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -114,7 +114,7 @@ struct statx {
__u64 stx_ino; /* Inode number */
__u64 stx_size; /* File size */
__u64 stx_blocks; /* Number of 512-byte blocks allocated */
- __u64 __spare1[1];
+ __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
/* 0x40 */
struct statx_timestamp stx_atime; /* Last access time */
struct statx_timestamp stx_btime; /* File creation time */
@@ -152,9 +152,10 @@ struct statx {
#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_ALL 0x00000fffU /* All currently supported flags */
+#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
/*
- * Attributes to be found in stx_attributes
+ * Attributes to be found in stx_attributes and masked in stx_attributes_mask.
*
* These give information about the features or the state of a file that might
* be of use to ordinary userspace programs such as GUIs or ls rather than
diff --git a/kernel/audit.h b/kernel/audit.h
index 0f1cf6d1878a..0d87f8ab8778 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -333,13 +333,7 @@ extern u32 audit_sig_sid;
extern int audit_filter(int msgtype, unsigned int listtype);
#ifdef CONFIG_AUDITSYSCALL
-extern int __audit_signal_info(int sig, struct task_struct *t);
-static inline int audit_signal_info(int sig, struct task_struct *t)
-{
- if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
- return __audit_signal_info(sig, t);
- return 0;
-}
+extern int audit_signal_info(int sig, struct task_struct *t);
extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
extern struct list_head *audit_killed_trees(void);
#else
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e59ffc7fc522..1c2333155893 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t)
* If the audit subsystem is being terminated, record the task (pid)
* and uid that is doing that.
*/
-int __audit_signal_info(int sig, struct task_struct *t)
+int audit_signal_info(int sig, struct task_struct *t)
{
struct audit_aux_data_pids *axp;
struct task_struct *tsk = current;
struct audit_context *ctx = tsk->audit_context;
kuid_t uid = current_uid(), t_uid = task_uid(t);
- if (auditd_test_task(t)) {
- if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
- audit_sig_pid = task_tgid_nr(tsk);
- if (uid_valid(tsk->loginuid))
- audit_sig_uid = tsk->loginuid;
- else
- audit_sig_uid = uid;
- security_task_getsecid(tsk, &audit_sig_sid);
- }
- if (!audit_signals || audit_dummy_context())
- return 0;
+ if (auditd_test_task(t) &&
+ (sig == SIGTERM || sig == SIGHUP ||
+ sig == SIGUSR1 || sig == SIGUSR2)) {
+ audit_sig_pid = task_tgid_nr(tsk);
+ if (uid_valid(tsk->loginuid))
+ audit_sig_uid = tsk->loginuid;
+ else
+ audit_sig_uid = uid;
+ security_task_getsecid(tsk, &audit_sig_sid);
}
+ if (!audit_signals || audit_dummy_context())
+ return 0;
+
/* optimize the common case by putting first signal recipient directly
* in audit_context */
if (!ctx->target_pid) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 796b68d00119..a834068a400e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -765,38 +765,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
}
}
-static int check_ptr_alignment(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, int off, int size)
+static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
+ int off, int size)
{
- if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
- if (off % size != 0) {
- verbose("misaligned access off %d size %d\n",
- off, size);
- return -EACCES;
- } else {
- return 0;
- }
- }
-
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
- /* misaligned access to packet is ok on x86,arm,arm64 */
- return 0;
-
if (reg->id && size != 1) {
- verbose("Unknown packet alignment. Only byte-sized access allowed\n");
+ verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
return -EACCES;
}
/* skb->data is NET_IP_ALIGN-ed */
- if (reg->type == PTR_TO_PACKET &&
- (NET_IP_ALIGN + reg->off + off) % size != 0) {
+ if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
verbose("misaligned packet access off %d+%d+%d size %d\n",
NET_IP_ALIGN, reg->off, off, size);
return -EACCES;
}
+
return 0;
}
+static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
+ int size)
+{
+ if (size != 1) {
+ verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int check_ptr_alignment(const struct bpf_reg_state *reg,
+ int off, int size)
+{
+ switch (reg->type) {
+ case PTR_TO_PACKET:
+ return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+ check_pkt_ptr_alignment(reg, off, size);
+ case PTR_TO_MAP_VALUE_ADJ:
+ return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+ check_val_ptr_alignment(reg, size);
+ default:
+ if (off % size != 0) {
+ verbose("misaligned access off %d size %d\n",
+ off, size);
+ return -EACCES;
+ }
+
+ return 0;
+ }
+}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -818,7 +836,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
if (size < 0)
return size;
- err = check_ptr_alignment(env, reg, off, size);
+ err = check_ptr_alignment(reg, off, size);
if (err)
return err;
@@ -1925,6 +1943,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
* register as unknown.
*/
if (env->allow_ptr_leaks &&
+ BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
(dst_reg->type == PTR_TO_MAP_VALUE ||
dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1973,14 +1992,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
- regs[i].range = dst_reg->off;
+ /* keep the maximum range already checked */
+ regs[i].range = max(regs[i].range, dst_reg->off);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
reg = &state->spilled_regs[i / BPF_REG_SIZE];
if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
- reg->range = dst_reg->off;
+ reg->range = max(reg->range, dst_reg->off);
}
}
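
The alignment rework above replaces one intertwined check with a dispatch on
pointer type. A standalone model of that shape (the enum, the config flag and
the skipped NET_IP_ALIGN/reg->id details are simplifications, not the
verifier's real logic):

#include <stdbool.h>
#include <stdio.h>

enum reg_type { PTR_TO_PACKET, PTR_TO_MAP_VALUE_ADJ, PTR_OTHER };

/* stands in for CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
static const bool unaligned_ok = false;

static int check_alignment(enum reg_type type, int off, int size)
{
	switch (type) {
	case PTR_TO_PACKET:
	case PTR_TO_MAP_VALUE_ADJ:
		/* variable-offset pointers: byte-sized access only,
		 * unless the architecture tolerates unaligned loads */
		return (unaligned_ok || size == 1) ? 0 : -1;
	default:
		/* fixed-offset pointers: require natural alignment */
		return (off % size == 0) ? 0 : -1;
	}
}

int main(void)
{
	printf("%d\n", check_alignment(PTR_OTHER, 6, 4));            /* -1 */
	printf("%d\n", check_alignment(PTR_TO_MAP_VALUE_ADJ, 0, 1)); /* 0 */
	return 0;
}
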
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0af928712174..266ddcc1d8bb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
WARN_ON(!task->ptrace || task->parent != current);
+ /*
+ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+ * Recheck state under the lock to close this race.
+ */
spin_lock_irq(&task->sighand->siglock);
- if (__fatal_signal_pending(task))
- wake_up_state(task, __TASK_TRACED);
- else
- task->state = TASK_TRACED;
+ if (task->state == __TASK_TRACED) {
+ if (__fatal_signal_pending(task))
+ wake_up_state(task, __TASK_TRACED);
+ else
+ task->state = TASK_TRACED;
+ }
spin_unlock_irq(&task->sighand->siglock);
}
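
The ptrace fix is an instance of the recheck-under-lock idiom: state observed
before the lock was taken may have changed by the time we act on it. A
userspace sketch of the idiom, with illustrative names:

#include <pthread.h>
#include <stdbool.h>

enum { RUNNING, FROZEN, TRACED };

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int state = FROZEN;

static void unfreeze(bool fatal_pending)
{
	pthread_mutex_lock(&siglock);
	/* a third party (PTRACE_LISTEN here) may already have woken the
	 * task, so only touch the state if it is still frozen */
	if (state == FROZEN)
		state = fatal_pending ? RUNNING : TRACED;
	pthread_mutex_unlock(&siglock);
}

int main(void)
{
	unfreeze(false);
	return state == TRACED ? 0 : 1;
}
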
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index acf0a5a06da7..8c8714fcb53c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
if (write) {
if (*negp)
return -EINVAL;
+ if (*lvalp > UINT_MAX)
+ return -EINVAL;
*valp = *lvalp;
} else {
unsigned int val = *valp;
+ *negp = false;
*lvalp = (unsigned long)val;
}
return 0;
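
The conversion helper now rejects writes that would silently truncate when
narrowed to unsigned int. The write path of the check, as a compilable sketch:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* models the write half of do_proc_douintvec_conv() */
static int store_uint(bool negative, unsigned long lval, unsigned int *valp)
{
	if (negative || lval > UINT_MAX)
		return -1;			/* -EINVAL in the kernel */
	*valp = (unsigned int)lval;
	return 0;
}

int main(void)
{
	unsigned int v = 0;

	printf("%d\n", store_uint(false, 42UL, &v));      /* 0, v = 42 */
	printf("%d\n", store_uint(true, 1UL, &v));        /* -1: negative */
	printf("%d\n", store_uint(false, ULONG_MAX, &v)); /* -1 where long is 64-bit */
	return 0;
}
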
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 96fc3c043ad6..54e7a90db848 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4826,9 +4826,9 @@ static __init int test_ringbuffer(void)
rb_data[cpu].cnt = cpu;
rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
"rbtester/%d", cpu);
- if (WARN_ON(!rb_threads[cpu])) {
+ if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_threads[cpu]);
goto out_free;
}
@@ -4838,9 +4838,9 @@ static __init int test_ringbuffer(void)
/* Now create the rb hammer! */
rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
- if (WARN_ON(!rb_hammer)) {
+ if (WARN_ON(IS_ERR(rb_hammer))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_hammer);
goto out_free;
}
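
kthread_create() and kthread_run() report failure with ERR_PTR(-errno) and
never return NULL, so the NULL tests removed above could not fire. A
self-contained model of the ERR_PTR convention, with the helpers
re-implemented here rather than taken from the kernel:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stands in for kthread_create(): error pointer on failure */
static void *create_thread(int fail)
{
	static int dummy_task;
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)&dummy_task;
}

int main(void)
{
	void *task = create_thread(1);

	if (IS_ERR(task)) {			/* the correct check */
		printf("create failed: %ld\n", PTR_ERR(task));
		return 1;
	}
	return 0;
}
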
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ebc93e179f3..fef4cf210cc7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj,
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
- } else if (!memcmp("defer", buf,
- min(sizeof("defer")-1, count))) {
- clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
- clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
- clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
- set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
} else if (!memcmp("defer+madvise", buf,
min(sizeof("defer+madvise")-1, count))) {
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ } else if (!memcmp("defer", buf,
+ min(sizeof("defer")-1, count))) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
} else if (!memcmp("madvise", buf,
min(sizeof("madvise")-1, count))) {
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
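
The reorder matters because the parser compares only min(keyword length,
count) bytes: any buffer beginning with "defer" matched the short keyword
before "defer+madvise" was ever tried. Demonstrated standalone:

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static const char *parse(const char *buf, size_t count)
{
	/* longest prefix first, as the reordered code above does */
	if (!memcmp("defer+madvise", buf,
		    MIN(sizeof("defer+madvise") - 1, count)))
		return "defer+madvise";
	if (!memcmp("defer", buf, MIN(sizeof("defer") - 1, count)))
		return "defer";
	return "unknown";
}

int main(void)
{
	/* with "defer" tested first, this input wrongly matched "defer" */
	printf("%s\n", parse("defer+madvise\n", 14));
	printf("%s\n", parse("defer\n", 6));
	return 0;
}
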
diff --git a/mm/internal.h b/mm/internal.h
index ccfc2a2969f4..266efaeaa370 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
enum ttu_flags;
struct tlbflush_unmap_batch;
+
+/*
+ * only for MM internal work items which do not depend on
+ * any allocations or locks which might depend on allocations
+ */
+extern struct workqueue_struct *mm_percpu_wq;
+
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 75b2745bac41..37d0b334bfe9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(bm, nmask, nr_bits);
+ if (compat_get_bitmap(bm, nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, bm, alloc_size);
+ if (copy_to_user(nm, bm, alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
@@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
compat_ulong_t, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode, compat_ulong_t, flags)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
@@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cbde310abed..f3d603cef2c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone)
*/
static cpumask_t cpus_with_pcps;
+ /*
+ * Make sure nobody triggers this path before mm_percpu_wq is fully
+ * initialized.
+ */
+ if (WARN_ON_ONCE(!mm_percpu_wq))
+ return;
+
/* Workqueues cannot recurse */
if (current->flags & PF_WQ_WORKER)
return;
@@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone)
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
- schedule_work_on(cpu, work);
+ queue_work_on(cpu, mm_percpu_wq, work);
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(node_page_state(pgdat, NR_FILE_MAPPED)),
K(node_page_state(pgdat, NR_FILE_DIRTY)),
K(node_page_state(pgdat, NR_WRITEBACK)),
+ K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
* HPAGE_PMD_NR),
K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
#endif
- K(node_page_state(pgdat, NR_SHMEM)),
K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
node_page_state(pgdat, NR_PAGES_SCANNED),
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c4c9def8ffea..de9c40d7304a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (pvmw->pmd && !pvmw->pte)
return not_found(pvmw);
- /* Only for THP, seek to next pte entry makes sense */
- if (pvmw->pte) {
- if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
- return not_found(pvmw);
+ if (pvmw->pte)
goto next_pte;
- }
if (unlikely(PageHuge(pvmw->page))) {
/* when pud is not present, pte will be NULL */
@@ -165,9 +161,14 @@ restart:
while (1) {
if (check_pte(pvmw))
return true;
-next_pte: do {
+next_pte:
+ /* Seeking to the next pte only makes sense for THP */
+ if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+ return not_found(pvmw);
+ do {
pvmw->address += PAGE_SIZE;
- if (pvmw->address >=
+ if (pvmw->address >= pvmw->vma->vm_end ||
+ pvmw->address >=
__vma_address(pvmw->page, pvmw->vma) +
hpage_nr_pages(pvmw->page) * PAGE_SIZE)
return not_found(pvmw);
diff --git a/mm/swap.c b/mm/swap.c
index c4910f14f957..5dabf444d724 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-/*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
- * workqueue, aiding in getting memory freed.
- */
-static struct workqueue_struct *lru_add_drain_wq;
-
-static int __init lru_init(void)
-{
- lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
-
- if (WARN(!lru_add_drain_wq,
- "Failed to create workqueue lru_add_drain_wq"))
- return -ENOMEM;
-
- return 0;
-}
-early_initcall(lru_init);
-
void lru_add_drain_all(void)
{
static DEFINE_MUTEX(lock);
static struct cpumask has_work;
int cpu;
+ /*
+ * Make sure nobody triggers this path before mm_percpu_wq is fully
+ * initialized.
+ */
+ if (WARN_ON(!mm_percpu_wq))
+ return;
+
mutex_lock(&lock);
get_online_cpus();
cpumask_clear(&has_work);
@@ -707,7 +696,7 @@ void lru_add_drain_all(void)
pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
need_activate_page_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
- queue_work_on(cpu, lru_add_drain_wq, work);
+ queue_work_on(cpu, mm_percpu_wq, work);
cpumask_set_cpu(cpu, &has_work);
}
}
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b8f974..ac6318a064d3 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type)
struct page *page = map[i];
if (page)
__free_page(page);
+ if (!(i % SWAP_CLUSTER_MAX))
+ cond_resched();
}
vfree(map);
}
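
swapoff can walk a very large map, so the loop now yields every
SWAP_CLUSTER_MAX iterations instead of monopolizing the CPU. A userspace
analogue of the batched-yield pattern:

#include <sched.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32		/* the kernel's batch size */

static void free_all(unsigned long nr)
{
	for (unsigned long i = 0; i < nr; i++) {
		/* __free_page(map[i]) in the kernel */
		if (!(i % SWAP_CLUSTER_MAX))
			sched_yield();	/* cond_resched() */
	}
}

int main(void)
{
	free_all(1UL << 20);
	puts("done");
	return 0;
}
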
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 89f95396ec46..809025ed97ea 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = {
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
-static struct workqueue_struct *vmstat_wq;
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
@@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w)
* to occur in the future. Keep on running the
* update worker thread.
*/
- queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+ queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
}
@@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w)
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
if (!delayed_work_pending(dw) && need_update(cpu))
- queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+ queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
}
put_online_cpus();
@@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void)
INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
vmstat_update);
- vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
schedule_delayed_work(&shepherd,
round_jiffies_relative(sysctl_stat_interval));
}
@@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu)
#endif
+struct workqueue_struct *mm_percpu_wq;
+
void __init init_mm_internals(void)
{
-#ifdef CONFIG_SMP
- int ret;
+ int ret __maybe_unused;
+ mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+
+#ifdef CONFIG_SMP
ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
NULL, vmstat_cpu_dead);
if (ret < 0)
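
The vmstat, lru-drain and pcp-drain users now share one mm_percpu_wq that is
created in init_mm_internals() and checked at each use. A compilable model of
the create-early, check-before-use shape; every name below is a stand-in for
the kernel primitive it mimics:

#include <stdio.h>

struct workqueue { const char *name; };

static struct workqueue *mm_percpu_wq;	/* NULL until init */

static void drain_local(void) { puts("drained"); }

static void drain_all(void)
{
	if (!mm_percpu_wq) {	/* WARN_ON_ONCE(!mm_percpu_wq) upstream */
		fprintf(stderr, "warning: drain before init\n");
		return;
	}
	drain_local();		/* queue_work_on(cpu, mm_percpu_wq, ...) */
}

int main(void)
{
	static struct workqueue wq = { "mm_percpu_wq" };

	drain_all();		/* too early: warns and bails out */
	mm_percpu_wq = &wq;	/* init_mm_internals() */
	drain_all();		/* now queued */
	return 0;
}
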
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index c35aae13c8d2..d98d4998213d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -390,7 +390,7 @@ mpls:
unsigned char ar_tip[4];
} *arp_eth, _arp_eth;
const struct arphdr *arp;
- struct arphdr *_arp;
+ struct arphdr _arp;
arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
hlen, &_arp);
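
__skb_header_pointer() may copy the header into caller-provided storage and
return a pointer to it, so _arp must be a real object, not an uninitialized
pointer. The contract, reduced to a simplified model:

#include <stdio.h>
#include <string.h>

struct arp_model { unsigned char ar_hrd[2], ar_pro[2]; };

/* models __skb_header_pointer(): copies into *buffer, returns it */
static void *header_pointer(const void *pkt, void *buffer, size_t len)
{
	memcpy(buffer, pkt, len);
	return buffer;
}

int main(void)
{
	const unsigned char pkt[] = { 0x00, 0x01, 0x08, 0x00 };
	struct arp_model _arp;		/* real storage, as in the fix */
	const struct arp_model *arp;

	arp = header_pointer(pkt, &_arp, sizeof(_arp));
	printf("hrd=%02x%02x\n", arp->ar_hrd[0], arp->ar_hrd[1]);
	return 0;
}
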
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e7c12caa20c8..4526cbd7e28a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh)
if (skb)
skb = skb_clone(skb, GFP_ATOMIC);
write_unlock(&neigh->lock);
- neigh->ops->solicit(neigh, skb);
+ if (neigh->ops->solicit)
+ neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
kfree_skb(skb);
}
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 758f140b6bed..d28da7d363f1 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -20,9 +20,11 @@
#include <net/tcp.h>
static siphash_key_t net_secret __read_mostly;
+static siphash_key_t ts_secret __read_mostly;
static __always_inline void net_secret_init(void)
{
+ net_get_random_once(&ts_secret, sizeof(ts_secret));
net_get_random_once(&net_secret, sizeof(net_secret));
}
#endif
@@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq)
#endif
#if IS_ENABLED(CONFIG_IPV6)
+static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+{
+ const struct {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ } __aligned(SIPHASH_ALIGNMENT) combined = {
+ .saddr = *(struct in6_addr *)saddr,
+ .daddr = *(struct in6_addr *)daddr,
+ };
+
+ if (sysctl_tcp_timestamps != 1)
+ return 0;
+
+ return siphash(&combined, offsetofend(typeof(combined), daddr),
+ &ts_secret);
+}
+
u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport, u32 *tsoff)
{
@@ -63,7 +82,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
net_secret_init();
hash = siphash(&combined, offsetofend(typeof(combined), dport),
&net_secret);
- *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+ *tsoff = secure_tcpv6_ts_off(saddr, daddr);
return seq_scale(hash);
}
EXPORT_SYMBOL(secure_tcpv6_sequence_number);
@@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
#endif
#ifdef CONFIG_INET
+static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+{
+ if (sysctl_tcp_timestamps != 1)
+ return 0;
+
+ return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+ &ts_secret);
+}
/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
* but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
@@ -103,7 +130,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
(__force u32)sport << 16 | (__force u32)dport,
&net_secret);
- *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+ *tsoff = secure_tcp_ts_off(saddr, daddr);
return seq_scale(hash);
}
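
The timestamp offset is now keyed by a dedicated ts_secret and computed from
the address pair alone, decoupling it from the ISN hash it used to share bits
with. A sketch of the separation; hash64() is a toy stand-in for siphash and
the fixed keys exist only for the demo:

#include <stdint.h>
#include <stdio.h>

/* toy mixer standing in for siphash(); NOT cryptographic */
static uint64_t hash64(uint64_t key, uint32_t a, uint32_t b)
{
	uint64_t h = key ^ ((uint64_t)a << 32 | b);
	h *= 0x9e3779b97f4a7c15ULL;
	return h ^ (h >> 32);
}

static const uint64_t net_secret = 0x1111;	/* random at boot upstream */
static const uint64_t ts_secret  = 0x2222;
static const int sysctl_tcp_timestamps = 1;

static uint32_t secure_ts_off(uint32_t saddr, uint32_t daddr)
{
	if (sysctl_tcp_timestamps != 1)
		return 0;
	return (uint32_t)hash64(ts_secret, saddr, daddr);	/* own key */
}

int main(void)
{
	uint32_t isn = (uint32_t)hash64(net_secret, 0x0a000001, 0x0a000002);

	printf("isn=%u tsoff=%u\n", isn,
	       secure_ts_off(0x0a000001, 0x0a000002));
	return 0;
}
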
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4ead336e14ea..7f9cc400eca0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = {
.data = &sysctl_net_busy_poll,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
{
.procname = "busy_read",
.data = &sysctl_net_busy_read,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
#endif
#ifdef CONFIG_NET_SCHED
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index fd9f34bbd740..dfb2ab2dd3c8 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
while ((d = next)) {
next = d->next;
dev = d->dev;
- if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+ if (d != ic_dev && !netdev_uses_dsa(dev)) {
pr_debug("IP-Config: Downing %s\n", dev->name);
dev_change_flags(dev, d->flags);
}
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index c9b52c361da2..53e49f5011d3 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
.timeout = 180,
};
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
- .me = THIS_MODULE,
- .help = help,
- .expect_policy = &snmp_exp_policy,
- .name = "snmp",
- .tuple.src.l3num = AF_INET,
- .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
- .tuple.dst.protonum = IPPROTO_UDP,
-};
-
static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
.me = THIS_MODULE,
.help = help,
@@ -1288,22 +1278,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
static int __init nf_nat_snmp_basic_init(void)
{
- int ret = 0;
-
BUG_ON(nf_nat_snmp_hook != NULL);
RCU_INIT_POINTER(nf_nat_snmp_hook, help);
- ret = nf_conntrack_helper_register(&snmp_trap_helper);
- if (ret < 0) {
- nf_conntrack_helper_unregister(&snmp_helper);
- return ret;
- }
- return ret;
+ return nf_conntrack_helper_register(&snmp_trap_helper);
}
static void __exit nf_nat_snmp_basic_fini(void)
{
RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+ synchronize_rcu();
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2af6244b83e2..ccfbce13a633 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
+
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+ write_lock_bh(&ping_table.lock);
if (sk_hashed(sk)) {
- write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&ping_table.lock);
}
+ write_unlock_bh(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);
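
Widening the lock closes a time-of-check-to-time-of-use hole: with the old
code two racing callers could both pass the sk_hashed() test. The repaired
shape in a userspace miniature:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool hashed = true;

static void unhash(void)
{
	pthread_rwlock_wrlock(&table_lock);	/* lock first... */
	if (hashed) {				/* ...then test */
		hashed = false;
		puts("removed once");
	}
	pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
	unhash();
	unhash();	/* second call finds nothing to do */
	return 0;
}
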
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c43119726a62..2c1f59386a7b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define REXMIT_LOST 1 /* retransmit packets marked lost */
#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
-static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
+ unsigned int len)
{
static bool __once __read_mostly;
@@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
- pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
- dev ? dev->name : "Unknown driver");
+ if (!dev || len >= dev->mtu)
+ pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+ dev ? dev->name : "Unknown driver");
rcu_read_unlock();
}
}
@@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (len >= icsk->icsk_ack.rcv_mss) {
icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
tcp_sk(sk)->advmss);
- if (unlikely(icsk->icsk_ack.rcv_mss != len))
- tcp_gro_dev_warn(sk, skb);
+ /* Account for possibly-removed options */
+ if (unlikely(len > icsk->icsk_ack.rcv_mss +
+ MAX_TCP_OPTION_SPACE))
+ tcp_gro_dev_warn(sk, skb, len);
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
@@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
const int ts)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (metric > tp->reordering) {
- int mib_idx;
+ int mib_idx;
+ if (metric > tp->reordering) {
tp->reordering = min(sysctl_tcp_max_reordering, metric);
- /* This exciting event is worth to be remembered. 8) */
- if (ts)
- mib_idx = LINUX_MIB_TCPTSREORDER;
- else if (tcp_is_reno(tp))
- mib_idx = LINUX_MIB_TCPRENOREORDER;
- else if (tcp_is_fack(tp))
- mib_idx = LINUX_MIB_TCPFACKREORDER;
- else
- mib_idx = LINUX_MIB_TCPSACKREORDER;
-
- NET_INC_STATS(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
}
tp->rack.reord = 1;
+
+ /* This exciting event is worth remembering. 8) */
+ if (ts)
+ mib_idx = LINUX_MIB_TCPTSREORDER;
+ else if (tcp_is_reno(tp))
+ mib_idx = LINUX_MIB_TCPRENOREORDER;
+ else if (tcp_is_fack(tp))
+ mib_idx = LINUX_MIB_TCPFACKREORDER;
+ else
+ mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+ NET_INC_STATS(sock_net(sk), mib_idx);
}
/* This must be called before lost_out is incremented */
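
The warning heuristic above now tolerates segments whose excess is explained
by stripped TCP options (MAX_TCP_OPTION_SPACE is 40 bytes) and only fires
when the segment also reaches the device MTU. The condition, runnable on its
own:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TCP_OPTION_SPACE 40	/* maximum bytes of TCP options */

static bool suspect_gro(unsigned int len, unsigned int rcv_mss,
			unsigned int mtu)
{
	return len > rcv_mss + MAX_TCP_OPTION_SPACE && len >= mtu;
}

int main(void)
{
	printf("%d\n", suspect_gro(1460, 1448, 1500)); /* 0: options case */
	printf("%d\n", suspect_gro(4000, 1448, 1500)); /* 1: driver suspect */
	return 0;
}
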
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 4ecb38ae8504..d8acbd9f477a 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
/* Account for retransmits that are lost again */
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
+ tcp_skb_pcount(skb));
}
}
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 309062f3debe..31762f76cdb5 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct kcm_attach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_attach_ioctl(sock, &info);
@@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct kcm_unattach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_unattach_ioctl(sock, &info);
@@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct socket *newsock = NULL;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_clone(sock, &info, &newsock);
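
Each branch used to set err = -EFAULT and then fall through, handing the
ioctl helper an uninitialized struct. The repaired shape, returning at the
point of failure, reduced to a sketch with a modeled copy_from_user():

#include <errno.h>
#include <string.h>

struct attach_info { int fd; };

/* models copy_from_user(): nonzero means the copy faulted */
static int copy_in(void *dst, const void *src, size_t len)
{
	if (!src)
		return -1;
	memcpy(dst, src, len);
	return 0;
}

static int do_attach(const struct attach_info *info) { (void)info; return 0; }

static int ioctl_attach(const void *uarg)
{
	struct attach_info info;

	if (copy_in(&info, uarg, sizeof(info)))
		return -EFAULT;		/* bail before touching info */

	return do_attach(&info);
}

int main(void)
{
	return ioctl_attach(0) == -EFAULT ? 0 : 1;
}
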
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8adab6335ced..e37d9554da7b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,57 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
}
EXPORT_SYMBOL_GPL(l2tp_session_find);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+
+ if (!tunnel) {
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ session_list = l2tp_session_id_hash_2(pn, session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, session_list, global_hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+ }
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, session_list, hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return session;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref)
{
int hash;
struct l2tp_session *session;
@@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
read_unlock_bh(&tunnel->hlist_lock);
return session;
}
@@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
*/
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref)
{
struct l2tp_net *pn = l2tp_pernet(net);
int hash;
@@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
rcu_read_unlock_bh();
+
return session;
}
}
@@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session)
+{
+ struct l2tp_session *session_walk;
+ struct hlist_head *g_head;
+ struct hlist_head *head;
+ struct l2tp_net *pn;
+
+ head = l2tp_session_id_hash(tunnel, session->session_id);
+
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session_walk, head, hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist;
+
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+ session->session_id);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_for_each_entry(session_walk, g_head, global_hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist_glob;
+
+ hlist_add_head_rcu(&session->global_hlist, g_head);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ }
+
+ hlist_add_head(&session->hlist, head);
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return 0;
+
+exist_glob:
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return -EEXIST;
+}
/* Lookup a tunnel by id
*/
@@ -633,6 +733,9 @@ discard:
* a data (not control) frame before coming here. Fields up to the
* session-id have already been parsed and ptr points to the data
* after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
*/
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
int offset;
u32 ns, nr;
- /* The ref count is increased since we now hold a pointer to
- * the session. Take care to decrement the refcnt when exiting
- * this function from now on...
- */
- l2tp_session_inc_refcount(session);
- if (session->ref)
- (*session->ref)(session);
-
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
/* Try to dequeue as many skbs from reorder_q as we can. */
l2tp_recv_dequeue(session);
- l2tp_session_dec_refcount(session);
-
return;
discard:
@@ -812,8 +905,6 @@ discard:
if (session->deref)
(*session->deref)(session);
-
- l2tp_session_dec_refcount(session);
}
EXPORT_SYMBOL(l2tp_recv_common);
@@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
}
/* Find the session context */
- session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+ session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
if (!session || !session->recv_skb) {
+ if (session) {
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ }
+
/* Not found? Pass to userspace to deal with */
l2tp_info(tunnel, L2TP_MSG_DATA,
"%s: no session found (%u/%u). Passing up.\n",
@@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
}
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
@@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct l2tp_session *session;
+ int err;
session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
if (session != NULL) {
@@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
l2tp_session_set_header_len(session, tunnel->version);
+ err = l2tp_session_add_to_tunnel(tunnel, session);
+ if (err) {
+ kfree(session);
+
+ return ERR_PTR(err);
+ }
+
/* Bump the reference count. The session context is deleted
* only when this drops to zero.
*/
@@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
/* Ensure tunnel socket isn't deleted */
sock_hold(tunnel->sock);
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- l2tp_session_id_hash(tunnel, session_id));
- write_unlock_bh(&tunnel->hlist_lock);
-
- /* And to the global session list if L2TPv3 */
- if (tunnel->version != L2TP_HDR_VER_2) {
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_add_head_rcu(&session->global_hlist,
- l2tp_session_id_hash_2(pn, session_id));
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
- }
-
/* Ignore management session in session count value */
if (session->session_id != 0)
atomic_inc(&l2tp_session_count);
+
+ return session;
}
- return session;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
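
The *_find() to *_get() conversion means any lookup whose result escapes the
lock hands back a counted reference, and every caller pairs the get with a
put. A compilable miniature with a flat table and a plain atomic counter in
place of the kernel's hash lists and do_ref hook:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
	atomic_int refcnt;
	unsigned int id;
};

static struct session *table[4];

static struct session *session_get(unsigned int id)
{
	/* under the hash lock in the kernel */
	for (int i = 0; i < 4; i++)
		if (table[i] && table[i]->id == id) {
			/* take the reference before the lock is dropped */
			atomic_fetch_add(&table[i]->refcnt, 1);
			return table[i];
		}
	return NULL;
}

static void session_put(struct session *s)
{
	if (atomic_fetch_sub(&s->refcnt, 1) == 1)
		free(s);
}

int main(void)
{
	struct session *s = calloc(1, sizeof(*s));

	atomic_init(&s->refcnt, 1);
	s->id = 7;
	table[0] = s;

	struct session *got = session_get(7);
	if (got) {
		printf("found %u\n", got->id);
		session_put(got);	/* pairs with session_get() */
	}
	table[0] = NULL;
	session_put(s);			/* drop the creation reference */
	return 0;
}
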
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index aebf281d09ee..8ce7818c7a9d 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -230,11 +230,16 @@ out:
return tunnel;
}
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref);
struct l2tp_session *l2tp_session_find(struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref);
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a2ae34..d100aed3d06f 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
}
/* Show the tunnel or session context */
- if (pd->session == NULL)
+ if (!pd->session) {
l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
l2tp_dfs_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8bf18a5f66e0..6fd41d7afe1e 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
goto out;
}
- session = l2tp_session_find(net, tunnel, session_id);
- if (session) {
- rc = -EEXIST;
- goto out;
- }
-
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
@@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
- if (!session) {
- rc = -ENOMEM;
+ if (IS_ERR(session)) {
+ rc = PTR_ERR(session);
goto out;
}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d25038cfd64e..4d322c1b7233 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(net, NULL, session_id, true);
+ if (!session)
goto discard;
tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;
/* Point to L2TP header */
optr = ptr = skb->data;
@@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
@@ -178,9 +179,10 @@ pass_up:
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
@@ -202,6 +204,12 @@ pass_up:
return sk_receive_skb(sk, skb, 1);
+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index a4abcbc4c09a..88b397c30d86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(net, NULL, session_id, true);
+ if (!session)
goto discard;
tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;
/* Point to L2TP header */
optr = ptr = skb->data;
@@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
+
return 0;
pass_up:
@@ -191,9 +193,10 @@ pass_up:
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
@@ -215,6 +218,12 @@ pass_up:
return sk_receive_skb(sk, skb, 1);
+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 3620fba31786..7e3e669baac4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+ bool do_ref)
{
u32 tunnel_id;
u32 session_id;
@@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
if (info->attrs[L2TP_ATTR_IFNAME]) {
ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
- session = l2tp_session_find_by_ifname(net, ifname);
+ session = l2tp_session_get_by_ifname(net, ifname, do_ref);
} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel)
- session = l2tp_session_find(net, tunnel, session_id);
+ session = l2tp_session_get(net, tunnel, session_id,
+ do_ref);
}
return session;
@@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
session_id, peer_session_id, &cfg);
if (ret >= 0) {
- session = l2tp_session_find(net, tunnel, session_id);
- if (session)
+ session = l2tp_session_get(net, tunnel, session_id, false);
+ if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
+ l2tp_session_dec_refcount(session);
+ }
}
out:
@@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
struct l2tp_session *session;
u16 pw_type;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, true);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -681,7 +689,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
int ret = 0;
struct l2tp_session *session;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
ret = l2tp_session_notify(&l2tp_nl_family, info,
session, L2TP_CMD_SESSION_MODIFY);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
int ret;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
- goto out;
+ goto err;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err_ref;
}
ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
0, session, L2TP_CMD_SESSION_GET);
if (ret < 0)
- goto err_out;
+ goto err_ref_msg;
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
-err_out:
- nlmsg_free(msg);
+ l2tp_session_dec_refcount(session);
-out:
+ return ret;
+
+err_ref_msg:
+ nlmsg_free(msg);
+err_ref:
+ l2tp_session_dec_refcount(session);
+err:
return ret;
}
@@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
goto out;
}
- session = l2tp_session_find_nth(tunnel, si);
+ session = l2tp_session_get_nth(tunnel, si, false);
if (session == NULL) {
ti++;
tunnel = NULL;
@@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- session, L2TP_CMD_SESSION_GET) < 0)
+ session, L2TP_CMD_SESSION_GET) < 0) {
+ l2tp_session_dec_refcount(session);
break;
+ }
+ l2tp_session_dec_refcount(session);
si++;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 36cc56fd0418..861b255a2d51 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
static void pppol2tp_session_destruct(struct sock *sk)
{
struct l2tp_session *session = sk->sk_user_data;
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
if (session) {
sk->sk_user_data = NULL;
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
l2tp_session_queue_purge(session);
sock_put(sk);
}
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-
release_sock(sk);
/* This will delete the session context via
@@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
int error = 0;
u32 tunnel_id, peer_tunnel_id;
u32 session_id, peer_session_id;
+ bool drop_refcnt = false;
int ver = 2;
int fd;
@@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (tunnel->peer_tunnel_id == 0)
tunnel->peer_tunnel_id = peer_tunnel_id;
- /* Create session if it doesn't already exist. We handle the
- * case where a session was previously created by the netlink
- * interface by checking that the session doesn't already have
- * a socket and its tunnel socket are what we expect. If any
- * of those checks fail, return EEXIST to the caller.
- */
- session = l2tp_session_find(sock_net(sk), tunnel, session_id);
- if (session == NULL) {
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
+ session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+ if (session) {
+ drop_refcnt = true;
+ ps = l2tp_session_priv(session);
+
+ /* Using a pre-existing session is fine as long as it hasn't
+ * been connected yet.
*/
- cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ if (ps->sock) {
+ error = -EEXIST;
+ goto end;
+ }
- /* Allocate and initialize a new session context. */
- session = l2tp_session_create(sizeof(struct pppol2tp_session),
- tunnel, session_id,
- peer_session_id, &cfg);
- if (session == NULL) {
- error = -ENOMEM;
+ /* consistency checks */
+ if (ps->tunnel_sock != tunnel->sock) {
+ error = -EEXIST;
goto end;
}
} else {
- ps = l2tp_session_priv(session);
- error = -EEXIST;
- if (ps->sock != NULL)
- goto end;
+ /* Default MTU must allow space for UDP/L2TP/PPP headers */
+ cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ cfg.mru = cfg.mtu;
- /* consistency checks */
- if (ps->tunnel_sock != tunnel->sock)
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, &cfg);
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto end;
+ }
}
/* Associate session with its PPPoL2TP socket */
@@ -777,6 +779,8 @@ out_no_ppp:
session->name);
end:
+ if (drop_refcnt)
+ l2tp_session_dec_refcount(session);
release_sock(sk);
return error;
@@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
if (tunnel->sock == NULL)
goto out;
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = l2tp_session_find(net, tunnel, session_id);
- if (session != NULL)
- goto out;
-
/* Default MTU values. */
if (cfg->mtu == 0)
cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
cfg->mru = cfg->mtu;
/* Allocate and initialize a new session context. */
- error = -ENOMEM;
session = l2tp_session_create(sizeof(struct pppol2tp_session),
tunnel, session_id,
peer_session_id, cfg);
- if (session == NULL)
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto out;
+ }
ps = l2tp_session_priv(session);
ps->tunnel_sock = tunnel->sock;
@@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
if (stats.session_id != 0) {
/* resend to session ioctl handler */
struct l2tp_session *session =
- l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
+ l2tp_session_get(sock_net(sk), tunnel,
+ stats.session_id, true);
+
+ if (session) {
+ err = pppol2tp_session_ioctl(session, cmd,
+ arg);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ } else {
err = -EBADR;
+ }
break;
}
#ifdef CONFIG_XFRM
@@ -1554,7 +1560,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
@@ -1681,10 +1687,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
/* Show the tunnel or session context.
*/
- if (pd->session == NULL)
+ if (!pd->session) {
pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
pppol2tp_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
@@ -1843,4 +1853,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 40813dd3301c..5bb0c5012819 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
ieee80211_recalc_ps(local);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ local->ops->wake_tx_queue) {
/* XXX: for AP_VLAN, actually track AP queues */
netif_tx_start_all_queues(dev);
} else if (dev) {
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index da9df2d56e66..22fc32143e9c 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 02bcf00c2492..008299b7f78f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
- BUG_ON(t == NULL);
+ if (!t) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
len = off + t->len + var_alloc_len;
alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
- BUG_ON(t == NULL);
+ if (!t) {
+ rcu_read_unlock();
+ return NULL;
+ }
newoff = ALIGN(old->len, t->align);
newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
- rcu_barrier(); /* Wait for completion of call_rcu()'s */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6806b5e73567..908d858034e4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3442,6 +3442,7 @@ static void __exit ctnetlink_exit(void)
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
+ synchronize_rcu();
}
module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 94b14c5a8b17..82802e4a6640 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void)
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
+ synchronize_rcu();
+
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index de8782345c86..d45558178da5 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
+struct nfnl_cthelper {
+ struct list_head list;
+ struct nf_conntrack_helper helper;
+};
+
+static LIST_HEAD(nfnl_cthelper_list);
+
static int
nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
int i, ret;
struct nf_conntrack_expect_policy *expect_policy;
struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+ unsigned int class_max;
ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set);
@@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
- helper->expect_class_max =
- ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
-
- if (helper->expect_class_max != 0 &&
- helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+ class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+ if (class_max == 0)
+ return -EINVAL;
+ if (class_max > NF_CT_MAX_EXPECT_CLASSES)
return -EOVERFLOW;
expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
- helper->expect_class_max, GFP_KERNEL);
+ class_max, GFP_KERNEL);
if (expect_policy == NULL)
return -ENOMEM;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < class_max; i++) {
if (!tb[NFCTH_POLICY_SET+i])
goto err;
@@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
if (ret < 0)
goto err;
}
+
+ helper->expect_class_max = class_max - 1;
helper->expect_policy = expect_policy;
return 0;
err:
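
The convention the later hunks depend on: the netlink attribute carries a
class count, while expect_class_max stores the highest valid index, so the
parser stores count - 1 and the dumper sends index + 1. Reduced to
arithmetic:

#include <stdio.h>

static unsigned int store_class_max(unsigned int wire_count)
{
	return wire_count - 1;		/* highest valid array index */
}

static unsigned int dump_class_num(unsigned int class_max)
{
	return class_max + 1;		/* back to a count on the wire */
}

int main(void)
{
	unsigned int class_max = store_class_max(3);	/* classes 0..2 */

	printf("iterate i = 0 .. %u\n", class_max);
	printf("NFCTH_POLICY_SET_NUM = %u\n", dump_class_num(class_max));
	return 0;
}
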
@@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_helper *helper;
+ struct nfnl_cthelper *nfcth;
int ret;
if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
return -EINVAL;
- helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
- if (helper == NULL)
+ nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
+ if (nfcth == NULL)
return -ENOMEM;
+ helper = &nfcth->helper;
ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
if (ret < 0)
- goto err;
+ goto err1;
strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -245,15 +256,101 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
ret = nf_conntrack_helper_register(helper);
if (ret < 0)
- goto err;
+ goto err2;
+ list_add_tail(&nfcth->list, &nfnl_cthelper_list);
return 0;
-err:
- kfree(helper);
+err2:
+ kfree(helper->expect_policy);
+err1:
+ kfree(nfcth);
return ret;
}
static int
+nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
+ struct nf_conntrack_expect_policy *new_policy,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_POLICY_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
+ nfnl_cthelper_expect_pol);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFCTH_POLICY_NAME] ||
+ !tb[NFCTH_POLICY_EXPECT_MAX] ||
+ !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+ return -EINVAL;
+
+ if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
+ return -EBUSY;
+
+ new_policy->max_expected =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+ new_policy->timeout =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+ return 0;
+}
+
+static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
+ struct nf_conntrack_helper *helper)
+{
+ struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
+ struct nf_conntrack_expect_policy *policy;
+ int i, err;
+
+ /* Check first that all policy attributes are well-formed, so we don't
+ * leave things in an inconsistent state on errors.
+ */
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
+
+ if (!tb[NFCTH_POLICY_SET + i])
+ return -EINVAL;
+
+ err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
+ &new_policy[i],
+ tb[NFCTH_POLICY_SET + i]);
+ if (err < 0)
+ return err;
+ }
+ /* Now we can safely update them. */
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
+ policy = (struct nf_conntrack_expect_policy *)
+ &helper->expect_policy[i];
+ policy->max_expected = new_policy[i].max_expected;
+ policy->timeout = new_policy[i].timeout;
+ }
+
+ return 0;
+}
+
+static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
+ unsigned int class_max;
+ int err;
+
+ err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+ nfnl_cthelper_expect_policy_set);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFCTH_POLICY_SET_NUM])
+ return -EINVAL;
+
+ class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+ if (helper->expect_class_max + 1 != class_max)
+ return -EBUSY;
+
+ return nfnl_cthelper_update_policy_all(tb, helper);
+}
+
+static int
nfnl_cthelper_update(const struct nlattr * const tb[],
struct nf_conntrack_helper *helper)
{
@@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
return -EBUSY;
if (tb[NFCTH_POLICY]) {
- ret = nfnl_cthelper_parse_expect_policy(helper,
- tb[NFCTH_POLICY]);
+ ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
if (ret < 0)
return ret;
}
@@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
const char *helper_name;
struct nf_conntrack_helper *cur, *helper = NULL;
struct nf_conntrack_tuple tuple;
- int ret = 0, i;
+ struct nfnl_cthelper *nlcth;
+ int ret = 0;
if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
return -EINVAL;
@@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
if (ret < 0)
return ret;
- rcu_read_lock();
- for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
- hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+ list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+ continue;
- if (strncmp(cur->name, helper_name,
- NF_CT_HELPER_NAME_LEN) != 0)
- continue;
+ if ((tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
- if ((tuple.src.l3num != cur->tuple.src.l3num ||
- tuple.dst.protonum != cur->tuple.dst.protonum))
- continue;
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
- if (nlh->nlmsg_flags & NLM_F_EXCL) {
- ret = -EEXIST;
- goto err;
- }
- helper = cur;
- break;
- }
+ helper = cur;
+ break;
}
- rcu_read_unlock();
if (helper == NULL)
ret = nfnl_cthelper_create(tb, &tuple);
@@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
ret = nfnl_cthelper_update(tb, helper);
return ret;
-err:
- rcu_read_unlock();
- return ret;
}
static int
@@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
- htonl(helper->expect_class_max)))
+ htonl(helper->expect_class_max + 1)))
goto nla_put_failure;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
nest_parms2 = nla_nest_start(skb,
(NFCTH_POLICY_SET+i) | NLA_F_NESTED);
if (nest_parms2 == NULL)
@@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const tb[])
{
- int ret = -ENOENT, i;
+ int ret = -ENOENT;
struct nf_conntrack_helper *cur;
struct sk_buff *skb2;
char *helper_name = NULL;
struct nf_conntrack_tuple tuple;
+ struct nfnl_cthelper *nlcth;
bool tuple_set = false;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
tuple_set = true;
}
- for (i = 0; i < nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+ list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
+ if (helper_name &&
+ strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+ continue;
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
- if (helper_name && strncmp(cur->name, helper_name,
- NF_CT_HELPER_NAME_LEN) != 0) {
- continue;
- }
- if (tuple_set &&
- (tuple.src.l3num != cur->tuple.src.l3num ||
- tuple.dst.protonum != cur->tuple.dst.protonum))
- continue;
-
- skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL) {
- ret = -ENOMEM;
- break;
- }
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
- ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
- nlh->nlmsg_seq,
- NFNL_MSG_TYPE(nlh->nlmsg_type),
- NFNL_MSG_CTHELPER_NEW, cur);
- if (ret <= 0) {
- kfree_skb(skb2);
- break;
- }
+ ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ NFNL_MSG_CTHELPER_NEW, cur);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ break;
+ }
- ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret > 0)
- ret = 0;
+ ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
- /* this avoids a loop in nfnetlink. */
- return ret == -EAGAIN ? -ENOBUFS : ret;
- }
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
}
return ret;
}
@@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
{
char *helper_name = NULL;
struct nf_conntrack_helper *cur;
- struct hlist_node *tmp;
struct nf_conntrack_tuple tuple;
bool tuple_set = false, found = false;
- int i, j = 0, ret;
+ struct nfnl_cthelper *nlcth, *n;
+ int j = 0, ret;
if (tb[NFCTH_NAME])
helper_name = nla_data(tb[NFCTH_NAME]);
@@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
tuple_set = true;
}
- for (i = 0; i < nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
- hnode) {
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
+ j++;
- j++;
+ if (helper_name &&
+ strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+ continue;
- if (helper_name && strncmp(cur->name, helper_name,
- NF_CT_HELPER_NAME_LEN) != 0) {
- continue;
- }
- if (tuple_set &&
- (tuple.src.l3num != cur->tuple.src.l3num ||
- tuple.dst.protonum != cur->tuple.dst.protonum))
- continue;
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
- found = true;
- nf_conntrack_helper_unregister(cur);
- }
+ found = true;
+ nf_conntrack_helper_unregister(cur);
+ kfree(cur->expect_policy);
+
+ list_del(&nlcth->list);
+ kfree(nlcth);
}
+
	/* Make sure we return success if we flush and there are no helpers */
return (found || j == 0) ? 0 : -ENOENT;
}
@@ -662,20 +741,16 @@ err_out:
static void __exit nfnl_cthelper_exit(void)
{
struct nf_conntrack_helper *cur;
- struct hlist_node *tmp;
- int i;
+ struct nfnl_cthelper *nlcth, *n;
nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
- for (i=0; i<nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
- hnode) {
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
- nf_conntrack_helper_unregister(cur);
- }
+ nf_conntrack_helper_unregister(cur);
+ kfree(cur->expect_policy);
+ kfree(nlcth);
}
}
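
The nfnl_cthelper_update_policy_all() hunk above uses a two-phase update: every
NFCTH_POLICY_SET attribute is parsed into a scratch array first, and the live
expect_policy table is only written once the whole set has validated. A minimal
stand-alone sketch of that pattern (hypothetical types and names, not the
kernel API):

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	struct policy {
		unsigned int max_expected;
		unsigned int timeout;
	};

	/* Hypothetical per-entry validator: reject zero limits. */
	static int parse_one(const struct policy *in, struct policy *out)
	{
		if (in->max_expected == 0 || in->timeout == 0)
			return -EINVAL;
		*out = *in;
		return 0;
	}

	/* Stage everything first; commit only after all entries validated. */
	static int update_all(struct policy *live, const struct policy *req,
			      size_t n)
	{
		struct policy staged[n];	/* VLA, as in the patch above */
		size_t i;
		int err;

		for (i = 0; i < n; i++) {
			err = parse_one(&req[i], &staged[i]);
			if (err < 0)
				return err;	/* live[] is still untouched */
		}
		for (i = 0; i < n; i++)
			live[i] = staged[i];	/* note the per-index copy */
		return 0;
	}

	int main(void)
	{
		struct policy live[2] = { { 1, 1 }, { 2, 2 } };
		const struct policy req[2] = { { 3, 3 }, { 0, 0 } };

		/* prints "update: -22, live[0].max_expected: 1" */
		printf("update: %d, live[0].max_expected: %u\n",
		       update_all(live, req, 2), live[0].max_expected);
		return 0;
	}
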
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 139e0867e56e..47d6656c9119 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+ synchronize_rcu();
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- rcu_barrier();
}
module_init(cttimeout_init);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3ee0b8a000a4..933509ebf3d3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb) {
skb_tx_error(entskb);
- return NULL;
+ goto nlmsg_failure;
}
nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (!nlh) {
skb_tx_error(entskb);
kfree_skb(skb);
- return NULL;
+ goto nlmsg_failure;
}
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
}
nlh->nlmsg_len = skb->len;
+ if (seclen)
+ security_release_secctx(secdata, seclen);
return skb;
nla_put_failure:
skb_tx_error(entskb);
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+ if (seclen)
+ security_release_secctx(secdata, seclen);
return NULL;
}
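
The nfqnl_build_packet_message() fix funnels both early failures through the
new nlmsg_failure label so that security_release_secctx() runs on every exit
path, not just the success and nla_put_failure ones. The shape of the fix,
reduced to a stand-alone sketch with toy acquire/release helpers:

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy stand-ins for the secctx acquire/release pair. */
	static char *get_ctx(size_t *len) { *len = 4; return malloc(*len); }
	static void put_ctx(char *ctx) { free(ctx); }

	static int build_message(int fail_early)
	{
		size_t seclen = 0;
		char *secdata = get_ctx(&seclen);
		int ret = -1;

		if (fail_early)
			goto msg_failure;	/* early exit still releases below */

		/* ... fill in the message ... */
		ret = 0;
	msg_failure:
		if (seclen)
			put_ctx(secdata);	/* single release point, all paths */
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", build_message(0), build_message(1));
		return 0;
	}
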
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e0a87776a010..7b2c2fce408a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net,
*/
if (nf_ct_is_confirmed(ct))
nf_ct_delete(ct, 0, 0);
- else
- nf_conntrack_put(&ct->ct_general);
+
+ nf_conntrack_put(&ct->ct_general);
nf_ct_set(skb, NULL, 0);
return false;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9d4bb8eb63f2..3f76cb765e5b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
/* Link layer. */
clear_vlan(key);
- if (key->mac_proto == MAC_PROTO_NONE) {
+ if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
if (unlikely(eth_type_vlan(skb->protocol)))
return -EINVAL;
@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
- return key_extract(skb, key);
+ int res;
+
+ res = key_extract(skb, key);
+ if (!res)
+ key->mac_proto &= ~SW_FLOW_KEY_INVALID;
+
+ return res;
}
static int key_extract_mac_proto(struct sk_buff *skb)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a0dbe7ca8f72..8489beff5c25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
po->tp_reserve = val;
return 0;
}
@@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
if (po->tp_version >= TPACKET_V3 &&
- (int)(req->tp_block_size -
- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+ req->tp_block_size <=
+ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
@@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
+ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
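
Both af_packet hunks are integer-overflow guards: tp_reserve is now capped at
INT_MAX before it feeds later size arithmetic, and tp_block_size * tp_block_nr
is rejected when the product would wrap a u32, checked by division so the
comparison itself cannot overflow. The same checks in a stand-alone form
(illustrative, not the kernel code):

	#include <limits.h>
	#include <stdio.h>

	/* Reject block_size * block_nr combinations that would wrap 32 bits;
	 * dividing first keeps the comparison overflow-free. */
	static int ring_size_ok(unsigned int block_size, unsigned int block_nr)
	{
		return !block_nr || block_size <= UINT_MAX / block_nr;
	}

	int main(void)
	{
		unsigned long long reserve = 3ULL * 1024 * 1024 * 1024;

		printf("reserve ok: %d\n", reserve <= INT_MAX);		/* 0 */
		printf("ring ok:    %d\n", ring_size_ok(1u << 20, 8192));	/* 0 */
		printf("ring ok:    %d\n", ring_size_ok(4096, 256));	/* 1 */
		return 0;
	}
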
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0439a1a68367..a9708da28eb5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -246,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
if (!sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
+ if (sctp_stream_new(asoc, gfp))
+ goto fail_init;
+
/* Assume that peer would support both address types unless we are
* told otherwise.
*/
@@ -264,7 +267,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
- goto fail_init;
+ goto stream_free;
asoc->active_key_id = ep->active_key_id;
asoc->prsctp_enable = ep->prsctp_enable;
@@ -287,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
return asoc;
+stream_free:
+ sctp_stream_free(asoc->stream);
fail_init:
sock_put(asoc->base.sk);
sctp_endpoint_put(asoc->ep);
@@ -1407,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
/* Update the association's pmtu and frag_point by going through all the
* transports. This routine is called when a transport's PMTU has changed.
*/
-void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
struct sctp_transport *t;
__u32 pmtu = 0;
@@ -1419,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(sk, t,
- SCTP_TRUNC4(dst_mtu(t->dst)));
+ sctp_transport_update_pmtu(
+ t, SCTP_TRUNC4(dst_mtu(t->dst)));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a28ab20487f..0e06a278d2a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
if (t->param_flags & SPP_PMTUD_ENABLE) {
/* Update transports view of the MTU */
- sctp_transport_update_pmtu(sk, t, pmtu);
+ sctp_transport_update_pmtu(t, pmtu);
/* Update association pmtu. */
- sctp_assoc_sync_pmtu(sk, asoc);
+ sctp_assoc_sync_pmtu(asoc);
}
/* Retransmit with the new pmtu setting.
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1224421036b3..1409a875ad8e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
{
struct sctp_transport *tp = packet->transport;
struct sctp_association *asoc = tp->asoc;
+ struct sock *sk;
pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
-
packet->vtag = vtag;
- if (asoc && tp->dst) {
- struct sock *sk = asoc->base.sk;
-
- rcu_read_lock();
- if (__sk_dst_get(sk) != tp->dst) {
- dst_hold(tp->dst);
- sk_setup_caps(sk, tp->dst);
- }
-
- if (sk_can_gso(sk)) {
- struct net_device *dev = tp->dst->dev;
+	/* do the following jobs only once per flush schedule */
+ if (!sctp_packet_empty(packet))
+ return;
- packet->max_size = dev->gso_max_size;
- } else {
- packet->max_size = asoc->pathmtu;
- }
- rcu_read_unlock();
+	/* set packet max_size to the transport's pathmtu */
+ packet->max_size = tp->pathmtu;
+ if (!asoc)
+ return;
- } else {
- packet->max_size = tp->pathmtu;
+	/* update dst or transport pathmtu if needed */
+ sk = asoc->base.sk;
+ if (!sctp_transport_dst_check(tp)) {
+ sctp_transport_route(tp, NULL, sctp_sk(sk));
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ } else if (!sctp_transport_pmtu_check(tp)) {
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
}
- if (ecn_capable && sctp_packet_empty(packet)) {
- struct sctp_chunk *chunk;
+	/* If there is a prepend chunk, stick it on the list before
+ * any other chunks get appended.
+ */
+ if (ecn_capable) {
+ struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
- /* If there a is a prepend chunk stick it on the list before
- * any other chunks get appended.
- */
- chunk = sctp_get_ecne_prepend(asoc);
if (chunk)
sctp_packet_append_chunk(packet, chunk);
}
+
+ if (!tp->dst)
+ return;
+
+	/* set packet max_size to gso_max_size if GSO is enabled */
+ rcu_read_lock();
+ if (__sk_dst_get(sk) != tp->dst) {
+ dst_hold(tp->dst);
+ sk_setup_caps(sk, tp->dst);
+ }
+ packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
+ : asoc->pathmtu;
+ rcu_read_unlock();
}
/* Initialize the packet structure. */
@@ -582,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
sh->vtag = htonl(packet->vtag);
sh->checksum = 0;
- /* update dst if in need */
- if (!sctp_transport_dst_check(tp)) {
- sctp_transport_route(tp, NULL, sctp_sk(sk));
- if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
- sctp_assoc_sync_pmtu(sk, asoc);
- }
+ /* drop packet if no dst */
dst = dst_clone(tp->dst);
if (!dst) {
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -704,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
*/
if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
- !chunk->msg->force_delay)
+ !asoc->force_delay)
/* Nothing unacked */
return SCTP_XMIT_OK;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 025ccff67072..8081476ed313 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1026,8 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
- if (chunk->sinfo.sinfo_stream >=
- asoc->c.sinit_num_ostreams) {
+ if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
/* Mark as failed send. */
sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 206377fe91ec..a0b29d43627f 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
sctp_seq_dump_remote_addrs(seq, assoc);
seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
"%8d %8d %8d %8d",
- assoc->hbinterval, assoc->c.sinit_max_instreams,
- assoc->c.sinit_num_ostreams, assoc->max_retrans,
+ assoc->hbinterval, assoc->stream->incnt,
+ assoc->stream->outcnt, assoc->max_retrans,
assoc->init_retries, assoc->shutdown_retries,
assoc->rtx_data_chunks,
atomic_read(&sk->sk_wmem_alloc),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 969a30c7bb54..118faff6a332 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
* association.
*/
if (!asoc->temp) {
- int error;
-
- asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
- asoc->c.sinit_num_ostreams, gfp);
- if (!asoc->stream)
+ if (sctp_stream_init(asoc, gfp))
goto clean_up;
- error = sctp_assoc_set_id(asoc, gfp);
- if (error)
+ if (sctp_assoc_set_id(asoc, gfp))
goto clean_up;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index e03bb1aab4d0..24c6ccce7539 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3946,7 +3946,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ if (ntohs(skip->stream) >= asoc->stream->incnt)
goto discard_noforce;
}
@@ -4017,7 +4017,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ if (ntohs(skip->stream) >= asoc->stream->incnt)
goto gen_shutdown;
}
@@ -6353,7 +6353,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* and discard the DATA chunk.
*/
sid = ntohs(data_hdr->stream);
- if (sid >= asoc->c.sinit_max_instreams) {
+ if (sid >= asoc->stream->incnt) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0f378ea2ae38..c1401f43d40f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
}
if (asoc->pmtu_pending)
- sctp_assoc_pending_pmtu(sk, asoc);
+ sctp_assoc_pending_pmtu(asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
}
/* Check for invalid stream. */
- if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+ if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
err = -EINVAL;
goto out_free;
}
@@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
err = PTR_ERR(datamsg);
goto out_free;
}
- datamsg->force_delay = !!(msg->msg_flags & MSG_MORE);
+ asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
@@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
- sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+ sctp_assoc_sync_pmtu(asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
} else {
@@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
- sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+ sctp_assoc_sync_pmtu(asoc);
}
} else if (asoc) {
asoc->param_flags =
@@ -4461,8 +4461,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
info->sctpi_rwnd = asoc->a_rwnd;
info->sctpi_unackdata = asoc->unack_data;
info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- info->sctpi_instrms = asoc->c.sinit_max_instreams;
- info->sctpi_outstrms = asoc->c.sinit_num_ostreams;
+ info->sctpi_instrms = asoc->stream->incnt;
+ info->sctpi_outstrms = asoc->stream->outcnt;
list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
info->sctpi_inqueue++;
list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4691,8 +4691,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
status.sstat_unackdata = asoc->unack_data;
status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- status.sstat_instrms = asoc->c.sinit_max_instreams;
- status.sstat_outstrms = asoc->c.sinit_num_ostreams;
+ status.sstat_instrms = asoc->stream->incnt;
+ status.sstat_outstrms = asoc->stream->outcnt;
status.sstat_fragmentation_point = asoc->frag_point;
status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 1c6cc04fa3a4..bbed997e1c5f 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -35,33 +35,60 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp)
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
{
struct sctp_stream *stream;
int i;
stream = kzalloc(sizeof(*stream), gfp);
if (!stream)
- return NULL;
+ return -ENOMEM;
- stream->outcnt = outcnt;
+ stream->outcnt = asoc->c.sinit_num_ostreams;
stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
if (!stream->out) {
kfree(stream);
- return NULL;
+ return -ENOMEM;
}
for (i = 0; i < stream->outcnt; i++)
stream->out[i].state = SCTP_STREAM_OPEN;
- stream->incnt = incnt;
+ asoc->stream = stream;
+
+ return 0;
+}
+
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
+{
+ struct sctp_stream *stream = asoc->stream;
+ int i;
+
+ /* Initial stream->out size may be very big, so free it and alloc
+ * a new one with new outcnt to save memory.
+ */
+ kfree(stream->out);
+ stream->outcnt = asoc->c.sinit_num_ostreams;
+ stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
+ if (!stream->out)
+ goto nomem;
+
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_OPEN;
+
+ stream->incnt = asoc->c.sinit_max_instreams;
stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
if (!stream->in) {
kfree(stream->out);
- kfree(stream);
- return NULL;
+ goto nomem;
}
- return stream;
+ return 0;
+
+nomem:
+ asoc->stream = NULL;
+ kfree(stream);
+
+ return -ENOMEM;
}
void sctp_stream_free(struct sctp_stream *stream)
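
sctp_stream_new() now runs at association init with the socket's requested
sinit counts, and sctp_stream_init() later replaces the out[] array once the
peer's INIT has fixed the real stream counts, so an oversized initial request
does not stay allocated. A reduced model of that two-step setup (simplified
types, not the SCTP structures):

	#include <stdio.h>
	#include <stdlib.h>

	struct stream {
		unsigned short outcnt, incnt;
		int *out, *in;
	};

	/* Step 1: provisional out[] sized from the socket's sinit request. */
	static int stream_new(struct stream *s, unsigned short outcnt)
	{
		s->outcnt = outcnt;
		s->out = calloc(outcnt, sizeof(*s->out));
		return s->out ? 0 : -1;
	}

	/* Step 2: after INIT negotiation, reallocate out[] and allocate in[]. */
	static int stream_init(struct stream *s, unsigned short outcnt,
			       unsigned short incnt)
	{
		free(s->out);		/* drop the possibly oversized array */
		s->outcnt = outcnt;
		s->out = calloc(outcnt, sizeof(*s->out));
		if (!s->out)
			return -1;
		s->incnt = incnt;
		s->in = calloc(incnt, sizeof(*s->in));
		return s->in ? 0 : -1;
	}

	int main(void)
	{
		struct stream s = { 0 };

		if (stream_new(&s, 65535) || stream_init(&s, 10, 10))
			return 1;
		printf("out/in: %u/%u\n", s.outcnt, s.incnt);	/* out/in: 10/10 */
		free(s.out);
		free(s.in);
		return 0;
	}
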
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3379668af368..721eeebfcd8a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
- struct dst_entry *dst;
+ struct dst_entry *dst = sctp_transport_dst_check(t);
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
- __func__, pmtu,
- SCTP_DEFAULT_MINSEGMENT);
+ __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment size and disable
* pmtu discovery on this transport.
*/
@@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p
t->pathmtu = pmtu;
}
- dst = sctp_transport_dst_check(t);
- if (!dst)
- t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
-
if (dst) {
- dst->ops->update_pmtu(dst, sk, NULL, pmtu);
-
+ dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
dst = sctp_transport_dst_check(t);
- if (!dst)
- t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
}
+
+ if (!dst)
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
}
/* Caches the dst entry and source address for a transport's destination
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 16b6b5988be9..570a2b67ca10 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev)
/* Age scan results with time spent in suspend */
cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
- if (rdev->ops->resume) {
- rtnl_lock();
- if (rdev->wiphy.registered)
- ret = rdev_resume(rdev);
- rtnl_unlock();
- }
+ rtnl_lock();
+ if (rdev->wiphy.registered && rdev->ops->resume)
+ ret = rdev_resume(rdev);
+ rtnl_unlock();
return ret;
}
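
The wiphy_resume() change takes the RTNL before testing both
rdev->wiphy.registered and rdev->ops->resume, so the registration state cannot
change between the check and the rdev_resume() call. The check-under-lock
shape, as a stand-alone sketch with a pthread mutex standing in for the RTNL:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* ~rtnl */
	static int registered = 1;
	static int (*resume_op)(void);

	/* Both conditions are evaluated only while the lock is held. */
	static int do_resume(void)
	{
		int ret = 0;

		pthread_mutex_lock(&lock);
		if (registered && resume_op)
			ret = resume_op();
		pthread_mutex_unlock(&lock);
		return ret;
	}

	static int op(void)
	{
		puts("resumed");
		return 0;
	}

	int main(void)
	{
		resume_op = op;
		return do_resume();
	}
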
diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c
index 8571d766331d..d4d77b09412c 100644
--- a/samples/statx/test-statx.c
+++ b/samples/statx/test-statx.c
@@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx)
if (stx->stx_mask & STATX_BTIME)
print_time(" Birth: ", &stx->stx_btime);
- if (stx->stx_attributes) {
- unsigned char bits;
+ if (stx->stx_attributes_mask) {
+ unsigned char bits, mbits;
int loop, byte;
static char attr_representation[64 + 1] =
@@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx)
printf("Attributes: %016llx (", stx->stx_attributes);
for (byte = 64 - 8; byte >= 0; byte -= 8) {
bits = stx->stx_attributes >> byte;
+ mbits = stx->stx_attributes_mask >> byte;
for (loop = 7; loop >= 0; loop--) {
int bit = byte + loop;
- if (bits & 0x80)
+ if (!(mbits & 0x80))
+ putchar('.'); /* Not supported */
+ else if (bits & 0x80)
putchar(attr_representation[63 - bit]);
else
- putchar('-');
+ putchar('-'); /* Not set */
bits <<= 1;
+ mbits <<= 1;
}
if (byte)
putchar(' ');
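
With stx_attributes_mask in play, each attribute position now has three
states: unsupported by the kernel ('.'), supported but clear ('-'), and
supported and set (its letter). A stand-alone rendition of the per-bit
decision (not the sample code itself):

	#include <stdio.h>

	static char render(unsigned long long attrs, unsigned long long mask,
			   int bit, char name)
	{
		if (!((mask >> bit) & 1))
			return '.';	/* kernel does not support this bit */
		return ((attrs >> bit) & 1) ? name : '-';
	}

	int main(void)
	{
		unsigned long long mask = 0x30, attrs = 0x20;

		/* bit 5 is supported and set, bit 4 supported but clear: "a-" */
		printf("%c%c\n", render(attrs, mask, 5, 'a'),
		       render(attrs, mask, 4, 'c'));
		return 0;
	}
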
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f9014944..7234e61e7ce3 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -155,7 +155,7 @@ else
# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
# and locates generated .h files
# FIXME: Replace both with specific CFLAGS* statements in the makefiles
-__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \
+__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
$(call flags,_c_flags)
__a_flags = $(call flags,_a_flags)
__cpp_flags = $(call flags,_cpp_flags)
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 26d208b435a0..cfddddb9c9d7 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget,
current = menu;
display_tree_part();
gtk_widget_set_sensitive(back_btn, TRUE);
- } else if ((col == COL_OPTION)) {
+ } else if (col == COL_OPTION) {
toggle_sym_value(menu);
gtk_tree_view_expand_row(view, path, TRUE);
}
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 122153b16ea4..390d7c9685fd 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -168,6 +168,16 @@
.off = OFF, \
.imm = 0 })
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
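
The new BPF_STX_XADD() macro lets the selftests emit the BPF_XADD atomic-add
instruction directly; the "spill with xadd" test added further down in this
patch is built from exactly this kind of sequence. A minimal fragment
(assuming the tools copy of linux/filter.h shown above is on the include
path):

	#include <linux/bpf.h>
	#include "filter.h"	/* tools/include/linux/filter.h from this patch */

	/* r0 = 1; spill r0 to fp-8; lock *(u64 *)(fp - 8) += r0; exit */
	static const struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		BPF_EXIT_INSN(),
	};

	int main(void)
	{
		return sizeof(prog) / sizeof(prog[0]) != 4;	/* 4 insns emitted */
	}
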
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 6a1ad58cb66f..9af09e8099c0 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,7 +1,14 @@
LIBDIR := ../../../lib
BPFDIR := $(LIBDIR)/bpf
+APIDIR := ../../../include/uapi
+GENDIR := ../../../../include/generated
+GENHDR := $(GENDIR)/autoconf.h
-CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR)
+ifneq ($(wildcard $(GENHDR)),)
+ GENFLAGS := -DHAVE_GENHDR
+endif
+
+CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS)
LDLIBS += -lcap
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index d1555e4240c0..c848e90b6421 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -30,6 +30,14 @@
#include <bpf/bpf.h>
+#ifdef HAVE_GENHDR
+# include "autoconf.h"
+#else
+# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
+# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
+# endif
+#endif
+
#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
@@ -39,6 +47,8 @@
#define MAX_INSNS 512
#define MAX_FIXUPS 8
+#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
+
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
@@ -53,6 +63,7 @@ struct bpf_test {
REJECT
} result, result_unpriv;
enum bpf_prog_type prog_type;
+ uint8_t flags;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -2432,6 +2443,30 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "direct packet access: test15 (spill with xadd)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 4096),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"helper access to packet: test1, valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2934,6 +2969,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a variable",
@@ -2957,6 +2993,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a signed variable",
@@ -2984,6 +3021,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a constant",
@@ -3025,6 +3063,7 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is outside of the array range",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a variable",
@@ -3048,6 +3087,7 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with no floor check",
@@ -3074,6 +3114,7 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
@@ -3100,6 +3141,7 @@ static struct bpf_test tests[] = {
.errstr = "invalid access to map value, value_size=48 off=44 size=8",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
@@ -3129,6 +3171,7 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"multiple registers share map_lookup_elem result",
@@ -3252,6 +3295,7 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"constant register |= constant should keep constant type",
@@ -3418,6 +3462,26 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
{
+ "overlapping checks for direct packet access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+ },
+ {
"invalid access of tc_classid for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
@@ -3961,7 +4025,208 @@ static struct bpf_test tests[] = {
.result_unpriv = REJECT,
},
{
- "map element value (adjusted) is preserved across register spilling",
+ "map element value or null is marked on register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value store of cleared call register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R1 !read_ok",
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value with unaligned store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "map element value with unaligned load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "map element value illegal alu op, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_3, 4096),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value is preserved across register spilling",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
@@ -3983,6 +4248,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result = ACCEPT,
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
@@ -4421,6 +4687,7 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid range check",
@@ -4452,6 +4719,7 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
}
};
@@ -4530,11 +4798,11 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
+ int fd_prog, expected_ret, reject_from_alignment;
struct bpf_insn *prog = test->insns;
int prog_len = probe_filter_length(prog);
int prog_type = test->prog_type;
int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
- int fd_prog, expected_ret;
const char *expected_err;
do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
@@ -4547,8 +4815,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
+
+ reject_from_alignment = fd_prog < 0 &&
+ (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
+ strstr(bpf_vlog, "Unknown alignment.");
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (reject_from_alignment) {
+ printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
+ strerror(errno));
+ goto fail_log;
+ }
+#endif
if (expected_ret == ACCEPT) {
- if (fd_prog < 0) {
+ if (fd_prog < 0 && !reject_from_alignment) {
printf("FAIL\nFailed to load prog '%s'!\n",
strerror(errno));
goto fail_log;
@@ -4558,14 +4837,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
printf("FAIL\nUnexpected success to load!\n");
goto fail_log;
}
- if (!strstr(bpf_vlog, expected_err)) {
+ if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
printf("FAIL\nUnexpected error message!\n");
goto fail_log;
}
}
(*passes)++;
- printf("OK\n");
+ printf("OK%s\n", reject_from_alignment ?
+ " (NOTE: reject due to unknown alignment)" : "");
close_fds:
close(fd_prog);
close(fd_f1);
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1c5d0575802e..bf13fc2297aa 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -34,34 +34,34 @@ endif
all: $(SUB_DIRS)
$(SUB_DIRS):
- BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
+ BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
include ../lib.mk
override define RUN_TESTS
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
done;
endef
override define INSTALL_RULE
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
done;
endef
override define EMIT_TESTS
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
done;
endef
clean:
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
done;
rm -f tags
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 276139a24e6f..702f8108608d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -392,6 +392,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
}
/**
+ * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
+ *
+ * For a specific CPU, initialize the GIC VE hardware.
+ */
+void kvm_vgic_init_cpu_hardware(void)
+{
+ BUG_ON(preemptible());
+
+ /*
+ * We want to make sure the list registers start out clear so that we
+	 * only have to program the used registers.
+ */
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_init_lrs();
+ else
+ kvm_call_hyp(__vgic_v3_init_lrs);
+}
+
+/**
* kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
* according to the host GIC model. Accordingly calls either
* vgic_v2/v3_probe which registers the KVM_DEVICE that can be
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a3ad7ff95c9b..0a4283ed9aa7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
val = vmcr.ctlr;
break;
case GIC_CPU_PRIMASK:
- val = vmcr.pmr;
+ /*
+		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+		 * PMR field as GICH_VMCR.VMPriMask rather than
+ * GICC_PMR.Priority, so we expose the upper five bits of
+ * priority mask to userspace using the lower bits in the
+ * unsigned long.
+ */
+ val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
+ GICV_PMR_PRIORITY_SHIFT;
break;
case GIC_CPU_BINPOINT:
val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
vmcr.ctlr = val;
break;
case GIC_CPU_PRIMASK:
- vmcr.pmr = val;
+ /*
+		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+		 * PMR field as GICH_VMCR.VMPriMask rather than
+ * GICC_PMR.Priority, so we expose the upper five bits of
+ * priority mask to userspace using the lower bits in the
+ * unsigned long.
+ */
+ vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
+ GICV_PMR_PRIORITY_MASK;
break;
case GIC_CPU_BINPOINT:
vmcr.bpr = val;
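
The VMPriMask field in GICH_VMCR is only five bits wide, holding the upper
five bits of the eight-bit GICC_PMR priority, which is why both vcpuif
accessors now shift by GICV_PMR_PRIORITY_SHIFT. A quick round-trip check,
assuming the usual definitions (shift 3, five-bit mask 0xf8):

	#include <assert.h>
	#include <stdio.h>

	#define PMR_SHIFT 3			/* GICV_PMR_PRIORITY_SHIFT */
	#define PMR_MASK  (0x1f << PMR_SHIFT)	/* GICV_PMR_PRIORITY_MASK, 0xf8 */

	/* GICC_PMR.Priority (upper 5 of 8 bits) -> GICH_VMCR.VMPriMask */
	static unsigned int pmr_to_vmcr(unsigned int pmr)
	{
		return (pmr & PMR_MASK) >> PMR_SHIFT;
	}

	static unsigned int vmcr_to_pmr(unsigned int vmcr)
	{
		return (vmcr << PMR_SHIFT) & PMR_MASK;
	}

	int main(void)
	{
		unsigned int pmr = 0xf8;	/* lowest-priority mask value */

		assert(vmcr_to_pmr(pmr_to_vmcr(pmr)) == pmr);
		printf("PMR 0x%02x <-> VMPriMask 0x%02x\n", pmr, pmr_to_vmcr(pmr));
		return 0;
	}
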
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b834ecdf3225..b637d9c7afe3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val)
return (unsigned long *)val;
}
+static inline void vgic_v2_write_lr(int lr, u32 val)
+{
+ void __iomem *base = kvm_vgic_global_state.vctrl_base;
+
+ writel_relaxed(val, base + GICH_LR0 + (lr * 4));
+}
+
+void vgic_v2_init_lrs(void)
+{
+ int i;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
+ vgic_v2_write_lr(i, 0);
+}
+
void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
GICH_VMCR_ALIAS_BINPOINT_MASK;
vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
GICH_VMCR_BINPOINT_MASK;
- vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
- GICH_VMCR_PRIMASK_MASK;
+ vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
+ GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}
@@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
GICH_VMCR_ALIAS_BINPOINT_SHIFT;
vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
GICH_VMCR_BINPOINT_SHIFT;
- vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
- GICH_VMCR_PRIMASK_SHIFT;
+ vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
+ GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
void vgic_v2_enable(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index db28f7cadab2..6cf557e9f718 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
return irq->pending_latch || irq->line_level;
}
+/*
+ * This struct provides an intermediate representation of the fields contained
+ * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
+ * state to userspace can generate either GICv2 or GICv3 CPU interface
+ * registers regardless of the hardware-backed GIC used.
+ */
struct vgic_vmcr {
u32 ctlr;
u32 abpr;
u32 bpr;
- u32 pmr;
+ u32 pmr; /* Priority mask field in the GICC_PMR and
+ * ICC_PMR_EL1 priority field format */
/* Below member variable are valid only for GICv3 */
u32 grpen0;
u32 grpen1;
@@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type);
+void vgic_v2_init_lrs(void);
+
static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
if (irq->intid < VGIC_MIN_LPI)