-rw-r--r--  Documentation/arm64/tagged-address-abi.rst | 26
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,rsnd.yaml | 2
-rw-r--r--  Documentation/trace/histogram.rst | 2
-rw-r--r--  Documentation/virt/kvm/api.rst | 30
-rw-r--r--  MAINTAINERS | 26
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/Kconfig | 3
-rw-r--r--  arch/alpha/boot/bootp.c | 2
-rw-r--r--  arch/alpha/boot/bootpz.c | 2
-rw-r--r--  arch/alpha/boot/misc.c | 2
-rw-r--r--  arch/alpha/configs/defconfig | 1
-rw-r--r--  arch/alpha/include/asm/compiler.h | 11
-rw-r--r--  arch/alpha/include/asm/syscall.h | 6
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 4
-rw-r--r--  arch/alpha/kernel/perf_event.c | 2
-rw-r--r--  arch/alpha/kernel/process.c | 2
-rw-r--r--  arch/alpha/kernel/setup.c | 13
-rw-r--r--  arch/alpha/kernel/smp.c | 2
-rw-r--r--  arch/alpha/kernel/sys_nautilus.c | 2
-rw-r--r--  arch/alpha/kernel/traps.c | 2
-rw-r--r--  arch/alpha/math-emu/math.c | 8
-rw-r--r--  arch/arm/Kconfig | 6
-rw-r--r--  arch/arm/mach-davinci/Kconfig | 1
-rw-r--r--  arch/arm/mach-rpc/riscpc.c | 1
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 3
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/ipq8074.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8996.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8998.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs404-evb.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs404.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7180.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8150.dtsi | 2
-rw-r--r--  arch/arm64/kvm/mmu.c | 2
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 13
-rw-r--r--  arch/h8300/Kconfig.cpu | 1
-rw-r--r--  arch/ia64/Kconfig | 1
-rw-r--r--  arch/m68k/Kconfig | 1
-rw-r--r--  arch/m68k/Kconfig.machine | 1
-rw-r--r--  arch/m68k/coldfire/m525x.c | 2
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/net/ebpf_jit.c | 3
-rw-r--r--  arch/nds32/mm/mmap.c | 2
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nested.c | 20
-rw-r--r--  arch/powerpc/kvm/book3s_hv_p9_entry.c | 25
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 25
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 4
-rw-r--r--  arch/powerpc/net/bpf_jit_comp32.c | 6
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c | 6
-rw-r--r--  arch/powerpc/platforms/pasemi/idle.c | 1
-rw-r--r--  arch/riscv/include/asm/efi.h | 4
-rw-r--r--  arch/riscv/kernel/stacktrace.c | 6
-rw-r--r--  arch/riscv/lib/uaccess.S | 27
-rw-r--r--  arch/riscv/mm/init.c | 32
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c | 4
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 4
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 18
-rw-r--r--  arch/s390/kvm/diag.c | 18
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 18
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 5
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c | 3
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/kernel/jump_label.c | 7
-rw-r--r--  arch/x86/kvm/ioapic.c | 2
-rw-r--r--  arch/x86/kvm/ioapic.h | 4
-rw-r--r--  arch/x86/kvm/svm/avic.c | 2
-rw-r--r--  arch/x86/kvm/svm/nested.c | 10
-rw-r--r--  arch/x86/kvm/svm/svm.c | 26
-rw-r--r--  arch/x86/kvm/svm/svm.h | 6
-rw-r--r--  arch/x86/kvm/svm/svm_onhyperv.h | 2
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 7
-rw-r--r--  arch/x86/net/bpf_jit_comp32.c | 6
-rw-r--r--  arch/xtensa/Kconfig | 1
-rw-r--r--  block/blk-iocost.c | 11
-rw-r--r--  block/blk-mq-sched.c | 17
-rw-r--r--  block/genhd.c | 3
-rw-r--r--  drivers/acpi/Kconfig | 2
-rw-r--r--  drivers/acpi/dptf/dptf_pch_fivr.c | 51
-rw-r--r--  drivers/acpi/resource.c | 9
-rw-r--r--  drivers/acpi/utils.c | 7
-rw-r--r--  drivers/acpi/x86/s2idle.c | 10
-rw-r--r--  drivers/ata/libata-sff.c | 35
-rw-r--r--  drivers/base/auxiliary.c | 8
-rw-r--r--  drivers/base/core.c | 6
-rw-r--r--  drivers/block/loop.c | 128
-rw-r--r--  drivers/block/rbd.c | 33
-rw-r--r--  drivers/bus/mhi/core/main.c | 17
-rw-r--r--  drivers/bus/mhi/pci_generic.c | 45
-rw-r--r--  drivers/firmware/efi/dev-path-parser.c | 1
-rw-r--r--  drivers/firmware/efi/efi.c | 13
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 4
-rw-r--r--  drivers/firmware/efi/mokvar-table.c | 5
-rw-r--r--  drivers/firmware/efi/tpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 248
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 176
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 46
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 227
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 136
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_catalog.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 6
-rw-r--r--  drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c | 14
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c | 16
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 49
-rw-r--r--  drivers/hid/Kconfig | 2
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 2
-rw-r--r--  drivers/hid/hid-apple.c | 2
-rw-r--r--  drivers/hid/hid-asus.c | 3
-rw-r--r--  drivers/hid/hid-ft260.c | 27
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid-client.c | 15
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid.h | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 29
-rw-r--r--  drivers/hid/usbhid/Kconfig | 2
-rw-r--r--  drivers/hid/wacom_wac.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c | 10
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h | 1
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c | 13
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 11
-rw-r--r--  drivers/infiniband/hw/irdma/main.c | 9
-rw-r--r--  drivers/infiniband/hw/irdma/type.h | 3
-rw-r--r--  drivers/infiniband/hw/irdma/uk.c | 5
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 27
-rw-r--r--  drivers/media/pci/intel/ipu3/cio2-bridge.c | 6
-rw-r--r--  drivers/media/pci/ngene/ngene-core.c | 2
-rw-r--r--  drivers/media/pci/ngene/ngene.h | 14
-rw-r--r--  drivers/misc/eeprom/at24.c | 17
-rw-r--r--  drivers/net/can/spi/hi311x.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 1
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 14
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 16
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 2
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 10
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 15
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 36
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 61
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 197
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 11
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_phc.c | 10
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 6
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 3
-rw-r--r--  drivers/net/phy/broadcom.c | 2
-rw-r--r--  drivers/net/wireless/virt_wifi.c | 52
-rw-r--r--  drivers/nfc/nfcsim.c | 3
-rw-r--r--  drivers/nfc/s3fwrn5/firmware.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 19
-rw-r--r--  drivers/nvme/host/multipath.c | 9
-rw-r--r--  drivers/nvme/host/nvme.h | 11
-rw-r--r--  drivers/nvme/host/pci.c | 4
-rw-r--r--  drivers/nvme/host/trace.h | 6
-rw-r--r--  drivers/platform/x86/amd-pmc.c | 249
-rw-r--r--  drivers/platform/x86/gigabyte-wmi.c | 1
-rw-r--r--  drivers/platform/x86/intel-hid.c | 1
-rw-r--r--  drivers/platform/x86/think-lmi.c | 45
-rw-r--r--  drivers/platform/x86/think-lmi.h | 1
-rw-r--r--  drivers/platform/x86/wireless-hotkey.c | 2
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 1
-rw-r--r--  drivers/scsi/arm/fas216.c | 1
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 32
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 4
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 7
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 90
-rw-r--r--  drivers/target/target_core_sbc.c | 35
-rw-r--r--  drivers/target/target_core_transport.c | 2
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 6
-rw-r--r--  drivers/usb/core/devio.c | 2
-rw-r--r--  drivers/usb/core/hub.c | 120
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc2/core.h | 4
-rw-r--r--  drivers/usb/dwc2/core_intr.c | 3
-rw-r--r--  drivers/usb/dwc2/gadget.c | 31
-rw-r--r--  drivers/usb/dwc2/hcd.c | 6
-rw-r--r--  drivers/usb/dwc2/params.c | 1
-rw-r--r--  drivers/usb/dwc3/core.h | 1
-rw-r--r--  drivers/usb/dwc3/ep0.c | 10
-rw-r--r--  drivers/usb/dwc3/gadget.c | 21
-rw-r--r--  drivers/usb/gadget/function/u_serial.c | 2
-rw-r--r--  drivers/usb/gadget/udc/tegra-xudc.c | 1
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 18
-rw-r--r--  drivers/usb/host/max3421-hcd.c | 44
-rw-r--r--  drivers/usb/host/xhci-hub.c | 3
-rw-r--r--  drivers/usb/host/xhci-pci-renesas.c | 16
-rw-r--r--  drivers/usb/host/xhci-pci.c | 7
-rw-r--r--  drivers/usb/phy/phy.c | 10
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 7
-rw-r--r--  drivers/usb/serial/cp210x.c | 5
-rw-r--r--  drivers/usb/serial/option.c | 3
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/typec/stusb160x.c | 20
-rw-r--r--  drivers/usb/typec/tipd/core.c | 9
-rw-r--r--  fs/Kconfig.binfmt | 15
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/binfmt_em86.c | 110
-rw-r--r--  fs/block_dev.c | 2
-rw-r--r--  fs/btrfs/backref.c | 6
-rw-r--r--  fs/btrfs/backref.h | 3
-rw-r--r--  fs/btrfs/compression.c | 2
-rw-r--r--  fs/btrfs/delayed-ref.c | 4
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 3
-rw-r--r--  fs/btrfs/inode.c | 2
-rw-r--r--  fs/btrfs/ordered-data.c | 2
-rw-r--r--  fs/btrfs/ordered-data.h | 3
-rw-r--r--  fs/btrfs/qgroup.c | 38
-rw-r--r--  fs/btrfs/qgroup.h | 2
-rw-r--r--  fs/btrfs/tests/qgroup-tests.c | 20
-rw-r--r--  fs/btrfs/tree-log.c | 35
-rw-r--r--  fs/btrfs/volumes.c | 1
-rw-r--r--  fs/btrfs/zoned.c | 12
-rw-r--r--  fs/ceph/mds_client.c | 2
-rw-r--r--  fs/cifs/cifssmb.c | 10
-rw-r--r--  fs/cifs/connect.c | 4
-rw-r--r--  fs/cifs/dfs_cache.c | 229
-rw-r--r--  fs/cifs/dfs_cache.h | 3
-rw-r--r--  fs/cifs/fs_context.c | 7
-rw-r--r--  fs/cifs/smb2ops.c | 49
-rw-r--r--  fs/ext2/dir.c | 12
-rw-r--r--  fs/ext2/ext2.h | 3
-rw-r--r--  fs/ext2/namei.c | 4
-rw-r--r--  fs/fs-writeback.c | 3
-rw-r--r--  fs/hugetlbfs/inode.c | 2
-rw-r--r--  fs/internal.h | 1
-rw-r--r--  fs/io-wq.c | 7
-rw-r--r--  fs/io_uring.c | 93
-rw-r--r--  fs/ocfs2/file.c | 103
-rw-r--r--  fs/pipe.c | 10
-rw-r--r--  fs/reiserfs/stree.c | 31
-rw-r--r--  fs/reiserfs/super.c | 8
-rw-r--r--  fs/userfaultfd.c | 26
-rw-r--r--  include/acpi/acpi_bus.h | 8
-rw-r--r--  include/drm/drm_ioctl.h | 1
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/bpf_types.h | 1
-rw-r--r--  include/linux/bpf_verifier.h | 3
-rw-r--r--  include/linux/filter.h | 15
-rw-r--r--  include/linux/fs_context.h | 1
-rw-r--r--  include/linux/highmem.h | 6
-rw-r--r--  include/linux/intel-ish-client-if.h | 2
-rw-r--r--  include/linux/memblock.h | 4
-rw-r--r--  include/linux/skmsg.h | 54
-rw-r--r--  include/net/llc_pdu.h | 31
-rw-r--r--  include/net/sctp/structs.h | 5
-rw-r--r--  include/sound/soc.h | 6
-rw-r--r--  include/uapi/linux/idxd.h | 2
-rw-r--r--  include/uapi/rdma/irdma-abi.h | 2
-rw-r--r--  kernel/bpf/core.c | 19
-rw-r--r--  kernel/bpf/disasm.c | 16
-rw-r--r--  kernel/bpf/verifier.c | 148
-rw-r--r--  kernel/cgroup/cgroup-v1.c | 4
-rw-r--r--  kernel/dma/ops_helpers.c | 12
-rw-r--r--  kernel/smpboot.c | 2
-rw-r--r--  kernel/time/posix-cpu-timers.c | 10
-rw-r--r--  kernel/time/timer.c | 8
-rw-r--r--  kernel/trace/ftrace.c | 5
-rw-r--r--  kernel/trace/ring_buffer.c | 28
-rw-r--r--  kernel/trace/trace.c | 4
-rw-r--r--  kernel/trace/trace_events_hist.c | 22
-rw-r--r--  kernel/trace/trace_events_synth.c | 8
-rw-r--r--  kernel/trace/trace_synth.h | 2
-rw-r--r--  kernel/tracepoint.c | 2
-rw-r--r--  kernel/workqueue.c | 20
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/Kconfig.debug | 3
-rw-r--r--  mm/backing-dev.c | 2
-rw-r--r--  mm/kfence/core.c | 19
-rw-r--r--  mm/kfence/kfence_test.c | 2
-rw-r--r--  mm/memblock.c | 3
-rw-r--r--  mm/memcontrol.c | 3
-rw-r--r--  mm/memory.c | 11
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/mmap_lock.c | 4
-rw-r--r--  mm/page_alloc.c | 29
-rw-r--r--  mm/secretmem.c | 1
-rw-r--r--  mm/slab.h | 2
-rw-r--r--  mm/slub.c | 22
-rw-r--r--  net/can/j1939/transport.c | 11
-rw-r--r--  net/can/raw.c | 20
-rw-r--r--  net/core/devlink.c | 10
-rw-r--r--  net/core/flow_dissector.c | 18
-rw-r--r--  net/core/skmsg.c | 39
-rw-r--r--  net/ipv4/ip_tunnel.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 5
-rw-r--r--  net/llc/af_llc.c | 10
-rw-r--r--  net/llc/llc_s_ac.c | 2
-rw-r--r--  net/mac80211/cfg.c | 19
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/mlme.c | 4
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/tx.c | 57
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 7
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 6
-rw-r--r--  net/netfilter/nf_tables_api.c | 12
-rw-r--r--  net/netfilter/nfnetlink_hook.c | 2
-rw-r--r--  net/netfilter/nft_last.c | 20
-rw-r--r--  net/netfilter/nft_nat.c | 4
-rw-r--r--  net/qrtr/qrtr.c | 6
-rw-r--r--  net/sctp/input.c | 2
-rw-r--r--  net/sctp/ipv6.c | 5
-rw-r--r--  net/sctp/sm_statefuns.c | 15
-rw-r--r--  net/sctp/transport.c | 45
-rw-r--r--  net/tipc/crypto.c | 14
-rw-r--r--  net/tipc/socket.c | 30
-rw-r--r--  net/unix/af_unix.c | 51
-rw-r--r--  net/wireless/nl80211.c | 5
-rw-r--r--  net/wireless/scan.c | 6
-rw-r--r--  sound/core/pcm_native.c | 27
-rw-r--r--  sound/hda/intel-dsp-config.c | 4
-rw-r--r--  sound/isa/sb/sb16_csp.c | 4
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/amd/acp-da7219-max98357a.c | 5
-rw-r--r--  sound/soc/codecs/Kconfig | 8
-rw-r--r--  sound/soc/codecs/rt5631.c | 2
-rw-r--r--  sound/soc/codecs/rt5682.c | 8
-rw-r--r--  sound/soc/codecs/tlv320aic31xx.c | 2
-rw-r--r--  sound/soc/codecs/tlv320aic31xx.h | 4
-rw-r--r--  sound/soc/codecs/tlv320aic32x4.c | 27
-rw-r--r--  sound/soc/codecs/wcd938x.c | 18
-rw-r--r--  sound/soc/codecs/wm_adsp.c | 6
-rw-r--r--  sound/soc/intel/boards/sof_sdw_max98373.c | 81
-rw-r--r--  sound/soc/soc-pcm.c | 22
-rw-r--r--  sound/soc/sof/intel/pci-tgl.c | 1
-rw-r--r--  sound/soc/tegra/tegra_pcm.c | 30
-rw-r--r--  sound/soc/ti/j721e-evm.c | 18
-rw-r--r--  sound/usb/mixer.c | 10
-rw-r--r--  sound/usb/quirks.c | 3
-rw-r--r--  tools/testing/selftests/bpf/verifier/value_ptr_arith.c | 229
-rw-r--r--  tools/testing/selftests/kvm/.gitignore | 1
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 1
-rw-r--r--  tools/testing/selftests/kvm/aarch64/get-reg-list.c | 3
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c | 429
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_perf_test.c | 1
-rw-r--r--  tools/testing/selftests/kvm/steal_time.c | 2
-rw-r--r--  tools/testing/selftests/vm/userfaultfd.c | 6
-rw-r--r--  virt/kvm/kvm_main.c | 29
426 files changed, 4465 insertions(+), 2564 deletions(-)
diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
index 459e6b66ff68..0c9120ec58ae 100644
--- a/Documentation/arm64/tagged-address-abi.rst
+++ b/Documentation/arm64/tagged-address-abi.rst
@@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
1. User addresses not accessed by the kernel but used for address space
management (e.g. ``mprotect()``, ``madvise()``). The use of valid
- tagged pointers in this context is allowed with the exception of
- ``brk()``, ``mmap()`` and the ``new_address`` argument to
- ``mremap()`` as these have the potential to alias with existing
- user addresses.
-
- NOTE: This behaviour changed in v5.6 and so some earlier kernels may
- incorrectly accept valid tagged pointers for the ``brk()``,
- ``mmap()`` and ``mremap()`` system calls.
+ tagged pointers in this context is allowed with these exceptions:
+
+ - ``brk()``, ``mmap()`` and the ``new_address`` argument to
+ ``mremap()`` as these have the potential to alias with existing
+ user addresses.
+
+ NOTE: This behaviour changed in v5.6 and so some earlier kernels may
+ incorrectly accept valid tagged pointers for the ``brk()``,
+ ``mmap()`` and ``mremap()`` system calls.
+
+ - The ``range.start``, ``start`` and ``dst`` arguments to the
+ ``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
+ ``userfaultfd()``, as fault addresses subsequently obtained by reading
+ the file descriptor will be untagged, which may otherwise confuse
+ tag-unaware programs.
+
+ NOTE: This behaviour changed in v5.14 and so some earlier kernels may
+ incorrectly accept valid tagged pointers for this system call.
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
relaxation is disabled by default and the application thread needs to
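
[A minimal userspace sketch of the rules documented above; this is not part of the patch, and the tag value and sizes are arbitrary. mprotect() falls under case 1 and accepts a tagged pointer once the ABI is enabled, while brk(), mmap(), mremap()'s new_address and the UFFDIO_* ranges are the listed exceptions.]

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	/* Opt in to the tagged-address ABI first. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		return 1;

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Set an arbitrary tag in the top byte (bits 63:56). */
	void *tagged = (void *)((unsigned long)p | (0x5bUL << 56));

	/* Case 1: address-space management accepts the tagged pointer. */
	if (mprotect(tagged, 4096, PROT_READ))
		perror("mprotect");

	/*
	 * The exceptions above (brk(), mmap(), mremap() new_address and
	 * the UFFDIO_* ranges) must be given untagged addresses instead.
	 */
	return 0;
}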
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
index ee936d1aa724..c2930d65728e 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
@@ -114,7 +114,7 @@ properties:
ports:
$ref: /schemas/graph.yaml#/properties/ports
- properties:
+ patternProperties:
port(@[0-9a-f]+)?:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index b71e09f745c3..f99be8062bc8 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
with the event, in nanoseconds. May be
modified by .usecs to have timestamps
interpreted as microseconds.
- cpu int the cpu on which the event occurred.
+ common_cpu int the cpu on which the event occurred.
====================== ==== =======================================
Extended error information
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index c7b165ca70b6..dae68e68ca23 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -855,7 +855,7 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
use PPIs designated for specific cpus. The irq field is interpreted
like this::
-  bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
+ bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
field: | vcpu2_index | irq_type | vcpu_index | irq_id |
The irq_type field has the following values:
@@ -2149,10 +2149,10 @@ prior to calling the KVM_RUN ioctl.
Errors:
====== ============================================================
-  ENOENT   no such register
-  EINVAL   invalid register ID, or no such register or used with VMs in
+ ENOENT no such register
+ EINVAL invalid register ID, or no such register or used with VMs in
protected virtualization mode on s390
-  EPERM    (arm64) register access not allowed before vcpu finalization
+ EPERM (arm64) register access not allowed before vcpu finalization
====== ============================================================
(These error codes are indicative only: do not rely on a specific error
@@ -2590,10 +2590,10 @@ following id bit patterns::
Errors include:
======== ============================================================
-  ENOENT   no such register
-  EINVAL   invalid register ID, or no such register or used with VMs in
+ ENOENT no such register
+ EINVAL invalid register ID, or no such register or used with VMs in
protected virtualization mode on s390
-  EPERM    (arm64) register access not allowed before vcpu finalization
+ EPERM (arm64) register access not allowed before vcpu finalization
======== ============================================================
(These error codes are indicative only: do not rely on a specific error
@@ -3112,13 +3112,13 @@ current state. "addr" is ignored.
Errors:
====== =================================================================
-  EINVAL    the target is unknown, or the combination of features is invalid.
-  ENOENT    a features bit specified is unknown.
+ EINVAL the target is unknown, or the combination of features is invalid.
+ ENOENT a features bit specified is unknown.
====== =================================================================
This tells KVM what type of CPU to present to the guest, and what
-optional features it should have.  This will cause a reset of the cpu
-registers to their initial values.  If this is not called, KVM_RUN will
+optional features it should have. This will cause a reset of the cpu
+registers to their initial values. If this is not called, KVM_RUN will
return ENOEXEC for that vcpu.
The initial values are defined as:
@@ -3239,8 +3239,8 @@ VCPU matching underlying host.
Errors:
===== ==============================================================
-  E2BIG     the reg index list is too big to fit in the array specified by
-             the user (the number required will be written into n).
+ E2BIG the reg index list is too big to fit in the array specified by
+ the user (the number required will be written into n).
===== ==============================================================
::
@@ -3288,7 +3288,7 @@ specific device.
ARM/arm64 divides the id field into two parts, a device id and an
address type id specific to the individual device::
-  bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
+ bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
field: | 0x00000000 | device id | addr type id |
ARM/arm64 currently only require this when using the in-kernel GIC
@@ -7049,7 +7049,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
trap and emulate MSRs that are outside of the scope of KVM as well as
limit the attack surface on KVM's MSR emulation code.
-8.28 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
-----------------------------
Architectures: x86
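
[For illustration only; the helper below is invented, not from the patch or the KVM headers. It packs the KVM_IRQ_LINE irq field according to the bit diagram documented above.]

#include <stdint.h>

/* Field widths follow the "bits:" diagram above. */
static inline uint32_t kvm_arm_irq_pack(uint32_t vcpu2_index,
					uint32_t irq_type,
					uint32_t vcpu_index,
					uint32_t irq_id)
{
	return ((vcpu2_index & 0xf)  << 28) |	/* bits 31..28 */
	       ((irq_type    & 0xf)  << 24) |	/* bits 27..24 */
	       ((vcpu_index  & 0xff) << 16) |	/* bits 23..16 */
	        (irq_id      & 0xffff);		/* bits 15..0  */
}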
diff --git a/MAINTAINERS b/MAINTAINERS
index 73beb91891ee..41fcfdb24a81 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -445,7 +445,7 @@ F: drivers/platform/x86/wmi.c
F: include/uapi/linux/wmi.h
ACRN HYPERVISOR SERVICE MODULE
-M: Shuo Liu <shuo.a.liu@intel.com>
+M: Fei Li <fei1.li@intel.com>
L: acrn-dev@lists.projectacrn.org (subscribers-only)
S: Supported
W: https://projectacrn.org
@@ -7859,9 +7859,9 @@ S: Maintained
F: drivers/input/touchscreen/goodix.c
GOOGLE ETHERNET DRIVERS
-M: Catherine Sullivan <csully@google.com>
-R: Sagi Shahar <sagis@google.com>
-R: Jon Olson <jonolson@google.com>
+M: Jeroen de Borst <jeroendb@google.com>
+R: Catherine Sullivan <csully@google.com>
+R: David Awogbemila <awogbemila@google.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -11347,6 +11347,12 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/phy/mxl-gpy.c
+MCAB MICROCHIP CAN BUS ANALYZER TOOL DRIVER
+R: Yasushi SHOJI <yashi@spacecubics.com>
+L: linux-can@vger.kernel.org
+S: Maintained
+F: drivers/net/can/usb/mcba_usb.c
+
MCAN MMIO DEVICE DRIVER
M: Chandrasekar Ramakrishnan <rcsekar@samsung.com>
L: linux-can@vger.kernel.org
@@ -15488,6 +15494,8 @@ M: Pan, Xinhui <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
+B: https://gitlab.freedesktop.org/drm/amd/-/issues
+C: irc://irc.oftc.net/radeon
F: drivers/gpu/drm/amd/
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/amdgpu_drm.h
@@ -19143,7 +19151,7 @@ M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
-F: drivers/phy/hisilicon/phy-kirin970-usb3.c
+F: drivers/phy/hisilicon/phy-hi3670-usb3.c
USB ISP116X DRIVER
M: Olav Kongas <ok@artecdesign.ee>
@@ -19821,6 +19829,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/ptp/ptp_vmw.c
+VMWARE VMCI DRIVER
+M: Jorgen Hansen <jhansen@vmware.com>
+M: Vishnu Dasa <vdasa@vmware.com>
+L: linux-kernel@vger.kernel.org
+L: pv-drivers@vmware.com (private)
+S: Maintained
+F: drivers/misc/vmw_vmci/
+
VMWARE VMMOUSE SUBDRIVER
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
diff --git a/Makefile b/Makefile
index e4f5895badb5..6b555f64df06 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Opossums on Parade
# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 77d3280dc678..6c50877841df 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -14,7 +14,6 @@ config ALPHA
select PCI_SYSCALL if PCI
select HAVE_AOUT
select HAVE_ASM_MODVERSIONS
- select HAVE_IDE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select NEED_DMA_MAP_STATE
@@ -532,7 +531,7 @@ config SMP
will run faster if you say N here.
See also the SMP-HOWTO available at
- <http://www.tldp.org/docs.html#howto>.
+ <https://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c
index 00266e6e1b71..b4faba2432d5 100644
--- a/arch/alpha/boot/bootp.c
+++ b/arch/alpha/boot/bootp.c
@@ -23,7 +23,7 @@
#include "ksize.h"
extern unsigned long switch_to_osf_pal(unsigned long nr,
- struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+ struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa,
unsigned long *vptb);
extern void move_stack(unsigned long new_stack);
diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c
index 43af71835adf..90a2b341e9c0 100644
--- a/arch/alpha/boot/bootpz.c
+++ b/arch/alpha/boot/bootpz.c
@@ -200,7 +200,7 @@ extern char _end;
START_ADDR KSEG address of the entry point of kernel code.
ZERO_PGE KSEG address of page full of zeroes, but
- upon entry to kerne cvan be expected
+ upon entry to kernel, it can be expected
to hold the parameter list and possible
INTRD information.
diff --git a/arch/alpha/boot/misc.c b/arch/alpha/boot/misc.c
index d65192202703..325d4dd4f904 100644
--- a/arch/alpha/boot/misc.c
+++ b/arch/alpha/boot/misc.c
@@ -30,7 +30,7 @@ extern long srm_printk(const char *, ...)
__attribute__ ((format (printf, 1, 2)));
/*
- * gzip delarations
+ * gzip declarations
*/
#define OF(args) args
#define STATIC static
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index dd2dd9f0861f..7f1ca30b115b 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -70,3 +70,4 @@ CONFIG_DEBUG_INFO=y
CONFIG_ALPHA_LEGACY_START_ADDRESS=y
CONFIG_MATHEMU=y
CONFIG_CRYPTO_HMAC=y
+CONFIG_DEVTMPFS=y
diff --git a/arch/alpha/include/asm/compiler.h b/arch/alpha/include/asm/compiler.h
index 5159ba259d65..ae645959018a 100644
--- a/arch/alpha/include/asm/compiler.h
+++ b/arch/alpha/include/asm/compiler.h
@@ -4,15 +4,4 @@
#include <uapi/asm/compiler.h>
-/* Some idiots over in <linux/compiler.h> thought inline should imply
- always_inline. This breaks stuff. We'll include this file whenever
- we run into such problems. */
-
-#include <linux/compiler.h>
-#undef inline
-#undef __inline__
-#undef __inline
-#undef __always_inline
-#define __always_inline inline __attribute__((always_inline))
-
#endif /* __ALPHA_COMPILER_H */
diff --git a/arch/alpha/include/asm/syscall.h b/arch/alpha/include/asm/syscall.h
index 11c688c1d7ec..f21babaeed85 100644
--- a/arch/alpha/include/asm/syscall.h
+++ b/arch/alpha/include/asm/syscall.h
@@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task)
return AUDIT_ARCH_ALPHA;
}
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
#endif /* _ASM_ALPHA_SYSCALL_H */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index d5367a1c6300..d31167e3269c 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -834,7 +834,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
return -EFAULT;
state = &current_thread_info()->ieee_state;
- /* Update softare trap enable bits. */
+ /* Update software trap enable bits. */
*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
/* Update the real fpcr. */
@@ -854,7 +854,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
state = &current_thread_info()->ieee_state;
exc &= IEEE_STATUS_MASK;
- /* Update softare trap enable bits. */
+ /* Update software trap enable bits. */
swcr = (*state & IEEE_SW_MASK) | exc;
*state |= exc;
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index e7a59d927d78..efcf7321701b 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -574,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags)
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
- * EV67 but we don't do suffiently deep CPU detection to detect them.
+ * EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index ef0c08ed0481..a5123ea426ce 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -256,7 +256,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childstack->r26 = (unsigned long) ret_from_kernel_thread;
childstack->r9 = usp; /* function */
childstack->r10 = kthread_arg;
- childregs->hae = alpha_mv.hae_cache,
+ childregs->hae = alpha_mv.hae_cache;
childti->pcb.usp = 0;
return 0;
}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 7d56c217b235..b4fbbba30aa2 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -319,18 +319,19 @@ setup_memory(void *kernel_end)
i, cluster->usage, cluster->start_pfn,
cluster->start_pfn + cluster->numpages);
- /* Bit 0 is console/PALcode reserved. Bit 1 is
- non-volatile memory -- we might want to mark
- this for later. */
- if (cluster->usage & 3)
- continue;
-
end = cluster->start_pfn + cluster->numpages;
if (end > max_low_pfn)
max_low_pfn = end;
memblock_add(PFN_PHYS(cluster->start_pfn),
cluster->numpages << PAGE_SHIFT);
+
+ /* Bit 0 is console/PALcode reserved. Bit 1 is
+ non-volatile memory -- we might want to mark
+ this for later. */
+ if (cluster->usage & 3)
+ memblock_reserve(PFN_PHYS(cluster->start_pfn),
+ cluster->numpages << PAGE_SHIFT);
}
/*
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 4b2575f936d4..cb64e4797d2a 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -582,7 +582,7 @@ void
smp_send_stop(void)
{
cpumask_t to_whom;
- cpumask_copy(&to_whom, cpu_possible_mask);
+ cpumask_copy(&to_whom, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
if (hard_smp_processor_id() != boot_cpu_id)
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 53adf43dcd44..96fd6ff3fe81 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -212,7 +212,7 @@ nautilus_init_pci(void)
/* Use default IO. */
pci_add_resource(&bridge->windows, &ioport_resource);
- /* Irongate PCI memory aperture, calculate requred size before
+ /* Irongate PCI memory aperture, calculate required size before
setting it up. */
pci_add_resource(&bridge->windows, &irongate_mem);
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 921d4b6e4d95..5398f982bdd1 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -730,7 +730,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
long error;
/* Check the UAC bits to decide what the user wants us to do
- with the unaliged access. */
+ with the unaligned access. */
if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
if (__ratelimit(&ratelimit)) {
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
index d568cd9a3e43..f7cef66af88d 100644
--- a/arch/alpha/math-emu/math.c
+++ b/arch/alpha/math-emu/math.c
@@ -65,7 +65,7 @@ static long (*save_emul) (unsigned long pc);
long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long);
long do_alpha_fp_emul(unsigned long);
-int init_module(void)
+static int alpha_fp_emul_init_module(void)
{
save_emul_imprecise = alpha_fp_emul_imprecise;
save_emul = alpha_fp_emul;
@@ -73,12 +73,14 @@ int init_module(void)
alpha_fp_emul = do_alpha_fp_emul;
return 0;
}
+module_init(alpha_fp_emul_init_module);
-void cleanup_module(void)
+static void alpha_fp_emul_cleanup_module(void)
{
alpha_fp_emul_imprecise = save_emul_imprecise;
alpha_fp_emul = save_emul;
}
+module_exit(alpha_fp_emul_cleanup_module);
#undef alpha_fp_emul_imprecise
#define alpha_fp_emul_imprecise do_alpha_fp_emul_imprecise
@@ -401,3 +403,5 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
egress:
return si_code;
}
+
+EXPORT_SYMBOL(__udiv_qrnnd);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 82f908fa5676..2fb7012c3246 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -95,7 +95,6 @@ config ARM
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
- select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -361,7 +360,6 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
- select HAVE_IDE
select NEED_MACH_IO_H if !MMU
select NEED_MACH_MEMORY_H
help
@@ -430,7 +428,6 @@ config ARCH_PXA
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
- select HAVE_IDE
select IRQ_DOMAIN
select PLAT_PXA
select SPARSE_IRQ
@@ -446,7 +443,6 @@ config ARCH_RPC
select ARM_HAS_SG_CHAIN
select CPU_SA110
select FIQ
- select HAVE_IDE
select HAVE_PATA_PLATFORM
select ISA_DMA_API
select LEGACY_TIMER_TICK
@@ -469,7 +465,6 @@ config ARCH_SA1100
select CPU_SA1100
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
- select HAVE_IDE
select IRQ_DOMAIN
select ISA
select NEED_MACH_MEMORY_H
@@ -505,7 +500,6 @@ config ARCH_OMAP1
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
- select HAVE_IDE
select HAVE_LEGACY_CLK
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index de11030748d0..1d3aef84287d 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -9,7 +9,6 @@ menuconfig ARCH_DAVINCI
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO
select RESET_CONTROLLER
- select HAVE_IDE
select PINCTRL_SINGLE
if ARCH_DAVINCI
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index d23970bd638d..f70fb9c4b0cb 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -49,6 +49,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
fallthrough; /* ??? */
case 256:
vram_size += PAGE_SIZE * 256;
+ break;
default:
break;
}
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 897634d0a67c..a951276f0547 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1602,6 +1602,9 @@ exit:
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
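
[The same BPF_ST | BPF_NOSPEC case is added to every JIT in this series (arm64, mips, powerpc, riscv, s390, sparc and x86 below). The shape of the handling is the same everywhere; a hedged sketch follows, with jit_ctx and emit_fence() as stand-ins rather than the kernel's real JIT interfaces.]

/* BPF_ST and BPF_NOSPEC values as in include/linux/filter.h. */
#define BPF_ST		0x02
#define BPF_NOSPEC	0xc0

struct jit_ctx;					/* stand-in */
static void emit_fence(struct jit_ctx *ctx) { /* arch-specific barrier */ }

static void emit_one(struct jit_ctx *ctx, unsigned char code,
		     int needs_barrier)
{
	switch (code) {
	case BPF_ST | BPF_NOSPEC:
		/*
		 * Speculation barrier requested by the verifier. Arches
		 * whose existing Spectre v4 mitigation already covers
		 * kernel code (see the arm64 comment below) emit
		 * nothing; others emit a real fence here.
		 */
		if (needs_barrier)
			emit_fence(ctx);
		break;
	default:
		/* real JITs handle every other opcode here */
		break;
	}
}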
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index ca38d0d6c3c4..f4eaab3ecf03 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -579,7 +579,7 @@
};
flexcan1: can@308c0000 {
- compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+ compatible = "fsl,imx8mp-flexcan";
reg = <0x308c0000 0x10000>;
interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
@@ -594,7 +594,7 @@
};
flexcan2: can@308d0000 {
- compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+ compatible = "fsl,imx8mp-flexcan";
reg = <0x308d0000 0x10000>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index 068692350e00..51e17094d7b1 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -1063,7 +1063,7 @@
status = "okay";
extcon = <&usb2_id>;
- usb@7600000 {
+ dwc3@7600000 {
extcon = <&usb2_id>;
dr_mode = "otg";
maximum-speed = "high-speed";
@@ -1074,7 +1074,7 @@
status = "okay";
extcon = <&usb3_id>;
- usb@6a00000 {
+ dwc3@6a00000 {
extcon = <&usb3_id>;
dr_mode = "otg";
};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 95d6cb8cd4c0..f39bc10cc5bd 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -443,7 +443,7 @@
resets = <&gcc GCC_USB0_BCR>;
status = "disabled";
- dwc_0: usb@8a00000 {
+ dwc_0: dwc3@8a00000 {
compatible = "snps,dwc3";
reg = <0x8a00000 0xcd00>;
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
@@ -484,7 +484,7 @@
resets = <&gcc GCC_USB1_BCR>;
status = "disabled";
- dwc_1: usb@8c00000 {
+ dwc_1: dwc3@8c00000 {
compatible = "snps,dwc3";
reg = <0x8c00000 0xcd00>;
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 0e1bc4669d7e..78c55ca10ba9 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -2566,7 +2566,7 @@
power-domains = <&gcc USB30_GDSC>;
status = "disabled";
- usb@6a00000 {
+ dwc3@6a00000 {
compatible = "snps,dwc3";
reg = <0x06a00000 0xcc00>;
interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
@@ -2873,7 +2873,7 @@
qcom,select-utmi-as-pipe-clk;
status = "disabled";
- usb@7600000 {
+ dwc3@7600000 {
compatible = "snps,dwc3";
reg = <0x07600000 0xcc00>;
interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 6f294f9c0cdf..e9d3ce29937c 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -1964,7 +1964,7 @@
resets = <&gcc GCC_USB_30_BCR>;
- usb3_dwc3: usb@a800000 {
+ usb3_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0x0a800000 0xcd00>;
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index f8a55307b855..a80c578484ba 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -337,7 +337,7 @@
&usb3 {
status = "okay";
- usb@7580000 {
+ dwc3@7580000 {
dr_mode = "host";
};
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index 9c4be020d568..339790ba585d 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -544,7 +544,7 @@
assigned-clock-rates = <19200000>, <200000000>;
status = "disabled";
- usb@7580000 {
+ dwc3@7580000 {
compatible = "snps,dwc3";
reg = <0x07580000 0xcd00>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
@@ -573,7 +573,7 @@
assigned-clock-rates = <19200000>, <133333333>;
status = "disabled";
- usb@78c0000 {
+ dwc3@78c0000 {
compatible = "snps,dwc3";
reg = <0x078c0000 0xcc00>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 7af551a1fd90..7e0ff917e548 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -2761,7 +2761,7 @@
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>;
interconnect-names = "usb-ddr", "apps-usb";
- usb_1_dwc3: usb@a600000 {
+ usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 1796ae8372be..0a86fe71a66d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -3781,7 +3781,7 @@
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
interconnect-names = "usb-ddr", "apps-usb";
- usb_1_dwc3: usb@a600000 {
+ usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
@@ -3829,7 +3829,7 @@
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>;
interconnect-names = "usb-ddr", "apps-usb";
- usb_2_dwc3: usb@a800000 {
+ usb_2_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0 0x0a800000 0 0xcd00>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 612dda0fef43..eef9d79157e9 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -2344,7 +2344,7 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
- usb_1_dwc3: usb@a600000 {
+ usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 3155c9e778f0..0625bf2353c2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -947,7 +947,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_shift = get_vma_page_shift(vma, hva);
}
- shared = (vma->vm_flags & VM_PFNMAP);
+ shared = (vma->vm_flags & VM_SHARED);
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index dccf98a37283..41c23f474ea6 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -823,6 +823,19 @@ emit_cond_jmp:
return ret;
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ /*
+ * Nothing required here.
+ *
+ * In case of arm64, we rely on the firmware mitigation of
+ * Speculative Store Bypass as controlled via the ssbd kernel
+ * parameter. Whenever the mitigation is enabled, it works
+ * for all of the kernel code with no need to provide any
+ * additional instructions.
+ */
+ break;
+
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index b5e14d513e62..c30baa0499fc 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -44,7 +44,6 @@ config H8300_H8MAX
bool "H8MAX"
select H83069
select RAMKERNEL
- select HAVE_IDE
help
H8MAX Evaluation Board Support
More Information. (Japanese Only)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cf425c2c63af..4993c7ac7ff6 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -25,7 +25,6 @@ config IA64
select HAVE_ASM_MODVERSIONS
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_EXIT_THREAD
- select HAVE_IDE
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 96989ad46f66..d632a1d576f9 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -23,7 +23,6 @@ config M68K
select HAVE_DEBUG_BUGVERBOSE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
- select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_UID16
select MMU_GATHER_NO_RANGE if MMU
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index d964c1f27399..6a07a6817885 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -33,6 +33,7 @@ config MAC
depends on MMU
select MMU_MOTOROLA if MMU
select HAVE_ARCH_NVRAM_OPS
+ select HAVE_PATA_PLATFORM
select LEGACY_TIMER_TICK
help
This option enables support for the Apple Macintosh series of
diff --git a/arch/m68k/coldfire/m525x.c b/arch/m68k/coldfire/m525x.c
index 2c4d2ca2f20d..485375112e28 100644
--- a/arch/m68k/coldfire/m525x.c
+++ b/arch/m68k/coldfire/m525x.c
@@ -26,7 +26,7 @@ DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
static struct clk_lookup m525x_clk_lookup[] = {
- CLKDEV_INIT(NULL, "pll.0", &pll),
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
CLKDEV_INIT(NULL, "sys.0", &clk_sys),
CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cee6087cd686..6dfb27d531dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -71,7 +71,6 @@ config MIPS
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
- select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 939dd06764bc..3a73e9375712 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1355,6 +1355,9 @@ jeq_common:
}
break;
+ case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+ break;
+
case BPF_ST | BPF_B | BPF_MEM:
case BPF_ST | BPF_H | BPF_MEM:
case BPF_ST | BPF_W | BPF_MEM:
diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c
index c206b31ce07a..1bdf5e7d1b43 100644
--- a/arch/nds32/mm/mmap.c
+++ b/arch/nds32/mm/mmap.c
@@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
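
[For context: vm_start_gap() lowers the effective start of a VM_GROWSDOWN vma by stack_guard_gap, which is what keeps a hinted mapping from landing inside the stack guard region. Quoted from memory from include/linux/mm.h, so treat it as a sketch.]

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* underflow clamp */
			vm_start = 0;
	}
	return vm_start;
}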
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bde9907bc5b2..4f8c1fbf8f2f 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -3,7 +3,6 @@ config PARISC
def_bool y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
- select HAVE_IDE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d01e3401581d..663766fbf505 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -220,7 +220,6 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
- select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 1d1fcc290fca..085fb8ecbf68 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2697,8 +2697,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
+#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 8543ad538b0c..898f942eb198 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -302,6 +302,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.l1_ptcr == 0)
return H_NOT_AVAILABLE;
+ if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+ return H_BAD_MODE;
+
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);
@@ -322,6 +325,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_hv.vcpu_token >= NR_CPUS)
return H_PARAMETER;
+ /*
+ * L1 must have set up a suspended state to enter the L2 in a
+ * transactional state, and only in that case. These have to be
+ * filtered out here to prevent causing a TM Bad Thing in the
+ * host HRFID. We could synthesize a TM Bad Thing back to the L1
+ * here but there doesn't seem like much point.
+ */
+ if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
+ if (!MSR_TM_ACTIVE(l2_regs.msr))
+ return H_BAD_MODE;
+ } else {
+ if (l2_regs.msr & MSR_TS_MASK)
+ return H_BAD_MODE;
+ if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
+ return H_BAD_MODE;
+ }
+
/* translate lpid */
l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
if (!l2)
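
[The filter added above reduces to a small predicate, restated here as a sketch; the helper name is invented, while MSR_TM_SUSPENDED/MSR_TM_ACTIVE/MSR_TS_MASK are the usual asm/reg.h macros.]

static bool nested_entry_tm_state_ok(unsigned long l1_msr,
				     unsigned long l2_msr)
{
	if (MSR_TM_SUSPENDED(l1_msr))
		/* L1 suspended: L2 must be transactional or suspended. */
		return MSR_TM_ACTIVE(l2_msr);
	/* L1 non-transactional: L2 must not carry any TS bits. */
	return !(l2_msr & MSR_TS_MASK);
}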
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 83f592eadcd2..961b3d70483c 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -317,6 +317,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
*/
mtspr(SPRN_HDEC, hdec);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tm_return_to_guest:
+#endif
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
@@ -415,11 +418,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
* is in real suspend mode and is trying to transition to
* transactional mode.
*/
- if (local_paca->kvm_hstate.fake_suspend &&
+ if (!local_paca->kvm_hstate.fake_suspend &&
(vcpu->arch.shregs.msr & MSR_TS_S)) {
if (kvmhv_p9_tm_emulation_early(vcpu)) {
- /* Prevent it being handled again. */
- trap = 0;
+ /*
+ * Go straight back into the guest with the
+ * new NIP/MSR as set by TM emulation.
+ */
+ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+ mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
+
+ /*
+ * tm_return_to_guest re-loads SRR0/1, DAR,
+ * DSISR after RI is cleared, in case they had
+ * been clobbered by a MCE.
+ */
+ __mtmsrd(0, 1); /* clear RI */
+ goto tm_return_to_guest;
}
}
#endif
@@ -499,6 +514,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
* If we are in real mode, only switch MMU on after the MMU is
* switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
*/
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ vcpu->arch.shregs.msr & MSR_TS_MASK)
+ msr |= MSR_TS_S;
+
__mtmsrd(msr, 0);
end_timing(vcpu);
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index c5e677508d3b..0f847f1e5ddd 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
+ if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
+ /*
+ * Don't overflow our args array: ensure there is room for
+ * at least rets[0] (even if the call specifies 0 nret).
+ *
+ * Each handler must then check for the correct nargs and nret
+ * values, but they may always return failure in rets[0].
+ */
+ rc = -EINVAL;
+ goto fail;
+ }
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
@@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
fail:
/*
* We only get here if the guest has called RTAS with a bogus
- * args pointer. That means we can't get to the args, and so we
- * can't fail the RTAS call. So fail right out to userspace,
- * which should kill the guest.
+ * args pointer or nargs/nret values that would overflow the
+ * array. That means we can't get to the args, and so we can't
+ * fail the RTAS call. So fail right out to userspace, which
+ * should kill the guest.
+ *
+ * SLOF should actually pass the hcall return value from the
+ * rtas handler call in r3, so enter_rtas could be modified to
+ * return a failure indication in r3 and we could return such
+ * errors to the guest rather than failing to host userspace.
+ * However old guests that don't test for failure could then
+ * continue silently after errors, so for now we won't do this.
*/
return rc;
}
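
[Why the new check uses ">=" rather than ">": args.rets must point at a slot that still lies inside args.args[], with room for at least rets[0]. A self-contained model of the bound; the type and size here are illustrative, not the real rtas_args layout.]

#define RTAS_NARGS_MAX 16		/* illustrative */

struct rtas_args_model {
	unsigned int nargs;
	unsigned int args[RTAS_NARGS_MAX];
	unsigned int *rets;
};

static int rtas_set_rets(struct rtas_args_model *a)
{
	/*
	 * With ">" a nargs equal to the array size would slip through
	 * and rets would point one past the end of args[].
	 */
	if (a->nargs >= RTAS_NARGS_MAX)
		return -1;
	a->rets = &a->args[a->nargs];	/* rets[0] is a valid element */
	return 0;
}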
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index be33b5321a76..b4e6f70b97b9 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -2048,9 +2048,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
{
struct kvm_enable_cap cap;
r = -EFAULT;
- vcpu_load(vcpu);
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu_put(vcpu);
break;
@@ -2074,9 +2074,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
- vcpu_load(vcpu);
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vcpu_put(vcpu);
break;
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 34bb1583fc0c..beb12cbc8c29 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -738,6 +738,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
/*
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index de8595880fee..b87a63dba9c8 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -628,6 +628,12 @@ emit_clear:
break;
/*
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 9b88e3cded7d..534b0317fc15 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -42,6 +42,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
set_dec(1);
+ break;
case SRR1_WAKEEE:
/*
* Handle these when interrupts get re-enabled and we take
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 6d98cd999680..7b3483ba2e84 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
- return image_addr + SZ_256M;
+ return ULONG_MAX;
}
#define alloc_screen_info(x...) (&screen_info)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index ff467b98c3e3..ac7593607fa6 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && !task_is_running(task)))
+ if (likely(task && task != current && !task_is_running(task))) {
+ if (!try_get_task_stack(task))
+ return 0;
walk_stackframe(task, NULL, save_wchan, &pc);
+ put_task_stack(task);
+ }
return pc;
}
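
The fix pins the target task's stack for the duration of the walk: with vmap'd stacks the stack memory is refcounted and can be freed as soon as the task exits, so walking it bare races with that free. try_get_task_stack() takes the reference (and fails if the stack is already gone), put_task_stack() drops it; the same pattern applies to any cross-task stack walker:

    if (!try_get_task_stack(task))
        return 0;                 /* task is exiting, stack already freed */
    walk_stackframe(task, NULL, save_wchan, &pc);
    put_task_stack(task);         /* re-allow the stack to be freed */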
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index bceb0629e440..63bc691cff91 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
* t0 - end of uncopied dst
*/
add t0, a0, a2
- bgtu a0, t0, 5f
/*
* Use byte copy only if too small.
+ * SZREG holds 4 for RV32 and 8 for RV64
*/
- li a3, 8*SZREG /* size must be larger than size in word_copy */
+ li a3, 9*SZREG /* size must be larger than size in word_copy */
bltu a2, a3, .Lbyte_copy_tail
/*
- * Copy first bytes until dst is align to word boundary.
+ * Copy first bytes until dst is aligned to word boundary.
* a0 - start of dst
* t1 - start of aligned dst
*/
addi t1, a0, SZREG-1
andi t1, t1, ~(SZREG-1)
/* dst is already aligned, skip */
- beq a0, t1, .Lskip_first_bytes
+ beq a0, t1, .Lskip_align_dst
1:
/* a5 - one byte for copying data */
fixup lb a5, 0(a1), 10f
@@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t1, 1b /* t1 - start of aligned dst */
-.Lskip_first_bytes:
+.Lskip_align_dst:
/*
* Now dst is aligned.
* Use shift-copy if src is misaligned.
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
*
* a0 - start of aligned dst
* a1 - start of aligned src
- * a3 - a1 & mask:(SZREG-1)
* t0 - end of aligned dst
*/
- addi t0, t0, -(8*SZREG-1) /* not to over run */
+ addi t0, t0, -(8*SZREG) /* to avoid overrun */
2:
fixup REG_L a4, 0(a1), 10f
fixup REG_L a5, SZREG(a1), 10f
@@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
addi a1, a1, 8*SZREG
bltu a0, t0, 2b
- addi t0, t0, 8*SZREG-1 /* revert to original value */
+ addi t0, t0, 8*SZREG /* revert to original value */
j .Lbyte_copy_tail
.Lshift_copy:
@@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
* For misaligned copy we still perform aligned word copy, but
* we need to use the value fetched from the previous iteration and
* do some shifts.
- * This is safe because reading less than a word size.
+ * This is safe because each read is less than a word size.
*
* a0 - start of aligned dst
* a1 - start of src
@@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user)
*/
/* calculating aligned word boundary for dst */
andi t1, t0, ~(SZREG-1)
- /* Converting unaligned src to aligned arc */
+ /* Converting unaligned src to aligned src */
andi a1, a1, ~(SZREG-1)
/*
@@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user)
* t3 - prev shift
* t4 - current shift
*/
- slli t3, a3, LGREG
+ slli t3, a3, 3 /* converting bytes in a3 to bits */
li a5, SZREG*8
sub t4, a5, t3
- /* Load the first word to combine with seceond word */
+ /* Load the first word to combine with second word */
fixup REG_L a5, 0(a1), 10f
3:
@@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user)
* a1 - start of remaining src
* t0 - end of remaining dst
*/
- bgeu a0, t0, 5f
+ bgeu a0, t0, .Lout_copy_user /* check if end of copy */
4:
fixup lb a5, 0(a1), 10f
addi a1, a1, 1 /* src */
@@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */
bltu a0, t0, 4b /* t0 - end of dst */
-5:
+.Lout_copy_user:
/* Disable access to user memory */
csrc CSR_STATUS, t6
li a0, 0
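
The shift-copy comments above are easier to follow in C. A hedged little-endian model of the loop for 64-bit words, assuming the source is misaligned by a nonzero byte offset (the aligned case takes the plain word-copy path, which also sidesteps the undefined 64-bit shift at off == 0):

    #include <stdint.h>
    #include <stddef.h>

    /* Model of the RV64 shift copy: dst word-aligned, src misaligned. */
    static void shift_copy(uint64_t *dst, const unsigned char *src, size_t words)
    {
        size_t off = (uintptr_t)src & 7;        /* misalignment in bytes, != 0 */
        const uint64_t *s = (const uint64_t *)(src - off);  /* aligned src */
        unsigned int prev = off * 8;            /* t3: prev shift, in bits */
        unsigned int curr = 64 - prev;          /* t4: current shift */
        uint64_t lo = *s++;                     /* first, partial word */

        while (words--) {
            uint64_t hi = *s++;
            *dst++ = (lo >> prev) | (hi << curr);  /* stitch two aligned loads */
            lo = hi;                               /* reuse the previous fetch */
        }
    }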
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 269fc648ef3d..a14bf3910eec 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -127,10 +127,17 @@ void __init mem_init(void)
}
/*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for the 32-bit
+ * kernel, whereas for the 64-bit kernel, the end of the virtual address space
+ * is occupied by the modules/BPF/kernel mappings, which reduces the available
+ * size of the linear mapping.
+ * Limit the memory size via mem.
*/
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
static int __init early_mem(char *p)
{
@@ -152,7 +159,7 @@ static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
- phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+ phys_addr_t __maybe_unused max_mapped_addr;
phys_addr_t dram_end;
#ifdef CONFIG_XIP_KERNEL
@@ -175,14 +182,21 @@ static void __init setup_bootmem(void)
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
dram_end = memblock_end_of_DRAM();
+
+#ifndef CONFIG_64BIT
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory.
+ * if end of dram is equal to maximum addressable memory. For the 64-bit
+ * kernel, this problem can't happen here, as the end of the virtual
+ * address space is occupied by the kernel mapping, so this check must
+ * be done in create_kernel_page_table instead.
*/
+ max_mapped_addr = __pa(~(ulong)0);
if (max_mapped_addr == (dram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
max_low_pfn = max_pfn = PFN_DOWN(dram_end);
@@ -570,6 +584,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((kernel_map.phys_addr % map_size) != 0);
+#ifdef CONFIG_64BIT
+ /*
+ * The last 4K bytes of the addressable memory can not be mapped because
+ * of IS_ERR_VALUE macro.
+ */
+ BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
+
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
@@ -709,6 +731,8 @@ static void __init setup_vm_final(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
+ if (end >= __pa(PAGE_OFFSET) + memory_limit)
+ end = __pa(PAGE_OFFSET) + memory_limit;
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 81de865f4c7c..e6497424cbf6 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
return -1;
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 87e3bf5b9086..3af4131c22c7 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -939,6 +939,10 @@ out_be:
emit_ld(rd, 0, RV_REG_T1, ctx);
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
emit_imm(RV_REG_T1, imm, ctx);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 9b4473f76e56..161a9e12bfb8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -445,15 +445,15 @@ struct kvm_vcpu_stat {
u64 instruction_sigp_init_cpu_reset;
u64 instruction_sigp_cpu_reset;
u64 instruction_sigp_unknown;
- u64 diagnose_10;
- u64 diagnose_44;
- u64 diagnose_9c;
- u64 diagnose_9c_ignored;
- u64 diagnose_9c_forward;
- u64 diagnose_258;
- u64 diagnose_308;
- u64 diagnose_500;
- u64 diagnose_other;
+ u64 instruction_diagnose_10;
+ u64 instruction_diagnose_44;
+ u64 instruction_diagnose_9c;
+ u64 diag_9c_ignored;
+ u64 diag_9c_forward;
+ u64 instruction_diagnose_258;
+ u64 instruction_diagnose_308;
+ u64 instruction_diagnose_500;
+ u64 instruction_diagnose_other;
u64 pfault_sync;
};
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 02c146f9e5cd..807fa9da1e72 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -24,7 +24,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
- vcpu->stat.diagnose_10++;
+ vcpu->stat.instruction_diagnose_10++;
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
|| start < 2 * PAGE_SIZE)
@@ -74,7 +74,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
vcpu->run->s.regs.gprs[rx]);
- vcpu->stat.diagnose_258++;
+ vcpu->stat.instruction_diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -145,7 +145,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
- vcpu->stat.diagnose_44++;
+ vcpu->stat.instruction_diagnose_44++;
kvm_vcpu_on_spin(vcpu, true);
return 0;
}
@@ -169,7 +169,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
int tid;
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
- vcpu->stat.diagnose_9c++;
+ vcpu->stat.instruction_diagnose_9c++;
/* yield to self */
if (tid == vcpu->vcpu_id)
@@ -192,7 +192,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5,
"diag time slice end directed to %d: yield forwarded",
tid);
- vcpu->stat.diagnose_9c_forward++;
+ vcpu->stat.diag_9c_forward++;
return 0;
}
@@ -203,7 +203,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
return 0;
no_yield:
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
- vcpu->stat.diagnose_9c_ignored++;
+ vcpu->stat.diag_9c_ignored++;
return 0;
}
@@ -213,7 +213,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
- vcpu->stat.diagnose_308++;
+ vcpu->stat.instruction_diagnose_308++;
switch (subcode) {
case 3:
vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -245,7 +245,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
int ret;
- vcpu->stat.diagnose_500++;
+ vcpu->stat.instruction_diagnose_500++;
/* No virtio-ccw notification? Get out quickly. */
if (!vcpu->kvm->arch.css_support ||
(vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
@@ -299,7 +299,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
case 0x500:
return __diag_virtio_hypercall(vcpu);
default:
- vcpu->stat.diagnose_other++;
+ vcpu->stat.instruction_diagnose_other++;
return -EOPNOTSUPP;
}
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b655a7d82bf0..4527ac7b5961 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -163,15 +163,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
- STATS_DESC_COUNTER(VCPU, diagnose_10),
- STATS_DESC_COUNTER(VCPU, diagnose_44),
- STATS_DESC_COUNTER(VCPU, diagnose_9c),
- STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
- STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
- STATS_DESC_COUNTER(VCPU, diagnose_258),
- STATS_DESC_COUNTER(VCPU, diagnose_308),
- STATS_DESC_COUNTER(VCPU, diagnose_500),
- STATS_DESC_COUNTER(VCPU, diagnose_other),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
+ STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
+ STATS_DESC_COUNTER(VCPU, diag_9c_forward),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
STATS_DESC_COUNTER(VCPU, pfault_sync)
};
static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 2ae419f5115a..88419263a89a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1154,6 +1154,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
}
break;
/*
+ * BPF_NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 45a0549421cd..b683b69a4556 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -39,7 +39,6 @@ config SUPERH
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_HW_BREAKPOINT
- select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_IOREMAP_PROT if MMU && !X2TLB
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c5fa7932b550..f0c0f955e169 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -19,7 +19,6 @@ config SPARC
select OF
select OF_PROMTREE
select HAVE_ASM_MODVERSIONS
- select HAVE_IDE
select HAVE_ARCH_KGDB if !SMP || SPARC64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_SECCOMP if SPARC64
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 4b8d3c65d266..9a2f20cbd48b 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
return 1;
break;
}
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 49270655e827..88fb922c23a0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -202,7 +202,6 @@ config X86
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT
- select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 674906fad43b..68f091ba8443 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -79,9 +79,10 @@ __jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
return (struct jump_label_patch){.code = code, .size = size};
}
-static inline void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- int init)
+static __always_inline void
+__jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ int init)
{
const struct jump_label_patch jlp = __jump_label_patch(entry, type);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 698969e18fe3..ff005fe738a4 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 660401700075..11e4065e1617 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -43,13 +43,13 @@ struct kvm_vcpu;
struct dest_map {
/* vcpu bitmap where IRQ has been sent */
- DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
- u8 vectors[KVM_MAX_VCPU_ID];
+ u8 vectors[KVM_MAX_VCPU_ID + 1];
};
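
The off-by-one fixed in both hunks is the inclusive-maximum trap: vcpu IDs run from 0 through KVM_MAX_VCPU_ID inclusive, so anything indexed by an ID needs KVM_MAX_VCPU_ID + 1 slots. A toy illustration (1023 is only an example value):

    #define MAX_ID 1023                /* highest valid ID, inclusive */

    unsigned char vectors[MAX_ID + 1]; /* 1024 slots, indexable by any ID */
    /* sized as vectors[MAX_ID], the store vectors[MAX_ID] = v would be
     * a one-byte out-of-bounds write */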
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 1d01da64c333..a8ad78a2faa1 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -646,7 +646,7 @@ out:
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *vmcb = svm->vmcb;
+ struct vmcb *vmcb = svm->vmcb01.ptr;
bool activated = kvm_vcpu_apicv_active(vcpu);
if (!enable_apicv)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 3bd09c50c98b..61738ff8ef33 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -515,7 +515,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
* Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
* avic_physical_id.
*/
- WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
+ WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
/* Copied from vmcb01. msrpm_base can be overwritten later. */
svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
@@ -702,8 +702,8 @@ out:
}
/* Copy state save area fields which are handled by VMRUN */
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
- struct vmcb_save_area *to_save)
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+ struct vmcb_save_area *from_save)
{
to_save->es = from_save->es;
to_save->cs = from_save->cs;
@@ -722,7 +722,7 @@ void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
to_save->cpl = 0;
}
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
to_vmcb->save.fs = from_vmcb->save.fs;
to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1385,7 +1385,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
- svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
+ svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
nested_load_control_from_vmcb12(svm, ctl);
svm_switch_vmcb(svm, &svm->nested.vmcb02);
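
The parameter swap turns svm_copy_vmrun_state() and the renamed svm_copy_vmloadsave_state() into memcpy-style, destination-first helpers, so every call site reads left-to-right; note the patch has to flip all callers in the same change. Sketch of the resulting convention:

    /* destination first, like memcpy(dst, src): */
    svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);  /* vmcb01 <- state */
    svm_copy_vmloadsave_state(svm->vmcb, vmcb12);        /* vmcb   <- vmcb12 */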
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 664d20f0689c..e8ccab50ebf6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1406,8 +1406,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
goto error_free_vmsa_page;
}
- svm_vcpu_init_msrpm(vcpu, svm->msrpm);
-
svm->vmcb01.ptr = page_address(vmcb01_page);
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
@@ -1419,6 +1417,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm_switch_vmcb(svm, &svm->vmcb01);
init_vmcb(vcpu);
+ svm_vcpu_init_msrpm(vcpu, svm->msrpm);
+
svm_init_osvw(vcpu);
vcpu->arch.microcode_version = 0x01000065;
@@ -1568,8 +1568,11 @@ static void svm_set_vintr(struct vcpu_svm *svm)
{
struct vmcb_control_area *control;
- /* The following fields are ignored when AVIC is enabled */
- WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
+ /*
+ * The following fields are ignored when AVIC is enabled
+ */
+ WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
+
svm_set_intercept(svm, INTERCEPT_VINTR);
/*
@@ -2147,11 +2150,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
ret = kvm_skip_emulated_instruction(vcpu);
if (vmload) {
- nested_svm_vmloadsave(vmcb12, svm->vmcb);
+ svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
svm->sysenter_eip_hi = 0;
svm->sysenter_esp_hi = 0;
- } else
- nested_svm_vmloadsave(svm->vmcb, vmcb12);
+ } else {
+ svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+ }
kvm_vcpu_unmap(vcpu, &map, true);
@@ -4344,8 +4348,8 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
- svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
- map_save.hva + 0x400);
+ svm_copy_vmrun_state(map_save.hva + 0x400,
+ &svm->vmcb01.ptr->save);
kvm_vcpu_unmap(vcpu, &map_save, true);
}
@@ -4393,8 +4397,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
&map_save) == -EINVAL)
return 1;
- svm_copy_vmrun_state(map_save.hva + 0x400,
- &svm->vmcb01.ptr->save);
+ svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+ map_save.hva + 0x400);
kvm_vcpu_unmap(vcpu, &map_save, true);
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 7e2090752d8f..bd0fe94c2920 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -464,9 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
- struct vmcb_save_area *to_save);
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+ struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
index 9b9a55abc29f..c53b8bf8d013 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.h
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -89,7 +89,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
* as we mark it dirty unconditionally towards end of vcpu
* init phase.
*/
- if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+ if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
hve->hv_enlightenments_control.msr_bitmap)
vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a4fd10604f72..4116567f3d44 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3407,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
break;
case MSR_KVM_ASYNC_PF_ACK:
- if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
return 1;
if (data & 0x1) {
vcpu->arch.apf.pageready_pending = false;
@@ -3746,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.apf.msr_int_val;
break;
case MSR_KVM_ASYNC_PF_ACK:
- if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
return 1;
msr_info->data = 0;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 333650b9372a..0fe6aacef3db 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1219,6 +1219,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ if (boot_cpu_has(X86_FEATURE_XMM2))
+ /* Emit 'lfence' */
+ EMIT3(0x0F, 0xAE, 0xE8);
+ break;
+
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
if (is_ereg(dst_reg))
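
EMIT3(0x0F, 0xAE, 0xE8) is the raw encoding of lfence (opcode 0F AE /5 with a register-form ModRM byte), which serializes loads and serves as the speculation barrier here; the XMM2 check exists because lfence arrived with SSE2. A user-space equivalent of the emitted instruction, as a sketch:

    /* Load-serializing barrier, assuming an SSE2-capable CPU. */
    static inline void spec_barrier(void)
    {
        asm volatile("lfence" ::: "memory");
    }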
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 3da88ded6ee3..3bfda5f502cb 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1886,6 +1886,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
i++;
break;
}
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ if (boot_cpu_has(X86_FEATURE_XMM2))
+ /* Emit 'lfence' */
+ EMIT3(0x0F, 0xAE, 0xE8);
+ break;
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 2332b2156993..3878880469d1 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -327,7 +327,6 @@ config XTENSA_PLATFORM_ISS
config XTENSA_PLATFORM_XT2000
bool "XT2000"
- select HAVE_IDE
help
XT2000 is the name of Tensilica's feature-rich emulation platform.
This hardware is capable of running a full Linux distribution.
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index c2d6bc88d3f1..5fac3757e6e0 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1440,16 +1440,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
return -1;
iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
+ wait->committed = true;
/*
* autoremove_wake_function() removes the wait entry only when it
- * actually changed the task state. We want the wait always
- * removed. Remove explicitly and use default_wake_function().
+ * actually changed the task state. We want the wait always removed.
+ * Remove explicitly and use default_wake_function(). Note that the
+ * order of operations is important as finish_wait() tests whether
+ * @wq_entry is removed without grabbing the lock.
*/
- list_del_init(&wq_entry->entry);
- wait->committed = true;
-
default_wake_function(wq_entry, mode, flags, key);
+ list_del_init_careful(&wq_entry->entry);
return 0;
}
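
The reorder matters because finish_wait() on the waiter side tests list_empty_careful(&wq_entry->entry) without taking the waitqueue lock: everything the waiter must observe, wait->committed and the wakeup itself, has to be published before the lockless removal, and list_del_init_careful() supplies the release ordering that list_empty_careful() pairs with. The resulting shape, annotated:

    wait->committed = true;                   /* published before removal */
    default_wake_function(wq_entry, mode, flags, key);
    list_del_init_careful(&wq_entry->entry);  /* release-ordered; pairs with
                                                 list_empty_careful() in
                                                 finish_wait() */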
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c838d81ac058..0f006cabfd91 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -515,17 +515,6 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
percpu_ref_put(&q->q_usage_counter);
}
-static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
- struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx)
-{
- if (hctx->sched_tags) {
- blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
- blk_mq_free_rq_map(hctx->sched_tags, set->flags);
- hctx->sched_tags = NULL;
- }
-}
-
static int blk_mq_sched_alloc_tags(struct request_queue *q,
struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
@@ -539,8 +528,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
- if (ret)
- blk_mq_sched_free_tags(set, hctx, hctx_idx);
+ if (ret) {
+ blk_mq_free_rq_map(hctx->sched_tags, set->flags);
+ hctx->sched_tags = NULL;
+ }
return ret;
}
diff --git a/block/genhd.c b/block/genhd.c
index af4d2ab4a633..298ee78c1bda 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1079,10 +1079,9 @@ static void disk_release(struct device *dev)
disk_release_events(disk);
kfree(disk->random);
xa_destroy(&disk->part_tbl);
- bdput(disk->part0);
if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
blk_put_queue(disk->queue);
- kfree(disk);
+ bdput(disk->part0); /* frees the disk */
}
struct class block_class = {
.name = "block",
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 9d872ea477a6..8f9940f40baa 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE
config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
bool "Override ACPI tables from built-in initrd"
depends on ACPI_TABLE_UPGRADE
- depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION=""
+ depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE
help
This option provides functionality to override arbitrary ACPI tables
from built-in uncompressed initrd.
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 5fca18296bf6..550b9081fcbc 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -9,6 +9,42 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+struct pch_fivr_resp {
+ u64 status;
+ u64 result;
+};
+
+static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp)
+{
+ struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp};
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer format = { sizeof("NN"), "NN" };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = -EFAULT;
+
+ status = acpi_evaluate_object(handle, method, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return ret;
+
+ obj = buffer.pointer;
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+ goto release_buffer;
+
+ status = acpi_extract_package(obj, &format, &resp);
+ if (ACPI_FAILURE(status))
+ goto release_buffer;
+
+ if (fivr_resp->status)
+ goto release_buffer;
+
+ ret = 0;
+
+release_buffer:
+ kfree(buffer.pointer);
+ return ret;
+}
+
/*
* Presentation of attributes which are defined for INT1045
* They are:
@@ -23,15 +59,14 @@ static ssize_t name##_show(struct device *dev,\
char *buf)\
{\
struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
- unsigned long long val;\
- acpi_status status;\
+ struct pch_fivr_resp fivr_resp;\
+ int status;\
\
- status = acpi_evaluate_integer(acpi_dev->handle, #method,\
- NULL, &val);\
- if (ACPI_SUCCESS(status))\
- return sprintf(buf, "%d\n", (int)val);\
- else\
- return -EINVAL;\
+ status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\
+ if (status)\
+ return status;\
+\
+ return sprintf(buf, "%llu\n", fivr_resp.result);\
}
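
pch_fivr_read() evaluates an ACPI method that returns a package of two integers; the "NN" format string tells acpi_extract_package() to unpack exactly two numbers into struct pch_fivr_resp, and a nonzero firmware status is mapped to failure. A usage sketch (the method name "RFC0" is invented for illustration):

    struct pch_fivr_resp resp;

    if (!pch_fivr_read(acpi_dev->handle, "RFC0", &resp))
        dev_info(dev, "FIVR value: %llu\n", resp.result);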
#define PCH_FIVR_STORE(name, method) \
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index dc01fb550b28..ee78a210c606 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -423,13 +423,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
-static bool irq_is_legacy(struct acpi_resource_irq *irq)
-{
- return irq->triggering == ACPI_EDGE_SENSITIVE &&
- irq->polarity == ACPI_ACTIVE_HIGH &&
- irq->shareable == ACPI_EXCLUSIVE;
-}
-
/**
* acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
* @ares: Input ACPI resource object.
@@ -468,7 +461,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->shareable, irq_is_legacy(irq));
+ irq->shareable, true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index e7ddd281afff..d5cedffeeff9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -860,11 +860,9 @@ EXPORT_SYMBOL(acpi_dev_present);
* Return the next match of ACPI device if another matching device was present
* at the moment of invocation, or NULL otherwise.
*
- * FIXME: The function does not tolerate the sudden disappearance of @adev, e.g.
- * in the case of a hotplug event. That said, the caller should ensure that
- * this will never happen.
- *
* The caller is responsible for invoking acpi_dev_put() on the returned device.
+ * On the other hand, the function invokes acpi_dev_put() on the given @adev,
+ * assuming that its reference counter has been increased beforehand.
*
* See additional information in acpi_dev_present() as well.
*/
@@ -880,6 +878,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
+ acpi_dev_put(adev);
return dev ? to_acpi_device(dev) : NULL;
}
EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 1c507804fb10..fbdbef0ab552 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -378,19 +378,25 @@ static int lps0_device_attach(struct acpi_device *adev,
* AMDI0006:
* - should use rev_id 0x0
* - function mask = 0x3: Should use Microsoft method
+ * AMDI0007:
+ * - Should use rev_id 0x2
+ * - Should only use AMD method
*/
const char *hid = acpi_device_hid(adev);
- rev_id = 0;
+ rev_id = strcmp(hid, "AMDI0007") ? 0 : 2;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+ ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
&lps0_dsm_guid_microsoft);
if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
!strcmp(hid, "AMDI0005"))) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+ } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) {
+ lps0_dsm_func_mask_microsoft = -EINVAL;
+ acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
} else {
rev_id = 1;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index ae7189d1a568..b71ea4a680b0 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -637,6 +637,20 @@ unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
+static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
+ unsigned int offset, size_t xfer_size)
+{
+ bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ unsigned char *buf;
+
+ buf = kmap_atomic(page);
+ qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
+ kunmap_atomic(buf);
+
+ if (!do_write && !PageSlab(page))
+ flush_dcache_page(page);
+}
+
/**
* ata_pio_sector - Transfer a sector of data.
* @qc: Command on going
@@ -648,11 +662,9 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
*/
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
- int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
struct page *page;
unsigned int offset;
- unsigned char *buf;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
@@ -670,13 +682,20 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
- /* do the actual data transfer */
- buf = kmap_atomic(page);
- ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
- kunmap_atomic(buf);
+ /*
+ * Split the transfer when it crosses a page boundary. Note that the
+ * split still has to be dword aligned like all ATA data transfers.
+ */
+ WARN_ON_ONCE(offset % 4);
+ if (offset + qc->sect_size > PAGE_SIZE) {
+ unsigned int split_len = PAGE_SIZE - offset;
- if (!do_write && !PageSlab(page))
- flush_dcache_page(page);
+ ata_pio_xfer(qc, page, offset, split_len);
+ ata_pio_xfer(qc, nth_page(page, 1), 0,
+ qc->sect_size - split_len);
+ } else {
+ ata_pio_xfer(qc, page, offset, qc->sect_size);
+ }
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;
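
kmap_atomic() maps exactly one page, so a sector buffer that straddles a page boundary can no longer be moved with a single mapping. With 4 KiB pages, offset 3840 and a 512-byte sector, split_len = 4096 - 3840 = 256: 256 bytes come from the current page and the remaining 256 from nth_page(page, 1). The split decision, restated as a sketch:

    if (offset + qc->sect_size > PAGE_SIZE) {
        unsigned int split_len = PAGE_SIZE - offset;   /* bytes left in page */

        ata_pio_xfer(qc, page, offset, split_len);
        ata_pio_xfer(qc, nth_page(page, 1), 0,
                     qc->sect_size - split_len);       /* rest from next page */
    } else {
        ata_pio_xfer(qc, page, offset, qc->sect_size); /* common fast path */
    }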
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index adc199dfba3c..6a30264ab2ba 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -231,6 +231,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
{
+ int ret;
+
if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
return -EINVAL;
@@ -246,7 +248,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
auxdrv->driver.bus = &auxiliary_bus_type;
auxdrv->driver.mod_name = modname;
- return driver_register(&auxdrv->driver);
+ ret = driver_register(&auxdrv->driver);
+ if (ret)
+ kfree(auxdrv->driver.name);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cadcade65825..f6360490a4a3 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- sysfs_remove_link(&con->kobj, buf);
+ if (device_is_registered(con)) {
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ sysfs_remove_link(&con->kobj, buf);
+ }
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f37b9e3d833c..f0cdff0c5fbf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -88,6 +88,47 @@
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
+static DEFINE_MUTEX(loop_validate_mutex);
+
+/**
+ * loop_global_lock_killable() - take locks for safe loop_validate_file() test
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo is about to bind another "struct loop_device", false otherwise
+ *
+ * Returns 0 on success, -EINTR otherwise.
+ *
+ * Since loop_validate_file() traverses other "struct loop_device" instances
+ * when is_loop_device() is true, we need a global lock to serialize concurrent
+ * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
+ */
+static int loop_global_lock_killable(struct loop_device *lo, bool global)
+{
+ int err;
+
+ if (global) {
+ err = mutex_lock_killable(&loop_validate_mutex);
+ if (err)
+ return err;
+ }
+ err = mutex_lock_killable(&lo->lo_mutex);
+ if (err && global)
+ mutex_unlock(&loop_validate_mutex);
+ return err;
+}
+
+/**
+ * loop_global_unlock() - release locks taken by loop_global_lock_killable()
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo was about to bind another "struct loop_device", false otherwise
+ */
+static void loop_global_unlock(struct loop_device *lo, bool global)
+{
+ mutex_unlock(&lo->lo_mutex);
+ if (global)
+ mutex_unlock(&loop_validate_mutex);
+}
static int max_part;
static int part_shift;
@@ -672,13 +713,15 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
while (is_loop_device(f)) {
struct loop_device *l;
+ lockdep_assert_held(&loop_validate_mutex);
if (f->f_mapping->host->i_rdev == bdev->bd_dev)
return -EBADF;
l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
- if (l->lo_state != Lo_bound) {
+ if (l->lo_state != Lo_bound)
return -EINVAL;
- }
+ /* Order wrt setting lo->lo_backing_file in loop_configure(). */
+ rmb();
f = l->lo_backing_file;
}
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
@@ -697,13 +740,18 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
unsigned int arg)
{
- struct file *file = NULL, *old_file;
- int error;
- bool partscan;
+ struct file *file = fget(arg);
+ struct file *old_file;
+ int error;
+ bool partscan;
+ bool is_loop;
- error = mutex_lock_killable(&lo->lo_mutex);
+ if (!file)
+ return -EBADF;
+ is_loop = is_loop_device(file);
+ error = loop_global_lock_killable(lo, is_loop);
if (error)
- return error;
+ goto out_putf;
error = -ENXIO;
if (lo->lo_state != Lo_bound)
goto out_err;
@@ -713,11 +761,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
goto out_err;
- error = -EBADF;
- file = fget(arg);
- if (!file)
- goto out_err;
-
error = loop_validate_file(file, bdev);
if (error)
goto out_err;
@@ -740,7 +783,16 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, is_loop);
+
+ /*
+ * Flush loop_validate_file() before fput(), because l->lo_backing_file
+ * might still point at old_file, which might hold the last reference.
+ */
+ if (!is_loop) {
+ mutex_lock(&loop_validate_mutex);
+ mutex_unlock(&loop_validate_mutex);
+ }
/*
* We must drop file reference outside of lo_mutex as dropping
* the file ref can take open_mutex which creates circular locking
@@ -752,9 +804,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return 0;
out_err:
- mutex_unlock(&lo->lo_mutex);
- if (file)
- fput(file);
+ loop_global_unlock(lo, is_loop);
+out_putf:
+ fput(file);
return error;
}
@@ -1136,22 +1188,22 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
struct block_device *bdev,
const struct loop_config *config)
{
- struct file *file;
- struct inode *inode;
+ struct file *file = fget(config->fd);
+ struct inode *inode;
struct address_space *mapping;
- int error;
- loff_t size;
- bool partscan;
- unsigned short bsize;
+ int error;
+ loff_t size;
+ bool partscan;
+ unsigned short bsize;
+ bool is_loop;
+
+ if (!file)
+ return -EBADF;
+ is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- error = -EBADF;
- file = fget(config->fd);
- if (!file)
- goto out;
-
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
@@ -1162,7 +1214,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
goto out_putf;
}
- error = mutex_lock_killable(&lo->lo_mutex);
+ error = loop_global_lock_killable(lo, is_loop);
if (error)
goto out_bdev;
@@ -1242,6 +1294,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
size = get_loop_size(lo, file);
loop_set_size(lo, size);
+ /* Order wrt reading lo_state in loop_validate_file(). */
+ wmb();
+
lo->lo_state = Lo_bound;
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
@@ -1253,7 +1308,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
* put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
*/
bdgrab(bdev);
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
if (!(mode & FMODE_EXCL))
@@ -1261,13 +1316,12 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
return 0;
out_unlock:
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, is_loop);
out_bdev:
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
out_putf:
fput(file);
-out:
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return error;
@@ -1283,6 +1337,18 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
int lo_number;
struct loop_worker *pos, *worker;
+ /*
+ * Flush loop_configure() and loop_change_fd(). It is acceptable for
+ * loop_validate_file() to succeed, because the actual clear operation
+ * has not started yet.
+ */
+ mutex_lock(&loop_validate_mutex);
+ mutex_unlock(&loop_validate_mutex);
+ /*
+ * loop_validate_file() now fails because l->lo_state != Lo_bound
+ * became visible.
+ */
+
mutex_lock(&lo->lo_mutex);
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
err = -ENXIO;
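
The empty lock/unlock pair used here (and again in loop_change_fd()) is a flush barrier: once mutex_lock(&loop_validate_mutex) returns, every loop_validate_file() traversal that started earlier has finished, and later ones will observe the updated lo_state. Pulled out as a hypothetical helper for clarity; the patch open-codes it:

    /* Wait out every loop_validate_file() critical section in flight. */
    static void loop_flush_validate(void)
    {
        mutex_lock(&loop_validate_mutex);
        mutex_unlock(&loop_validate_mutex);
    }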
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 531d390902dd..90b947c96402 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4100,8 +4100,6 @@ again:
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
- bool need_wait;
-
dout("%s rbd_dev %p\n", __func__, rbd_dev);
lockdep_assert_held_write(&rbd_dev->lock_rwsem);
@@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
*/
rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
rbd_assert(!completion_done(&rbd_dev->releasing_wait));
- need_wait = !list_empty(&rbd_dev->running_list);
- downgrade_write(&rbd_dev->lock_rwsem);
- if (need_wait)
- wait_for_completion(&rbd_dev->releasing_wait);
- up_read(&rbd_dev->lock_rwsem);
+ if (list_empty(&rbd_dev->running_list))
+ return true;
+
+ up_write(&rbd_dev->lock_rwsem);
+ wait_for_completion(&rbd_dev->releasing_wait);
down_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
@@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
- /*
- * we already know that the remote client is
- * the owner
- */
- up_write(&rbd_dev->lock_rwsem);
- return;
+ dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
+ __func__, rbd_dev, cid.gid, cid.handle);
+ } else {
+ rbd_set_owner_cid(rbd_dev, &cid);
}
-
- rbd_set_owner_cid(rbd_dev, &cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
- dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
+ dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
__func__, rbd_dev, cid.gid, cid.handle,
rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
- up_write(&rbd_dev->lock_rwsem);
- return;
+ } else {
+ rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
}
-
- rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@@ -4951,6 +4943,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->minors = RBD_MINORS_PER_MAJOR;
}
disk->fops = &rbd_bd_ops;
+ disk->private_data = rbd_dev;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 22acde118bc3..fc9196f11cb7 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- write_lock_bh(&mhi_chan->lock);
- mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
- complete(&mhi_chan->completion);
- write_unlock_bh(&mhi_chan->lock);
+
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+ } else {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Completion packet for invalid channel ID: %d\n", chan);
+ }
mhi_del_ring_element(mhi_cntrl, mhi_ring);
}
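
chan is read straight out of a device-written command-ring element, so it is untrusted input: it must be checked against max_chan and the channel's configured flag before mhi_chan[chan] is dereferenced. The defensive shape, sketched:

    chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);      /* device-controlled value */
    if (chan >= mhi_cntrl->max_chan ||
        !mhi_cntrl->mhi_chan[chan].configured) {
        dev_err(dev, "invalid channel ID: %d\n", chan);
        /* drop the spurious completion instead of indexing the array */
    }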
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 19413daa0917..b33b9d75e8af 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -33,6 +33,8 @@
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
* @mru_default: default MRU size for MBIM network packets
+ * @sideband_wake: true for devices that use a dedicated sideband GPIO for
+ * wakeup instead of inband wake support (such as sdx24)
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
@@ -42,6 +44,7 @@ struct mhi_pci_dev_info {
unsigned int bar_num;
unsigned int dma_data_width;
unsigned int mru_default;
+ bool sideband_wake;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -74,6 +77,22 @@ struct mhi_pci_dev_info {
.doorbell_mode_switch = false, \
}
+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ .auto_queue = true, \
+ }
+
#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
{ \
.num_elements = el_count, \
@@ -212,7 +231,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
- MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
@@ -244,7 +263,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.edl = "qcom/sdx65m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
@@ -254,7 +274,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
- .mru_default = 32768
+ .mru_default = 32768,
+ .sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
@@ -262,7 +283,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = true,
};
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
@@ -304,7 +326,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_quectel_em1xx_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = true,
};
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
@@ -342,7 +365,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = false,
};
static const struct pci_device_id mhi_pci_id_table[] = {
@@ -643,11 +667,14 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
- mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
- mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
- mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
mhi_cntrl->mru = info->mru_default;
+ if (info->sideband_wake) {
+ mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+ mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+ mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+ }
+
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)
return err;
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 10d4457417a4..eb9c65f97841 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -34,7 +34,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
break;
if (!adev->pnp.unique_id && node->acpi.uid == 0)
break;
- acpi_dev_put(adev);
}
if (!adev)
return -ENODEV;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4b7ee3fa9224..847f33ffc4ae 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void)
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
struct resource *res, *parent;
+ int ret;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
if (!res)
@@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
/* we expect a conflict with a 'System RAM' region */
parent = request_resource_conflict(&iomem_resource, res);
- return parent ? request_resource(parent, res) : 0;
+ ret = parent ? request_resource(parent, res) : 0;
+
+ /*
+ * Given that efi_mem_reserve_iomem() can be called at any
+ * time, only call memblock_reserve() if the architecture
+ * keeps the infrastructure around.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
+ memblock_reserve(addr, size);
+
+ return ret;
}
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index aa8da0a49829..ae87dded989d 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -630,8 +630,8 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @load_addr: pointer to loaded initrd
* @load_size: size of loaded initrd
- * @soft_limit: preferred size of allocated memory for loading the initrd
- * @hard_limit: minimum size of allocated memory
+ * @soft_limit: preferred address for loading the initrd
+ * @hard_limit: upper limit address for loading the initrd
*
* Return: status code
*/
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index d8bc01340686..38722d2009e2 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -180,7 +180,10 @@ void __init efi_mokvar_table_init(void)
pr_err("EFI MOKvar config table is not valid\n");
return;
}
- efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
efi_mokvar_table_size = map_size_needed;
}
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index c1955d320fec..8f665678e9e3 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void)
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
- if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
- log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
- pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
+ pr_info("TPM Final Events table not present\n");
+ goto out;
+ } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ pr_warn(FW_BUG "TPM Final Events table invalid\n");
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c0316eaba547..8ac6eb9f1fdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -619,6 +619,13 @@ struct amdgpu_video_codec_info {
u32 max_level;
};
+#define codec_info_build(type, width, height, level) \
+ .codec_type = type,\
+ .max_width = width,\
+ .max_height = height,\
+ .max_pixels_per_frame = height * width,\
+ .max_level = level,
+
struct amdgpu_video_codecs {
const u32 codec_count;
const struct amdgpu_video_codec_info *codec_array;
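
Hoisting codec_info_build() into the shared header lets each ASIC file declare its codec tables as one-liners; the macro expands to designated initializers, so every use still supplies its own braces. A usage sketch (the HEVC numbers are illustrative, not taken from this patch):

    static const struct amdgpu_video_codec_info example_decode_array[] = {
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
    };

    static const struct amdgpu_video_codecs example_decode = {
        .codec_count = ARRAY_SIZE(example_decode_array),
        .codec_array = example_decode_array,
    };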
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 84a1b4bc9bb4..6cc0d4fa4d0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <acpi/video.h>
#include <acpi/actbl.h>
@@ -1042,7 +1043,7 @@ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
if (adev->flags & AMD_IS_APU)
- return true;
+ return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
}
#endif
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d303e88e3c23..f3fd5ec710b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3504,13 +3504,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- goto failed_unmap;
+ return r;
}
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- goto failed_unmap;
+ return r;
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3736,10 +3736,6 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
-failed_unmap:
- iounmap(adev->rmmio);
- adev->rmmio = NULL;
-
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index abb928894eac..361b86b71b56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1190,6 +1190,10 @@ static const struct pci_device_id pciidlist[] = {
/* Van Gogh */
{0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},
+ /* Yellow Carp */
+ {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+ {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+
/* Navy_Flounder */
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d0d9bc445d7b..854fc497844b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -255,6 +255,15 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
+ /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
+ * for debugger access to invisible VRAM. Should have used MAP_SHARED
+ * instead. Clearing VM_MAYWRITE prevents the mapping from ever
+ * becoming writable and makes is_cow_mapping(vm_flags) false.
+ */
+ if (is_cow_mapping(vma->vm_flags) &&
+ !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ vma->vm_flags &= ~VM_MAYWRITE;
+
return drm_gem_ttm_mmap(obj, vma);
}
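A user-space illustration (Linux, anonymous memory rather than a GEM object) of the mapping type the comment describes: PROT_NONE with MAP_PRIVATE is still a private, copy-on-write-capable mapping, and unless VM_MAYWRITE is cleared it can later be upgraded to writable with mprotect():

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_NONE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Succeeds here; clearing VM_MAYWRITE in the driver is what
	 * makes the equivalent upgrade fail for these GEM mappings. */
	int r = mprotect(p, 4096, PROT_READ | PROT_WRITE);
	printf("mprotect: %s\n", r == 0 ? "allowed" : "refused");

	munmap(p, 4096);
	return 0;
}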
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f5e9c022960b..a64b2c706090 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3300,6 +3300,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
};
@@ -3379,6 +3380,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
@@ -3445,6 +3447,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020)
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 94a2c0742ee5..94d029dbf30d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -64,32 +64,13 @@
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
-#define codec_info_build(type, width, height, level) \
- .codec_type = type,\
- .max_width = width,\
- .max_height = height,\
- .max_pixels_per_frame = height * width,\
- .max_level = level,
-
static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_encode =
@@ -101,55 +82,13 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode =
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_decode =
@@ -161,62 +100,14 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_decode =
@@ -228,80 +119,20 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode =
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
@@ -333,6 +164,19 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = {
.codec_array = NULL,
};
+/* Yellow Carp */
+static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+};
+
+static const struct amdgpu_video_codecs yc_video_codecs_decode = {
+ .codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
+ .codec_array = yc_video_codecs_decode_array,
+};
+
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -353,12 +197,17 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode;
return 0;
+ case CHIP_YELLOW_CARP:
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &yc_video_codecs_decode;
+ return 0;
case CHIP_BEIGE_GOBY:
if (encode)
*codecs = &bg_video_codecs_encode;
@@ -1387,7 +1236,10 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
- adev->external_rev_id = adev->rev_id + 0x01;
+ if (adev->pdev->device == 0x1681)
+ adev->external_rev_id = adev->rev_id + 0x19;
+ else
+ adev->external_rev_id = adev->rev_id + 0x01;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 618e5b6b85d9..536d41f327c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -67,7 +67,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
+ return err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
@@ -80,7 +80,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
} else {
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
- goto out2;
+ goto out;
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
@@ -105,10 +105,9 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
return 0;
-out2:
+out:
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
-out:
if (err) {
dev_err(adev->dev,
"psp v12.0: Failed to load firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index b02436401d46..b7d350be8050 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -88,20 +88,8 @@
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_encode =
@@ -113,48 +101,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -166,55 +118,13 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs rv_video_codecs_decode =
@@ -226,55 +136,13 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs rn_video_codecs_decode =
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d3a2a5ff57e9..b53f49a23ddc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2429,9 +2429,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
- if (caps->ext_caps->bits.oled == 1 ||
+ if (caps->ext_caps->bits.oled == 1 /*||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
- caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+ caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
caps->aux_support = true;
if (amdgpu_backlight == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 6e0c5c664fdc..a5331b96f551 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -197,7 +197,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
-// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
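REG_WAIT here polls a register field until it reaches an expected value, with a per-iteration delay and a bounded retry count; the hunk re-enables the DISPCLK-change wait with a larger poll budget. A generic standalone sketch of that primitive, with the hardware read stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Stubbed hardware read; a real implementation would be an MMIO load. */
static uint32_t read_status(void)
{
	return 0x1;
}

static bool wait_for_field(uint32_t mask, uint32_t want,
			   unsigned int delay_us, unsigned int retries)
{
	while (retries--) {
		if ((read_status() & mask) == want)
			return true;
		usleep(delay_us);
	}
	return false;
}

int main(void)
{
	printf("%s\n", wait_for_field(0x1, 0x1, 50, 1000) ? "done" : "timeout");
	return 0;
}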
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 513676a6f52b..af7004b770ae 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -190,6 +190,10 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
+ /* SOCCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
+ &num_levels);
// DPREFCLK ???
/* DISPCLK */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 7b7d884d58be..4a4894e9d9c9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -48,6 +48,21 @@
#include "dc_dmub_srv.h"
+#include "yellow_carp_offset.h"
+
+#define regCLK1_CLK_PLL_REQ 0x0237
+#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
#define TO_CLK_MGR_DCN31(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn31, base)
@@ -124,10 +139,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
* also if safe to lower is false, we just go in the higher state
*/
if (safe_to_lower) {
- if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW &&
- new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+ if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, true);
- clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
@@ -148,10 +163,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
} else {
- if (new_clocks->z9_support == DCN_Z9_SUPPORT_DISALLOW &&
- new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+ if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, false);
- clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
@@ -229,7 +244,32 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
- return 0;
+ /* get FbMult value */
+ struct fixed31_32 pll_req;
+ unsigned int fbmult_frac_val = 0;
+ unsigned int fbmult_int_val = 0;
+
+ /*
+	 * The register value of FbMult is in 8.16 format; convert it to 31.32
+	 * to leverage the fixed-point operations available in the driver.
+ */
+
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+ pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+ /*
+	 * Since the fractional part is only 16 bits in the register definition
+	 * but 32 bits in our fixed-point definition, shift left by 16 to obtain
+	 * the correct value.
+ */
+ pll_req.value |= fbmult_frac_val << 16;
+
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+ /* integer part is now VCO frequency in kHz */
+ return dc_fixpt_floor(pll_req);
}
static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -246,7 +286,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
@@ -260,7 +300,7 @@ static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
- else if (a->z9_support != b->z9_support)
+ else if (a->zstate_support != b->zstate_support)
return false;
else if (a->dtbclk_en != b->dtbclk_en)
return false;
@@ -592,6 +632,7 @@ void dcn31_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_percentage = 0;
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
+ clk_mgr->base.dfs_ref_freq_khz = 48000;
clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
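The new get_vco_frequency_from_reg boils down to one fixed-point multiply: FbMult is an 8.16 value (8-bit integer field plus 16-bit fraction) and the VCO frequency is FbMult times dfs_ref_freq_khz. A standalone rework using plain 64-bit arithmetic in place of the driver's fixed31_32 helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t vco_khz(uint32_t fbmult_int, uint32_t fbmult_frac,
			uint32_t ref_khz)
{
	/* FbMult as an 8.16 fixed-point value, multiplied by the
	 * reference clock and floored back to integer kHz. */
	uint64_t fb_8_16 = ((uint64_t)fbmult_int << 16) | fbmult_frac;

	return (uint32_t)((fb_8_16 * ref_khz) >> 16);
}

int main(void)
{
	/* e.g. FbMult = 50.5 with the 48 MHz refclk set by the hunk */
	printf("%u kHz\n", vco_khz(50, 0x8000, 48000)); /* 2424000 */
	return 0;
}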
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
index cc21cf75eafd..f8f100535526 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
@@ -27,60 +27,6 @@
#define __DCN31_CLK_MGR_H__
#include "clk_mgr_internal.h"
-//CLK1_CLK_PLL_REQ
-#ifndef CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
-//CLK1_CLK0_DFS_CNTL
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER__SHIFT 0x0
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER_MASK 0x0000007FL
-/*DPREF clock related*/
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-
-//CLK3_0_CLK3_CLK_PLL_REQ
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
-
-#define mmCLK0_CLK3_DFS_CNTL 0x16C60
-#define mmCLK00_CLK0_CLK3_DFS_CNTL 0x16C60
-#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E60
-#define mmCLK02_CLK0_CLK3_DFS_CNTL 0x17060
-#define mmCLK03_CLK0_CLK3_DFS_CNTL 0x17260
-
-#define mmCLK0_CLK_PLL_REQ 0x16C10
-#define mmCLK00_CLK0_CLK_PLL_REQ 0x16C10
-#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E10
-#define mmCLK02_CLK0_CLK_PLL_REQ 0x17010
-#define mmCLK03_CLK0_CLK_PLL_REQ 0x17210
-
-#define mmCLK1_CLK_PLL_REQ 0x1B00D
-#define mmCLK10_CLK1_CLK_PLL_REQ 0x1B00D
-#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
-#define mmCLK12_CLK1_CLK_PLL_REQ 0x1B40D
-#define mmCLK13_CLK1_CLK_PLL_REQ 0x1B60D
-
-#define mmCLK2_CLK_PLL_REQ 0x17E0D
-
-/*AMCLK*/
-#define mmCLK11_CLK1_CLK0_DFS_CNTL 0x1B23F
-#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
-#endif
-
struct dcn31_watermarks;
struct dcn31_smu_watermark_set {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6da226bf11d5..9fb8c46dc606 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1820,8 +1820,7 @@ bool perform_link_training_with_retries(
*/
panel_mode = DP_PANEL_MODE_DEFAULT;
}
- } else
- panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
}
#endif
@@ -4650,7 +4649,10 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
}
}
- if (link->dpcd_caps.panel_mode_edp) {
+ if (link->dpcd_caps.panel_mode_edp &&
+ (link->connector_signal == SIGNAL_TYPE_EDP ||
+ (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->is_internal_display))) {
return DP_PANEL_MODE_EDP;
}
@@ -4914,9 +4916,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
{
uint32_t default_backlight;
- if (link &&
- (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
- link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!dc_link_read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
// if < 5 nits or > 5000, it might be wrong readback
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a6a67244a322..1596f6b7fed7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1062,7 +1062,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
* did not show such problems, so this seems to be the exception.
*/
- if (plane_state->ctx->dce_version != DCE_VERSION_11_0)
+ if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
else
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 45640f1c26c4..8dcea8ff5c5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -354,10 +354,10 @@ enum dcn_pwr_state {
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
-enum dcn_z9_support_state {
- DCN_Z9_SUPPORT_UNKNOWN,
- DCN_Z9_SUPPORT_ALLOW,
- DCN_Z9_SUPPORT_DISALLOW,
+enum dcn_zstate_support_state {
+ DCN_ZSTATE_SUPPORT_UNKNOWN,
+ DCN_ZSTATE_SUPPORT_ALLOW,
+ DCN_ZSTATE_SUPPORT_DISALLOW,
};
#endif
/*
@@ -378,7 +378,7 @@ struct dc_clocks {
int dramclk_khz;
bool p_state_change_support;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- enum dcn_z9_support_state z9_support;
+ enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
#endif
enum dcn_pwr_state pwr_state;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index df6539e4c730..0464a8f3db3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -636,6 +636,7 @@ struct dce_hwseq_registers {
uint32_t ODM_MEM_PWR_CTRL3;
uint32_t DMU_MEM_PWR_CNTL;
uint32_t MMHUBBUB_MEM_PWR_CNTL;
+ uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1110,7 +1111,8 @@ struct dce_hwseq_registers {
type DOMAIN_POWER_FORCEON;\
type DOMAIN_POWER_GATE;\
type DOMAIN_PGFSM_PWR_STATUS;\
- type HPO_HDMISTREAMCLK_G_GATE_DIS;
+ type HPO_HDMISTREAMCLK_G_GATE_DIS;\
+ type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 673b93f4fea5..cb9767ddf93d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -217,6 +217,8 @@ static void dpp1_dscl_set_lb(
const struct line_buffer_params *lb_params,
enum lb_memory_config mem_size_config)
{
+ uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */
+
/* LB */
if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL caps: pixel data processed in fixed format */
@@ -239,9 +241,12 @@ static void dpp1_dscl_set_lb(
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
+ if (dpp->base.caps->max_lb_partitions == 31)
+ max_partitions = 31;
+
REG_SET_2(LB_MEMORY_CTRL, 0,
MEMORY_CONFIG, mem_size_config,
- LB_MAX_PARTITIONS, 63);
+ LB_MAX_PARTITIONS, max_partitions);
}
static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 1b05a37b674d..b173fa3653b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2093,8 +2093,10 @@ int dcn20_populate_dml_pipes_from_context(
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
pipes[pipe_cnt].pipe.dest.vtotal = v_total;
- pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
- pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
+ pipes[pipe_cnt].pipe.dest.hactive =
+ timing->h_addressable + timing->h_border_left + timing->h_border_right;
+ pipes[pipe_cnt].pipe.dest.vactive =
+ timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -3079,6 +3081,37 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
return false;
}
+static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+ int plane_count;
+ int i;
+
+ plane_count = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /*
+	 * Zstate is allowed in the following scenarios:
+ * 1. Single eDP with PSR enabled
+ * 2. 0 planes (No memory requests)
+ * 3. Single eDP without PSR but > 5ms stutter period
+ */
+ if (plane_count == 0)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+
+ if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
+ || context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+ } else
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+}
+
void dcn20_calculate_dlg_params(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -3086,7 +3119,6 @@ void dcn20_calculate_dlg_params(
int vlevel)
{
int i, pipe_idx;
- int plane_count;
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3102,17 +3134,7 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
- DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
-
- plane_count = 0;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].plane_state)
- plane_count++;
- }
-
- if (plane_count == 0)
- context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
+ context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
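A compilable condensation of decide_zstate_support (the link checks are folded into booleans here; the real code additionally requires the PSR-enabled link to be link_index 0): allow with zero planes, or with a single eDP stream that either has PSR enabled or a stutter period above 5 ms:

#include <stdbool.h>
#include <stdio.h>

enum zstate_support { ZSTATE_ALLOW, ZSTATE_DISALLOW };

static enum zstate_support decide_zstate(int plane_count, int stream_count,
					 bool single_edp, bool psr_enabled,
					 double stutter_period_us)
{
	if (plane_count == 0)
		return ZSTATE_ALLOW;
	if (stream_count == 1 && single_edp &&
	    (psr_enabled || stutter_period_us > 5000.0))
		return ZSTATE_ALLOW;
	return ZSTATE_DISALLOW;
}

int main(void)
{
	printf("%d\n", decide_zstate(0, 0, false, false, 0.0));   /* allow */
	printf("%d\n", decide_zstate(1, 1, true, false, 6000.0)); /* allow */
	printf("%d\n", decide_zstate(2, 2, false, false, 0.0));   /* disallow */
	return 0;
}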
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index f3d98e3ba624..bf0a198eae15 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -109,6 +109,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.max_page_table_levels = 4,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 2140b75540cf..23a52d47e61c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -383,13 +383,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
- return false;
-
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
@@ -1440,15 +1433,6 @@ bool dpp3_construct(
dpp->tf_shift = tf_shift;
dpp->tf_mask = tf_mask;
- dpp->lb_pixel_depth_supported =
- LB_PIXEL_DEPTH_18BPP |
- LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP |
- LB_PIXEL_DEPTH_36BPP;
-
- dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
- dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
-
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
index 3fa86cd090a0..ac644ae6b9f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -154,6 +154,7 @@
SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
SRI(CURSOR_CONTROL, CURSOR0_, id),\
SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+ SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
#define DPP_REG_LIST_DCN30(id)\
@@ -163,8 +164,6 @@
SRI(CM_SHAPER_LUT_DATA, CM, id),\
SRI(CM_MEM_PWR_CTRL2, CM, id), \
SRI(CM_MEM_PWR_STATUS2, CM, id), \
- SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
- SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 16a75ba0ca82..7d3ff5d44402 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -1398,11 +1398,18 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
+ /* Populate from bw_params for DTBCLK, SOCCLK */
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+ dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
+ else
+ dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+ dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
+ else
+ dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ /* FCLK, PHYCLK_D18, DSCCLK */
dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 34b89464ae02..833ab13fa834 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -1326,11 +1326,18 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[0].dtbclk_mhz;
+ /* Populate from bw_params for DTBCLK, SOCCLK */
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+ dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
+ else
+ dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+ dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
+ else
+ dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ /* FCLK, PHYCLK_D18, DSCCLK */
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[0].socclk_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */
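Both the dcn302 and dcn303 hunks apply the same fallback when populating per-level clocks: take the bw_params entry when it is non-zero, otherwise inherit the previous level's value. Isolated as an array-based helper for clarity:

#include <stdio.h>

static void fill_clock_levels(unsigned int *dst, const unsigned int *src,
			      int n)
{
	/* Carry the previous level forward wherever the source table
	 * has a zero (missing) entry, as the two hunks above do. */
	for (int i = 0; i < n; i++)
		dst[i] = (!src[i] && i > 0) ? dst[i - 1] : src[i];
}

int main(void)
{
	unsigned int src[4] = { 600, 0, 0, 800 };
	unsigned int dst[4];

	fill_clock_levels(dst, src, 4);
	for (int i = 0; i < 4; i++)
		printf("%u ", dst[i]); /* 600 600 600 800 */
	printf("\n");
	return 0;
}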
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 836864a5a5dc..6ac6faf0c533 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -47,6 +47,7 @@
#include "dce/dmub_outbox.h"
#include "dc_link_dp.h"
#include "inc/link_dpcd.h"
+#include "dcn10/dcn10_hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -594,3 +595,20 @@ bool dcn31_is_abm_supported(struct dc *dc,
}
return false;
}
+
+static void apply_riommu_invalidation_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (!hws->wa.early_riommu_invalidation)
+ return;
+
+ REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
+}
+
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
+{
+ dcn10_init_pipes(dc, context);
+ apply_riommu_invalidation_wa(dc);
+
+}
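The hwseq change is a small wrapper pattern: keep the base dcn10 pipe init, then apply an ASIC-specific workaround gated by a flag set at construction time. Sketched standalone, with the register update reduced to a print:

#include <stdbool.h>
#include <stdio.h>

struct hwseq_wa { bool early_riommu_invalidation; };

static void base_init_pipes(void)
{
	puts("dcn10-style pipe init");
}

static void init_pipes_with_wa(const struct hwseq_wa *wa)
{
	base_init_pipes();
	if (wa->early_riommu_invalidation)
		puts("clear DISABLE_HOSTVM_FORCE_ALLOW_PSTATE");
}

int main(void)
{
	struct hwseq_wa wa = { .early_riommu_invalidation = true };

	init_pipes_with_wa(&wa);
	return 0;
}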
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index ff72f0fdd5be..40dfebe78fdd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -52,5 +52,6 @@ void dcn31_reset_hw_ctx_wrap(
struct dc_state *context);
bool dcn31_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index e3048f8827d2..aaf2dbd095fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -93,7 +93,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -104,7 +103,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
- .init_pipes = dcn10_init_pipes,
+ .init_pipes = dcn31_init_pipes,
.update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index c67bc9544f5d..38c010afade1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -220,6 +220,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
.sr_exit_z8_time_us = 402.0,
.sr_enter_plus_exit_z8_time_us = 520.0,
.writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -741,6 +742,7 @@ static const struct dccg_mask dccg_mask = {
#define HWSEQ_DCN31_REG_LIST()\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DIO_MEM_PWR_CTRL), \
SR(ODM_MEM_PWR_CTRL3), \
SR(DMU_MEM_PWR_CNTL), \
@@ -801,6 +803,7 @@ static const struct dce_hwseq_registers hwseq_reg = {
#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
@@ -1299,6 +1302,7 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ hws->wa.early_riommu_invalidation = true;
}
return hws;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index c26e742e8137..6655bb99fdfd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -841,6 +841,9 @@ static bool CalculatePrefetchSchedule(
else
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ // Limit to prevent overflow in DST_Y_PREFETCH register
+ *DestinationLinesForPrefetch = dml_min(*DestinationLinesForPrefetch, 63.75);
+
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: TCalc: %f\n", TCalc);
dml_print("DML: TWait: %f\n", TWait);
@@ -4889,7 +4892,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
- || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
+ || mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode));
if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
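The 63.75 clamp is consistent with DST_Y_PREFETCH being a fixed-point register field with two fractional bits — an assumption here, not stated in the hunk: values stored in units of 0.25 make an 8-bit field saturate at 255/4 = 63.75. A small demonstration of that quantization:

#include <stdio.h>

static double clamp_dst_y_prefetch(double lines)
{
	return lines < 63.75 ? lines : 63.75;
}

int main(void)
{
	double v = clamp_dst_y_prefetch(70.0);
	unsigned int reg = (unsigned int)(v * 4.0); /* 2 fractional bits */

	printf("%.2f -> 0x%02x\n", v, reg); /* 63.75 -> 0xff */
	return 0;
}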
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 2a0db2b03047..9ac9d5e8df8b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -289,6 +289,9 @@ struct dpp_caps {
/* DSCL processing pixel data in fixed or float format */
enum dscl_data_processing_format dscl_data_proc_format;
+ /* max LB partitions */
+ unsigned int max_lb_partitions;
+
/* Calculates the number of partitions in the line buffer.
* The implementation of this function is overloaded for
* different versions of DSCL LB.
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index f7f7e4fff0c2..082549f75978 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -41,6 +41,7 @@ struct dce_hwseq_wa {
bool DEGVIDCN10_254;
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
+ bool early_riommu_invalidation;
};
struct hwseq_wa_state {
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
index 610266088ff1..35fa0d8e92dd 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
@@ -101,7 +101,8 @@
#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41
#define PPSMC_MSG_GfxDriverResetRecovery 0x42
-#define PPSMC_Message_Count 0x43
+#define PPSMC_MSG_BoardPowerCalibration 0x43
+#define PPSMC_Message_Count 0x44
//PPSMC Reset Types
#define PPSMC_RESET_TYPE_WARM_RESET 0x00
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 89a16dcd0fff..1d3765b873df 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -225,7 +225,8 @@
__SMU_DUMMY_MAP(DisableDeterminism), \
__SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \
- __SMU_DUMMY_MAP(GfxDriverResetRecovery),
+ __SMU_DUMMY_MAP(GfxDriverResetRecovery), \
+ __SMU_DUMMY_MAP(BoardPowerCalibration),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 1962a5877191..f61b5c914a3d 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -34,7 +34,7 @@
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
-#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9
+#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
/* MP Apertures */
#define MP0_Public 0x03800000
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 9316a726195c..cb5485cf243f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -134,6 +134,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0),
};
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -440,6 +441,39 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
return ret;
}
+static bool aldebaran_is_primary(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
+ return adev->smuio.funcs->get_die_id(adev) == 0;
+
+ return true;
+}
+
+static int aldebaran_run_board_btc(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret;
+
+ if (!aldebaran_is_primary(smu))
+ return 0;
+
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get smu version!\n");
+ return ret;
+ }
+ if (smu_version <= 0x00441d00)
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Board power calibration failed!\n");
+
+ return ret;
+}
+
static int aldebaran_run_btc(struct smu_context *smu)
{
int ret;
@@ -447,6 +481,8 @@ static int aldebaran_run_btc(struct smu_context *smu)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
if (ret)
dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+ else
+ ret = aldebaran_run_board_btc(smu);
return ret;
}
@@ -524,16 +560,6 @@ static int aldebaran_freqs_in_same_level(int32_t frequency1,
return (abs(frequency1 - frequency2) <= EPSILON);
}
-static bool aldebaran_is_primary(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
- return adev->smuio.funcs->get_die_id(adev) == 0;
-
- return true;
-}
-
static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 98ae00661656..f454e0424086 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -834,6 +834,9 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
+ if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+ return -ENOTTY;
+
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) {
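The new guard rejects any ioctl whose type byte is not the DRM magic before dispatching by number. In user-space terms (Linux _IOC macros; the error value is the -ENOTTY the hunk returns):

#include <stdio.h>
#include <linux/ioctl.h>

#define DRM_IOCTL_BASE 'd'

static int check_ioctl_type(unsigned long cmd)
{
	return _IOC_TYPE(cmd) == DRM_IOCTL_BASE ? 0 : -25; /* -ENOTTY */
}

int main(void)
{
	unsigned long drm_cmd   = _IO(DRM_IOCTL_BASE, 0x00);
	unsigned long other_cmd = _IO('f', 0x00);

	printf("%d %d\n", check_ioctl_type(drm_cmd),
	       check_ioctl_type(other_cmd)); /* 0 -25 */
	return 0;
}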
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 5b6922e28ef2..aa667fa71158 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2166,7 +2166,8 @@ static void
init_vbt_missing_defaults(struct drm_i915_private *i915)
{
enum port port;
- int ports = PORT_A | PORT_B | PORT_C | PORT_D | PORT_E | PORT_F;
+ int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
+ BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
return;
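Why the intel_bios hunk matters: PORT_A through PORT_F are small consecutive enum values, so OR-ing them yields 0x7 rather than a six-port mask, while BIT(port) builds the intended bitmask. Verified standalone:

#include <stdio.h>

#define BIT(n) (1u << (n))

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

int main(void)
{
	unsigned int wrong = PORT_A | PORT_B | PORT_C |
			     PORT_D | PORT_E | PORT_F;
	unsigned int right = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
			     BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);

	printf("wrong=0x%x right=0x%x\n", wrong, right); /* 0x7 0x3f */
	return 0;
}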
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3bad4e00f7be..2d5d21740c25 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11361,13 +11361,19 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
vlv_dsi_init(dev_priv);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(dev_priv) == 10) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
intel_ddi_init(dev_priv, PORT_F);
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
} else if (HAS_DDI(dev_priv)) {
u32 found;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index a8abc9af5ff4..4a6419d7be93 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,10 +25,8 @@
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
-#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "i915_memcpy.h"
struct eb_vma {
struct i915_vma *vma;
@@ -1456,6 +1454,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
int err;
struct intel_engine_cs *engine = eb->engine;
+ /* If we need to copy for the cmdparser, we will stall anyway */
+ if (eb_use_cmdparser(eb))
+ return ERR_PTR(-EWOULDBLOCK);
+
if (!reloc_can_use_engine(engine)) {
engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
if (!engine)
@@ -2372,217 +2374,6 @@ shadow_batch_pin(struct i915_execbuffer *eb,
return vma;
}
-struct eb_parse_work {
- struct dma_fence_work base;
- struct intel_engine_cs *engine;
- struct i915_vma *batch;
- struct i915_vma *shadow;
- struct i915_vma *trampoline;
- unsigned long batch_offset;
- unsigned long batch_length;
- unsigned long *jump_whitelist;
- const void *batch_map;
- void *shadow_map;
-};
-
-static int __eb_parse(struct dma_fence_work *work)
-{
- struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
- int ret;
- bool cookie;
-
- cookie = dma_fence_begin_signalling();
- ret = intel_engine_cmd_parser(pw->engine,
- pw->batch,
- pw->batch_offset,
- pw->batch_length,
- pw->shadow,
- pw->jump_whitelist,
- pw->shadow_map,
- pw->batch_map);
- dma_fence_end_signalling(cookie);
-
- return ret;
-}
-
-static void __eb_parse_release(struct dma_fence_work *work)
-{
- struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
-
- if (!IS_ERR_OR_NULL(pw->jump_whitelist))
- kfree(pw->jump_whitelist);
-
- if (pw->batch_map)
- i915_gem_object_unpin_map(pw->batch->obj);
- else
- i915_gem_object_unpin_pages(pw->batch->obj);
-
- i915_gem_object_unpin_map(pw->shadow->obj);
-
- if (pw->trampoline)
- i915_active_release(&pw->trampoline->active);
- i915_active_release(&pw->shadow->active);
- i915_active_release(&pw->batch->active);
-}
-
-static const struct dma_fence_work_ops eb_parse_ops = {
- .name = "eb_parse",
- .work = __eb_parse,
- .release = __eb_parse_release,
-};
-
-static inline int
-__parser_mark_active(struct i915_vma *vma,
- struct intel_timeline *tl,
- struct dma_fence *fence)
-{
- struct intel_gt_buffer_pool_node *node = vma->private;
-
- return i915_active_ref(&node->active, tl->fence_context, fence);
-}
-
-static int
-parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
-{
- int err;
-
- mutex_lock(&tl->mutex);
-
- err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
- if (err)
- goto unlock;
-
- if (pw->trampoline) {
- err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
- if (err)
- goto unlock;
- }
-
-unlock:
- mutex_unlock(&tl->mutex);
- return err;
-}
-
-static int eb_parse_pipeline(struct i915_execbuffer *eb,
- struct i915_vma *shadow,
- struct i915_vma *trampoline)
-{
- struct eb_parse_work *pw;
- struct drm_i915_gem_object *batch = eb->batch->vma->obj;
- bool needs_clflush;
- int err;
-
- GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
- GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
-
- pw = kzalloc(sizeof(*pw), GFP_KERNEL);
- if (!pw)
- return -ENOMEM;
-
- err = i915_active_acquire(&eb->batch->vma->active);
- if (err)
- goto err_free;
-
- err = i915_active_acquire(&shadow->active);
- if (err)
- goto err_batch;
-
- if (trampoline) {
- err = i915_active_acquire(&trampoline->active);
- if (err)
- goto err_shadow;
- }
-
- pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
- if (IS_ERR(pw->shadow_map)) {
- err = PTR_ERR(pw->shadow_map);
- goto err_trampoline;
- }
-
- needs_clflush =
- !(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
- pw->batch_map = ERR_PTR(-ENODEV);
- if (needs_clflush && i915_has_memcpy_from_wc())
- pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
-
- if (IS_ERR(pw->batch_map)) {
- err = i915_gem_object_pin_pages(batch);
- if (err)
- goto err_unmap_shadow;
- pw->batch_map = NULL;
- }
-
- pw->jump_whitelist =
- intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
- trampoline);
- if (IS_ERR(pw->jump_whitelist)) {
- err = PTR_ERR(pw->jump_whitelist);
- goto err_unmap_batch;
- }
-
- dma_fence_work_init(&pw->base, &eb_parse_ops);
-
- pw->engine = eb->engine;
- pw->batch = eb->batch->vma;
- pw->batch_offset = eb->batch_start_offset;
- pw->batch_length = eb->batch_len;
- pw->shadow = shadow;
- pw->trampoline = trampoline;
-
- /* Mark active refs early for this worker, in case we get interrupted */
- err = parser_mark_active(pw, eb->context->timeline);
- if (err)
- goto err_commit;
-
- err = dma_resv_reserve_shared(pw->batch->resv, 1);
- if (err)
- goto err_commit;
-
- err = dma_resv_reserve_shared(shadow->resv, 1);
- if (err)
- goto err_commit;
-
- /* Wait for all writes (and relocs) into the batch to complete */
- err = i915_sw_fence_await_reservation(&pw->base.chain,
- pw->batch->resv, NULL, false,
- 0, I915_FENCE_GFP);
- if (err < 0)
- goto err_commit;
-
- /* Keep the batch alive and unwritten as we parse */
- dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
-
- /* Force execution to wait for completion of the parser */
- dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
-
- dma_fence_work_commit_imm(&pw->base);
- return 0;
-
-err_commit:
- i915_sw_fence_set_error_once(&pw->base.chain, err);
- dma_fence_work_commit_imm(&pw->base);
- return err;
-
-err_unmap_batch:
- if (pw->batch_map)
- i915_gem_object_unpin_map(batch);
- else
- i915_gem_object_unpin_pages(batch);
-err_unmap_shadow:
- i915_gem_object_unpin_map(shadow->obj);
-err_trampoline:
- if (trampoline)
- i915_active_release(&trampoline->active);
-err_shadow:
- i915_active_release(&shadow->active);
-err_batch:
- i915_active_release(&eb->batch->vma->active);
-err_free:
- kfree(pw);
- return err;
-}
-
static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
{
/*
@@ -2672,7 +2463,15 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err_trampoline;
}
- err = eb_parse_pipeline(eb, shadow, trampoline);
+ err = dma_resv_reserve_shared(shadow->resv, 1);
+ if (err)
+ goto err_trampoline;
+
+ err = intel_engine_cmd_parser(eb->engine,
+ eb->batch->vma,
+ eb->batch_start_offset,
+ eb->batch_len,
+ shadow, trampoline);
if (err)
goto err_unpin_batch;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 4df505e4c53a..16162fc2782d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -125,6 +125,10 @@ static int igt_gpu_reloc(void *arg)
intel_gt_pm_get(&eb.i915->gt);
for_each_uabi_engine(eb.engine, eb.i915) {
+ if (intel_engine_requires_cmd_parser(eb.engine) ||
+ intel_engine_using_cmd_parser(eb.engine))
+ continue;
+
reloc_cache_init(&eb.reloc_cache, eb.i915);
memset(map, POISON_INUSE, 4096);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 98eb48c24c46..06024d321a1a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL;
+ /*
+	 * Because d3_entered is used to indicate that PPGTT invalidation
+	 * should be skipped on vGPU reset, it is set on the D0->D3 PCI
+	 * config write and cleared after a vGPU reset when resuming.
+	 * On S0ix exit the device power state also transitions from D3 to
+	 * D0, as in S3 resume, but no vGPU reset is triggered (by the QEMU
+	 * device model). After S0ix exit all engines continue to work, yet
+	 * d3_entered remains set, which breaks the next vGPU reset logic
+	 * (the expected PPGTT invalidation is missed).
+	 * Engines can only work in D0, so the first elsp write gives GVT a
+	 * chance to clear d3_entered.
+ */
+ if (vgpu->d3_entered)
+ vgpu->d3_entered = false;
+
execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 3992c25a191d..a3b4d99d64b9 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1145,19 +1145,41 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
unsigned long offset, unsigned long length,
- void *dst, const void *src)
+ bool *needs_clflush_after)
{
- bool needs_clflush =
- !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
- if (src) {
- GEM_BUG_ON(!needs_clflush);
- i915_unaligned_memcpy_from_wc(dst, src + offset, length);
- } else {
- struct scatterlist *sg;
+ unsigned int src_needs_clflush;
+ unsigned int dst_needs_clflush;
+ void *dst, *src;
+ int ret;
+
+ ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
+ if (ret)
+ return ERR_PTR(ret);
+
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+ i915_gem_object_finish_access(dst_obj);
+ if (IS_ERR(dst))
+ return dst;
+
+ ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+ if (ret) {
+ i915_gem_object_unpin_map(dst_obj);
+ return ERR_PTR(ret);
+ }
+
+ src = ERR_PTR(-ENODEV);
+ if (src_needs_clflush && i915_has_memcpy_from_wc()) {
+ src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+ if (!IS_ERR(src)) {
+ i915_unaligned_memcpy_from_wc(dst,
+ src + offset,
+ length);
+ i915_gem_object_unpin_map(src_obj);
+ }
+ }
+ if (IS_ERR(src)) {
+ unsigned long x, n, remain;
void *ptr;
- unsigned int x, sg_ofs;
- unsigned long remain;
/*
* We can avoid clflushing partial cachelines before the write
@@ -1168,40 +1190,34 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* validate up to the end of the batch.
*/
remain = length;
- if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ if (dst_needs_clflush & CLFLUSH_BEFORE)
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false);
-
- while (remain) {
- unsigned long sg_max = sg->length >> PAGE_SHIFT;
-
- for (; remain && sg_ofs < sg_max; sg_ofs++) {
- unsigned long len = min(remain, PAGE_SIZE - x);
- void *map;
-
- map = kmap_atomic(nth_page(sg_page(sg), sg_ofs));
- if (needs_clflush)
- drm_clflush_virt_range(map + x, len);
- memcpy(ptr, map + x, len);
- kunmap_atomic(map);
-
- ptr += len;
- remain -= len;
- x = 0;
- }
-
- sg_ofs = 0;
- sg = sg_next(sg);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
+
+ src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+ if (src_needs_clflush)
+ drm_clflush_virt_range(src + x, len);
+ memcpy(ptr, src + x, len);
+ kunmap_atomic(src);
+
+ ptr += len;
+ remain -= len;
+ x = 0;
}
}
+ i915_gem_object_finish_access(src_obj);
+
memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
/* dst_obj is returned with vmap pinned */
+ *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
+
return dst;
}
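The CLFLUSH_BEFORE branch above rounds the copy length up to the CPU's clflush granularity so no partial cacheline ahead of the write is left unflushed; copying a little past the batch is harmless, since the parser validates to the end anyway. A small sketch of the rounding, assuming the common 64-byte line size (the kernel reads the real value from boot_cpu_data.x86_clflush_size):

    #include <stdio.h>

    /* Same semantics as the kernel's round_up() for power-of-two sizes. */
    #define round_up(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long clflush_size = 64;  /* assumed typical x86 value */
        unsigned long length = 100;       /* arbitrary batch length */

        /* 100 bytes straddles two 64-byte cachelines, so treat it as 128 */
        printf("%lu -> %lu\n", length, round_up(length, clflush_size));
        return 0;
    }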
@@ -1360,6 +1376,9 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
if (target_cmd_index == offset)
return 0;
+ if (IS_ERR(jump_whitelist))
+ return PTR_ERR(jump_whitelist);
+
if (!test_bit(target_cmd_index, jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target);
@@ -1369,28 +1388,10 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
return 0;
}
-/**
- * intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser()
- * @batch_length: length of the commands in batch_obj
- * @trampoline: Whether jump trampolines are used.
- *
- * Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser().
- * This has to be preallocated, because the command parser runs in signaling context,
- * and may not allocate any memory.
- *
- * Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use
- * IS_ERR() to check for errors. Must bre freed() with kfree().
- *
- * NULL is a valid value, meaning no allocation was required.
- */
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
- bool trampoline)
+static unsigned long *alloc_whitelist(u32 batch_length)
{
unsigned long *jmp;
- if (trampoline)
- return NULL;
-
/*
* We expect batch_length to be less than 256KiB for known users,
* i.e. we need at most an 8KiB bitmap allocation which should be
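For reference, the 8 KiB figure follows from one whitelist bit per 4-byte command; a quick userspace check of the arithmetic, using the 256 KiB bound the comment states:

    #include <stdio.h>

    int main(void)
    {
        unsigned long batch_length = 256UL << 10;   /* 256 KiB */
        unsigned long ncmds = batch_length / 4;     /* 4-byte commands */
        unsigned long bitmap_bytes = ncmds / 8;     /* one bit per cmd */

        /* 65536 commands -> 8192 bytes, comfortably kmalloc-able */
        printf("%lu cmds -> %lu KiB bitmap\n", ncmds, bitmap_bytes >> 10);
        return 0;
    }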
@@ -1415,9 +1416,7 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* @batch_offset: byte offset in the batch at which execution starts
* @batch_length: length of the commands in batch_obj
* @shadow: validated copy of the batch buffer in question
- * @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist()
- * @shadow_map: mapping to @shadow vma
- * @batch_map: mapping to @batch vma
+ * @trampoline: true if we need to trampoline into privileged execution
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1425,21 +1424,21 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
- unsigned long *jump_whitelist,
- void *shadow_map,
- const void *batch_map)
+ bool trampoline)
{
u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
+ bool needs_clflush_after = false;
+ unsigned long *jump_whitelist;
u64 batch_addr, shadow_addr;
int ret = 0;
- bool trampoline = !jump_whitelist;
GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
@@ -1447,8 +1446,18 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
batch->size));
GEM_BUG_ON(!batch_length);
- cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length,
- shadow_map, batch_map);
+ cmd = copy_batch(shadow->obj, batch->obj,
+ batch_offset, batch_length,
+ &needs_clflush_after);
+ if (IS_ERR(cmd)) {
+ DRM_DEBUG("CMD: Failed to copy batch\n");
+ return PTR_ERR(cmd);
+ }
+
+ jump_whitelist = NULL;
+ if (!trampoline)
+ /* Defer failure until attempted use */
+ jump_whitelist = alloc_whitelist(batch_length);
shadow_addr = gen8_canonical_addr(shadow->node.start);
batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
@@ -1549,6 +1558,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
i915_gem_object_flush_map(shadow->obj);
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ kfree(jump_whitelist);
+ i915_gem_object_unpin_map(shadow->obj);
return ret;
}
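Both copy_batch() and alloc_whitelist() above fold errors into their returned pointers. For readers less used to the idiom, a minimal userspace re-creation of ERR_PTR()/IS_ERR()/PTR_ERR() (simplified from linux/err.h; the kernel packs errnos into the top 4095 addresses):

    #include <stdio.h>

    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static void *get_buffer(int fail)
    {
        static char buf[64];
        return fail ? ERR_PTR(-12) /* -ENOMEM */ : (void *)buf;
    }

    int main(void)
    {
        void *p = get_buffer(1);

        if (IS_ERR(p))
            printf("error: %ld\n", PTR_ERR(p)); /* error: -12 */
        return 0;
    }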
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 38ff2fb89744..b30397b04529 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1906,17 +1906,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
- bool trampoline);
-
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
- unsigned long *jump_whitelist,
- void *shadow_map,
- const void *batch_map);
+ bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
/* intel_device_info.c */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 1014c71cf7f5..37aef1308573 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1426,10 +1426,8 @@ i915_request_await_execution(struct i915_request *rq,
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- i915_sw_fence_set_error_once(&rq->submit, fence->error);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
- }
if (fence->context == rq->fence.context)
continue;
@@ -1527,10 +1525,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- i915_sw_fence_set_error_once(&rq->submit, fence->error);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
- }
/*
* Requests on the same timeline are explicitly ordered, along
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7eaa92fee421..e0a10f36acc1 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -325,7 +325,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) {
+ } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
@@ -340,7 +340,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- if (GRAPHICS_VER(dev_priv) >= 12 &&
+
+ if (DISPLAY_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
info->pipe_mask &= ~BIT(PIPE_D);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -352,10 +353,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
info->display.has_fbc = 0;
- if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
info->display.has_dmc = 0;
- if (GRAPHICS_VER(dev_priv) >= 10 &&
+ if (DISPLAY_VER(dev_priv) >= 10 &&
(dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
info->display.has_dsc = 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index d01c4c919504..704dace895cb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -296,7 +296,7 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
static const struct dpu_mdp_cfg sm8250_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
- .base = 0x0, .len = 0x45C,
+ .base = 0x0, .len = 0x494,
.features = 0,
.highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index ca96e3514790..c0423e76eed7 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -771,6 +771,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
dp_catalog->width_blanking);
dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+ dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ee221d835fa0..eaddfd739885 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1526,7 +1526,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 051c1be1de7e..867388a399ad 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -219,6 +219,7 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}
+ dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);
if (rc) {
DRM_ERROR("DRM DP AUX register failed\n");
@@ -1311,6 +1312,10 @@ static int dp_pm_resume(struct device *dev)
else
dp->dp_display.is_connected = false;
+ dp_display_handle_plugged_change(g_dp_display,
+ dp->dp_display.is_connected);
+
mutex_unlock(&dp->event_mutex);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index eed2a762e9dd..bcaddbba564d 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -142,6 +142,9 @@ static const struct iommu_flush_ops null_tlb_ops = {
.tlb_add_page = msm_iommu_tlb_add_page,
};
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg);
+
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
@@ -157,6 +160,13 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
if (!ttbr1_cfg)
return ERR_PTR(-ENODEV);
+ /*
+ * Defer setting the fault handler until we have a valid adreno_smmu
+	 * to avoid accidentally installing a GPU-specific fault handler for
+	 * the display's iommu.
+ */
+ iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+
pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
if (!pagetable)
return ERR_PTR(-ENOMEM);
@@ -300,7 +310,6 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
- iommu_set_fault_handler(domain, msm_fault_handler, iommu);
atomic_set(&iommu->pagetables, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4f3a5357dd56..6d07e653f82d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -149,6 +149,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
*/
if (bo->base.dev)
drm_gem_object_release(&bo->base);
+ else
+ dma_resv_fini(&bo->base._resv);
kfree(nvbo);
}
@@ -330,6 +332,10 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
+ nvbo->bo.base.size = size;
+ dma_resv_init(&nvbo->bo.base._resv);
+ drm_vma_node_reset(&nvbo->bo.base.vma_node);
+
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 2229f1af2ca8..46029c5610c8 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
- kfree(ts->dsi);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 21939d4352cf..1b80290c2b53 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -4166,7 +4166,7 @@ static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode
static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
.modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
.num_modes = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 154,
.height = 90,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1b950b45cf4b..8d7fd65ccced 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -102,6 +102,9 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
return;
}
+ if (!mem)
+ return;
+
man = ttm_manager_type(bdev, mem->mem_type);
list_move_tail(&bo->lru, &man->lru[bo->priority]);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2f57f824e6db..763fa6f4e07d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -63,6 +63,9 @@ int ttm_mem_io_reserve(struct ttm_device *bdev,
void ttm_mem_io_free(struct ttm_device *bdev,
struct ttm_resource *mem)
{
+ if (!mem)
+ return;
+
if (!mem->bus.offset && !mem->bus.addr)
return;
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 5f31acec3ad7..74e3b460132b 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -44,6 +44,8 @@ static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);
+struct dentry *ttm_debugfs_root;
+
static void ttm_global_release(void)
{
struct ttm_global *glob = &ttm_glob;
@@ -53,6 +55,7 @@ static void ttm_global_release(void)
goto out;
ttm_pool_mgr_fini();
+ debugfs_remove(ttm_debugfs_root);
__free_page(glob->dummy_read_page);
memset(glob, 0, sizeof(*glob));
@@ -73,6 +76,13 @@ static int ttm_global_init(void)
si_meminfo(&si);
+ ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
+ if (IS_ERR(ttm_debugfs_root)) {
+ ret = PTR_ERR(ttm_debugfs_root);
+ ttm_debugfs_root = NULL;
+ goto out;
+ }
+
/* Limit the number of pages in the pool to about 50% of the total
* system memory.
*/
@@ -100,6 +110,10 @@ static int ttm_global_init(void)
debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
&glob->bo_count);
out:
+ if (ret && ttm_debugfs_root)
+ debugfs_remove(ttm_debugfs_root);
+ if (ret)
+ --ttm_glob_use_count;
mutex_unlock(&ttm_global_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 997c458f68a9..7fcdef278c74 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -72,22 +72,6 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
return tmp;
}
-struct dentry *ttm_debugfs_root;
-
-static int __init ttm_init(void)
-{
- ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
- return 0;
-}
-
-static void __exit ttm_exit(void)
-{
- debugfs_remove(ttm_debugfs_root);
-}
-
-module_init(ttm_init);
-module_exit(ttm_exit);
-
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index aab1b36ceb3c..c2876731ee2d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1857,38 +1857,46 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (vc4_hdmi->variant->external_irq_controller) {
- ret = devm_request_threaded_irq(&pdev->dev,
- platform_get_irq_byname(pdev, "cec-rx"),
- vc4_cec_irq_handler_rx_bare,
- vc4_cec_irq_handler_rx_thread, 0,
- "vc4 hdmi cec rx", vc4_hdmi);
+ ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
+ vc4_cec_irq_handler_rx_bare,
+ vc4_cec_irq_handler_rx_thread, 0,
+ "vc4 hdmi cec rx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
- ret = devm_request_threaded_irq(&pdev->dev,
- platform_get_irq_byname(pdev, "cec-tx"),
- vc4_cec_irq_handler_tx_bare,
- vc4_cec_irq_handler_tx_thread, 0,
- "vc4 hdmi cec tx", vc4_hdmi);
+ ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
+ vc4_cec_irq_handler_tx_bare,
+ vc4_cec_irq_handler_tx_thread, 0,
+ "vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ goto err_remove_cec_rx_handler;
} else {
HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
- ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4_hdmi);
+ ret = request_threaded_irq(platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
}
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
- goto err_delete_cec_adap;
+ goto err_remove_handlers;
return 0;
+err_remove_handlers:
+ if (vc4_hdmi->variant->external_irq_controller)
+ free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+ else
+ free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+
+err_remove_cec_rx_handler:
+ if (vc4_hdmi->variant->external_irq_controller)
+ free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+
err_delete_cec_adap:
cec_delete_adapter(vc4_hdmi->cec_adap);
@@ -1897,6 +1905,15 @@ err_delete_cec_adap:
static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
{
+ struct platform_device *pdev = vc4_hdmi->pdev;
+
+ if (vc4_hdmi->variant->external_irq_controller) {
+ free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+ free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+ } else {
+ free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+ }
+
cec_unregister_adapter(vc4_hdmi->cec_adap);
}
#else
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 160554903ef9..76937f716fbe 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -576,7 +576,7 @@ config HID_LOGITECH_HIDPP
depends on HID_LOGITECH
select POWER_SUPPLY
help
- Support for Logitech devices relyingon the HID++ Logitech specification
+ Support for Logitech devices relying on the HID++ Logitech specification
Say Y if you want support for Logitech devices relying on the HID++
specification. Such devices are the various Logitech Touchpads (T650,
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 96e2577fa37e..8d68796aa905 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -58,7 +58,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
- writeq(0x0, privdata->mmio + AMD_C2P_MSG2);
+ writeq(0x0, privdata->mmio + AMD_C2P_MSG1);
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 6b8f0d004d34..dc6bd4299c54 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -501,6 +501,8 @@ static const struct hid_device_id apple_devices[] = {
APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
.driver_data = APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index fca8fc78a78a..fb807c8e989b 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -485,9 +485,6 @@ static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
- if (led->brightness == brightness)
- return;
-
led->brightness = brightness;
schedule_work(&led->work);
}
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
index f43a8406cb9a..4ef1c3b8094e 100644
--- a/drivers/hid/hid-ft260.c
+++ b/drivers/hid/hid-ft260.c
@@ -742,7 +742,7 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
int ret;
ret = ft260_get_system_config(hdev, &cfg);
- if (ret)
+ if (ret < 0)
return ret;
ft260_dbg("interface: 0x%02x\n", interface);
@@ -754,23 +754,16 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
switch (cfg.chip_mode) {
case FT260_MODE_ALL:
case FT260_MODE_BOTH:
- if (interface == 1) {
+ if (interface == 1)
hid_info(hdev, "uart interface is not supported\n");
- return 0;
- }
- ret = 1;
+ else
+ ret = 1;
break;
case FT260_MODE_UART:
- if (interface == 0) {
- hid_info(hdev, "uart is unsupported on interface 0\n");
- ret = 0;
- }
+ hid_info(hdev, "uart interface is not supported\n");
break;
case FT260_MODE_I2C:
- if (interface == 1) {
- hid_info(hdev, "i2c is unsupported on interface 1\n");
- ret = 0;
- }
+ ret = 1;
break;
}
return ret;
@@ -785,7 +778,7 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", *field);
}
static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
@@ -797,7 +790,7 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", le16_to_cpu(*field));
}
#define FT260_ATTR_SHOW(name, reptype, id, type, func) \
@@ -1004,11 +997,9 @@ err_hid_stop:
static void ft260_remove(struct hid_device *hdev)
{
- int ret;
struct ft260_device *dev = hid_get_drvdata(hdev);
- ret = ft260_is_interface_enabled(hdev);
- if (ret <= 0)
+ if (!dev)
return;
sysfs_remove_group(&hdev->dev.kobj, &ft260_attr_group);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 6b1fa971b33e..91bf4d01e91a 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -784,6 +784,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
}
}
+static void hid_ishtp_cl_resume_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data = container_of(work, struct ishtp_cl_data, resume_work);
+ struct ishtp_cl *hid_ishtp_cl = client_data->hid_ishtp_cl;
+
+ if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
+ client_data->suspended = false;
+ wake_up_interruptible(&client_data->ishtp_resume_wait);
+ }
+}
+
ishtp_print_log ishtp_hid_print_trace;
/**
@@ -822,6 +833,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
init_waitqueue_head(&client_data->ishtp_resume_wait);
INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+ INIT_WORK(&client_data->resume_work, hid_ishtp_cl_resume_handler);
+
ishtp_hid_print_trace = ishtp_trace_callback(cl_device);
@@ -921,7 +934,7 @@ static int hid_ishtp_cl_resume(struct device *device)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- client_data->suspended = false;
+ schedule_work(&client_data->resume_work);
return 0;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index f88443a7d935..6a5cc11aefd8 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -135,6 +135,7 @@ struct ishtp_cl_data {
int multi_packet_cnt;
struct work_struct work;
+ struct work_struct resume_work;
struct ishtp_cl_device *cl_device;
};
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index f0802b047ed8..aa2c51624012 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -314,13 +314,6 @@ static int ishtp_cl_device_resume(struct device *dev)
if (!device)
return 0;
- /*
- * When ISH needs hard reset, it is done asynchrnously, hence bus
- * resume will be called before full ISH resume
- */
- if (device->ishtp_dev->resume_flag)
- return 0;
-
driver = to_ishtp_cl_driver(dev->driver);
if (driver && driver->driver.pm) {
if (driver->driver.pm->resume)
@@ -850,6 +843,28 @@ struct device *ishtp_device(struct ishtp_cl_device *device)
EXPORT_SYMBOL(ishtp_device);
/**
+ * ishtp_wait_resume() - Wait for IPC resume
+ * @dev: ishtp device
+ *
+ * Wait for the IPC resume handshake to complete.
+ *
+ * Return: true if resume completed, false on timeout
+ */
+bool ishtp_wait_resume(struct ishtp_device *dev)
+{
+ /* 50ms to get resume response */
+ #define WAIT_FOR_RESUME_ACK_MS 50
+
+ /* Waiting to get resume response */
+ if (dev->resume_flag)
+ wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+
+ return (!dev->resume_flag);
+}
+EXPORT_SYMBOL_GPL(ishtp_wait_resume);
+
+/**
* ishtp_get_pci_device() - Return PCI device dev pointer
* This interface is used to return PCI device pointer
* from ishtp_cl_device instance.
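The new ishtp_wait_resume() above is an instance of the standard flag-plus-waitqueue handshake: the resume-ack path clears the flag and wakes the queue, while the waiter blocks with a timeout and re-checks the flag, so a lost wakeup degrades into a bounded 50 ms wait rather than a hang. A condensed kernel-style fragment of the pattern (kernel context only, struct and names illustrative):

    #include <linux/wait.h>
    #include <linux/jiffies.h>

    struct my_dev {                       /* illustrative stand-in */
        wait_queue_head_t resume_wait;
        bool resume_flag;
    };

    /* Wake side, e.g. in the IPC resume-ack handler:
     *     dev->resume_flag = false;
     *     wake_up_interruptible(&dev->resume_wait);
     */
    static bool wait_for_resume_ack(struct my_dev *dev)
    {
        if (dev->resume_flag)
            wait_event_interruptible_timeout(dev->resume_wait,
                                             !dev->resume_flag,
                                             msecs_to_jiffies(50));
        return !dev->resume_flag;
    }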
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index dcf3a235870f..7c2032f7f44d 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -38,7 +38,7 @@ config USB_HIDDEV
help
Say Y here if you want to support HID devices (from the USB
specification standpoint) that aren't strictly user interface
- devices, like monitor controls and Uninterruptable Power Supplies.
+ devices, like monitor controls and Uninterruptible Power Supplies.
This module supports these devices separately using a separate
event interface on /dev/usb/hiddevX (char 180:96 to 180:111).
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 81d7d12bcf34..81ba642adcb7 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2548,6 +2548,9 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
int slot;
slot = input_mt_get_slot_by_key(input, hid_data->id);
+ if (slot < 0)
+ return;
+
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
}
@@ -3831,7 +3834,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
wacom_wac->shared->touch->product == 0xF6) {
input_dev->evbit[0] |= BIT_MASK(EV_SW);
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
- wacom_wac->shared->has_mute_touch_switch = true;
+ wacom_wac->has_mute_touch_switch = true;
}
fallthrough;
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 6d5014ebaab5..a6ea1eb1394e 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -635,8 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
status = readb(i2c->base + MPC_I2C_SR);
if (status & CSR_MIF) {
- /* Read again to allow register to stabilise */
- status = readb(i2c->base + MPC_I2C_SR);
+ /* Wait up to 100us for transfer to properly complete */
+ readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
writeb(0, i2c->base + MPC_I2C_SR);
mpc_i2c_do_intr(i2c, status);
return IRQ_HANDLED;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index d5674026512a..a8688a92c760 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
if (!chip_ctx)
return -ENOMEM;
chip_ctx->chip_num = bp->chip_num;
+ chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
rdev->chip_ctx = chip_ctx;
/* rest members to follow eventually */
@@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
dma_addr_t dma_map,
u32 *fw_stats_ctx_id)
{
+ struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
struct hwrm_stat_ctx_alloc_output resp = {0};
struct hwrm_stat_ctx_alloc_input req = {0};
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
- req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+ req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 17f0701b3cee..44282a8cdd4f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -56,6 +56,7 @@
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_stats *stats);
/* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
goto fail;
stats_alloc:
/* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
if (rc)
goto fail;
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
}
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_stats *stats)
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
- /* 128 byte aligned context memory is required only for 57500.
- * However making this unconditional, it does not harm previous
- * generation.
- */
- stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+ stats->size = cctx->hw_stats_size;
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index c291f495ae91..91031502e8f5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx {
u16 chip_num;
u8 chip_rev;
u8 chip_metal;
+ u16 hw_stats_size;
struct bnxt_qplib_drv_modes modes;
};
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index b1023a7d0bd1..f1e5515256e0 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
* parses fpm commit info and copy base value
* of hmc objects in hmc_info
*/
-static enum irdma_status_code
+static void
irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
struct irdma_hmc_obj_info *info, u32 *sd)
{
@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
else
*sd = (u32)(size >> 21);
- return 0;
}
/**
@@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
* @dev: sc device struct
* @count: allocate count
*/
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
{
writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
-
- return 0;
}
/**
@@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
&commit_fpm_mem, true, wait_type);
if (!ret_code)
- ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
- hmc_info->hmc_obj,
- &hmc_info->sd_table.sd_cnt);
+ irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+ hmc_info->hmc_obj,
+ &hmc_info->sd_table.sd_cnt);
print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
false);
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7afb8a6a0526..00de5ee9a260 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
* irdma_set_hw_rsrc - set hw memory resources.
* @rf: RDMA PCI function
*/
-static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
{
rf->allocated_qps = (void *)(rf->mem_rsrc +
(sizeof(struct irdma_arp_entry) * rf->arp_table_size));
@@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
spin_lock_init(&rf->arp_lock);
spin_lock_init(&rf->qptable_lock);
spin_lock_init(&rf->qh_list_lock);
-
- return 0;
}
/**
@@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
- ret = irdma_set_hw_rsrc(rf);
- if (ret)
- goto set_hw_rsrc_fail;
+ irdma_set_hw_rsrc(rf);
set_bit(0, rf->allocated_mrs);
set_bit(0, rf->allocated_qps);
@@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
-set_hw_rsrc_fail:
- kfree(rf->mem_rsrc);
- rf->mem_rsrc = NULL;
mem_rsrc_kzalloc_fail:
kfree(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index ea59432351fb..51a41359e0b4 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
}
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+ struct ice_vsi *vsi)
{
struct irdma_pci_f *rf = iwdev->rf;
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
rf->cdev = pf;
rf->gen_ops.register_qset = irdma_lan_register_qset;
@@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
struct iidc_auxiliary_dev,
adev);
struct ice_pf *pf = iidc_adev->pf;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct iidc_qos_params qos_info = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
struct irdma_l2params l2params = {};
int err;
+ if (!vsi)
+ return -EIO;
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
@@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
return -ENOMEM;
}
- irdma_fill_device_info(iwdev, pf);
+ irdma_fill_device_info(iwdev, pf, vsi);
rf = iwdev->rf;
if (irdma_ctrl_init_hw(rf)) {
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 7387b83e826d..874bc25a938b 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
struct irdma_aeq_init_info *info);
enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
struct irdma_aeqe_info *info);
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
- u32 count);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
int abi_ver);
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index a6d52c20091c..5fb92de1f015 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
struct irdma_post_rq_info *info)
{
- u32 total_size = 0, wqe_idx, i, byte_off;
+ u32 wqe_idx, i, byte_off;
u32 addl_frag_cnt;
__le64 *wqe;
u64 hdr;
@@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
if (qp->max_rq_frag_cnt < info->num_sges)
return IRDMA_ERR_INVALID_FRAG_COUNT;
- for (i = 0; i < info->num_sges; i++)
- total_size += info->sg_list[i].len;
-
wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
if (!wqe)
return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9712f6902ba8..717147ed0519 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
* @iwqp: qp ptr
* @init_info: initialize info to return
*/
-static int irdma_setup_virt_qp(struct irdma_device *iwdev,
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *init_info)
{
@@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev,
init_info->sq_pa = qpmr->sq_pbl.addr;
init_info->rq_pa = qpmr->rq_pbl.addr;
}
-
- return 0;
}
/**
@@ -914,7 +912,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
}
}
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
- err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ irdma_setup_virt_qp(iwdev, iwqp, &init_info);
} else {
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4de235..be4bcb420fab 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
+ int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
- pr_warn("err %d from rxe_umem_get\n",
- (int)PTR_ERR(umem));
+ pr_warn("%s: Unable to pin memory region err = %d\n",
+ __func__, (int)PTR_ERR(umem));
err = PTR_ERR(umem);
- goto err1;
+ goto err_out;
}
mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
err = rxe_mr_alloc(mr, num_buf);
if (err) {
- pr_warn("err %d from rxe_mr_alloc\n", err);
- ib_umem_release(umem);
- goto err1;
+ pr_warn("%s: Unable to allocate memory for map\n",
+ __func__);
+ goto err_release_umem;
}
mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
- pr_warn("null vaddr\n");
- ib_umem_release(umem);
+ pr_warn("%s: Unable to get virtual address\n",
+ __func__);
err = -ENOMEM;
- goto err1;
+ goto err_cleanup_map;
}
buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
return 0;
-err1:
+err_cleanup_map:
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
+ kfree(mr->map);
+err_release_umem:
+ ib_umem_release(umem);
+err_out:
return err;
}
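The relabeling above turns an ambiguous shared exit label into the conventional kernel goto ladder: each failure jumps to the label that undoes exactly what has been acquired so far, releasing in reverse order with no leak or double-free on any path. A minimal userspace illustration (names made up):

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(int fail_at)
    {
        char *umem, *map;

        umem = (fail_at == 1) ? NULL : malloc(16);
        if (!umem)
            goto err_out;              /* nothing acquired yet */

        map = (fail_at == 2) ? NULL : malloc(16);
        if (!map)
            goto err_release_umem;     /* undo step 1 only */

        free(map);                     /* success path */
        free(umem);
        return 0;

    err_release_umem:
        free(umem);
    err_out:
        return -12;                    /* -ENOMEM, say */
    }

    int main(void)
    {
        printf("%d %d %d\n", setup(0), setup(1), setup(2));
        return 0;
    }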
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index 4657e99df033..59a36f922675 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -173,10 +173,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled) {
- acpi_dev_put(adev);
+ if (!adev->status.enabled)
continue;
- }
if (bridge->n_sensors >= CIO2_NUM_PORTS) {
acpi_dev_put(adev);
@@ -185,7 +183,6 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
}
sensor = &bridge->sensors[bridge->n_sensors];
- sensor->adev = adev;
strscpy(sensor->name, cfg->hid, sizeof(sensor->name));
ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
@@ -215,6 +212,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
goto err_free_swnodes;
}
+ sensor->adev = acpi_dev_get(adev);
adev->fwnode.secondary = fwnode;
dev_info(&cio2->dev, "Found supported sensor %s\n",
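The refcounting rule this hunk enforces, going by the ACPI iterator's semantics: for_each_acpi_dev_match() holds a reference only for the current iteration and drops it itself when advancing, so the old acpi_dev_put() before continue was a double-put, and any pointer stored past the iteration needs its own reference. A kernel-context fragment (not standalone; the HID string is illustrative and sensor comes from the surrounding code):

    struct acpi_device *adev;

    for_each_acpi_dev_match(adev, "HYPO0001", NULL, -1) {
        if (!adev->status.enabled)
            continue;   /* no acpi_dev_put(): the iterator drops
                         * this iteration's reference itself */

        /* The loop keeps running after this, so anything stored
         * beyond the iteration takes its own reference: */
        sensor->adev = acpi_dev_get(adev);
    }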
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 07f342db6701..7481f553f959 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
com.cmd.hdr.Length = 6;
- memcpy(&com.cmd.ConfigureBuffers.config, config, 6);
+ memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
com.in_len = 6;
com.out_len = 0;
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 84f04e0e0cb9..3d296f1998a1 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {
struct FW_CONFIGURE_FREE_BUFFERS {
struct FW_HEADER hdr;
- u8 UVI1_BufferLength;
- u8 UVI2_BufferLength;
- u8 TVO_BufferLength;
- u8 AUD1_BufferLength;
- u8 AUD2_BufferLength;
- u8 TVA_BufferLength;
+ struct {
+ u8 UVI1_BufferLength;
+ u8 UVI2_BufferLength;
+ u8 TVO_BufferLength;
+ u8 AUD1_BufferLength;
+ u8 AUD2_BufferLength;
+ u8 TVA_BufferLength;
+ } __packed config;
} __attribute__ ((__packed__));
struct FW_CONFIGURE_UART {
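The regrouping above gives the 6-byte memcpy() in ngene_command_config_free_buf() a destination object of exactly six bytes, rather than a copy that starts at one u8 field and spills into its siblings, which bounds-checked memcpy() implementations reject. A self-contained sketch of the idea:

    #include <stdio.h>
    #include <string.h>

    struct msg {
        struct {
            unsigned char a, b, c, d, e, f;
        } __attribute__((__packed__)) config;
    } __attribute__((__packed__));

    int main(void)
    {
        struct msg m;
        unsigned char src[6] = { 1, 2, 3, 4, 5, 6 };

        /* dst is sizeof(m.config) == 6: size checkers and readers agree */
        memcpy(&m.config, src, sizeof(m.config));
        printf("%u %u\n", m.config.a, m.config.f); /* 1 6 */
        return 0;
    }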
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 7a6f01ace78a..305ffad131a2 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -714,23 +714,20 @@ static int at24_probe(struct i2c_client *client)
}
/*
- * If the 'label' property is not present for the AT24 EEPROM,
- * then nvmem_config.id is initialised to NVMEM_DEVID_AUTO,
- * and this will append the 'devid' to the name of the NVMEM
- * device. This is purely legacy and the AT24 driver has always
- * defaulted to this. However, if the 'label' property is
- * present then this means that the name is specified by the
- * firmware and this name should be used verbatim and so it is
- * not necessary to append the 'devid'.
+	 * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the
+	 * label property is set, because some platforms can have multiple
+	 * EEPROMs with the same label and we cannot register each of those
+	 * under the same name. Failing to register those EEPROMs triggers a
+	 * cascade failure on such platforms.
*/
+ nvmem_config.id = NVMEM_DEVID_AUTO;
+
if (device_property_present(dev, "label")) {
- nvmem_config.id = NVMEM_DEVID_NONE;
err = device_property_read_string(dev, "label",
&nvmem_config.name);
if (err)
return err;
} else {
- nvmem_config.id = NVMEM_DEVID_AUTO;
nvmem_config.name = dev_name(dev);
}
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index dd17b8c53e1c..89d9c986a229 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
return ret;
}
-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+static int hi3110_cmd(struct spi_device *spi, u8 command)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);
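The u8-to-int change above matters because a negative errno stored in an unsigned byte is silently truncated, so callers can never see the failure. A self-contained demonstration:

    #include <stdio.h>

    static unsigned char bad_cmd(void)  { return -5; } /* becomes 251 */
    static int           good_cmd(void) { return -5; }

    int main(void)
    {
        if (bad_cmd() < 0)          /* never true: u8 is unsigned */
            puts("bad_cmd error detected");
        if (good_cmd() < 0)         /* works as intended */
            puts("good_cmd error detected");
        return 0;
    }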
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 2b1e57552e1c..6c369a399c45 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2304,6 +2304,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
err, priv->regs_status.intf);
mcp251xfd_dump(priv);
mcp251xfd_chip_interrupts_disable(priv);
+ mcp251xfd_timestamp_stop(priv);
return handled;
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 0a37af4a3fa4..2b5302e72435 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -255,6 +255,8 @@ struct ems_usb {
unsigned int free_slots; /* remember number of available slots */
struct ems_cpc_msg active_params; /* active controller parameters */
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev)
}
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
buf, RX_BUFFER_SIZE,
ems_usb_read_bulk_callback, dev);
@@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
usb_kill_anchored_urbs(&dev->rx_submitted);
+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
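This and the esd_usb2 and usb_8dev hunks below apply one pattern: buffers from usb_alloc_coherent() are not owned by the URB, so usb_free_urb() never releases them; the driver records each (cpu address, DMA handle) pair at setup and frees the pairs explicitly at teardown. A condensed kernel-context fragment (not standalone; field names as in the fix):

    buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
                             &buf_dma);
    if (!buf)
        return -ENOMEM;
    urb->transfer_dma = buf_dma;

    dev->rxbuf[i] = buf;          /* remember for teardown */
    dev->rxbuf_dma[i] = buf_dma;

    /* later, in unlink_all_urbs(): */
    for (i = 0; i < MAX_RX_URBS; i++)
        usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
                          dev->rxbuf[i], dev->rxbuf_dma[i]);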
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 60f3e0ca080a..7370981e9b34 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -195,6 +195,8 @@ struct esd_usb2 {
int net_count;
u32 version;
int rxinitdone;
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
struct esd_usb2_net_priv {
@@ -545,6 +547,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -554,7 +557,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
}
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
dev_warn(dev->udev->dev.parent,
"No memory left for USB buffer\n");
@@ -562,6 +565,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
goto freeurb;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
@@ -574,8 +579,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+ goto freeurb;
}
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
freeurb:
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
@@ -663,6 +672,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
for (i = 0; i < dev->net_count; i++) {
priv = dev->nets[i];
if (priv) {
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index a45865bd7254..a1a154c08b7f 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
break;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
buf, MCBA_USB_RX_BUFF_SIZE,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index e36e60c3703a..837b3fecd71e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -123,7 +123,8 @@
#define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
/* identify bus event packets with rx/tx error counters */
-#define PCAN_USB_ERR_CNT 0x80
+#define PCAN_USB_ERR_CNT_DEC 0x00 /* counters are decreasing */
+#define PCAN_USB_ERR_CNT_INC 0x80 /* counters are increasing */
/* private to PCAN-USB adapter */
struct pcan_usb {
@@ -535,11 +536,12 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
/* according to the content of the packet */
switch (ir) {
- case PCAN_USB_ERR_CNT:
+ case PCAN_USB_ERR_CNT_DEC:
+ case PCAN_USB_ERR_CNT_INC:
/* save rx/tx error counters in the device context */
- pdev->bec.rxerr = mc->ptr[0];
- pdev->bec.txerr = mc->ptr[1];
+ pdev->bec.rxerr = mc->ptr[1];
+ pdev->bec.txerr = mc->ptr[2];
break;
default:
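The pcan_usb hunk fixes two things at once: the 0x80-flagged bus-event packet is the increasing-counters variant and has a 0x00-flagged decreasing twin that was previously ignored, and the RX/TX error counters sit at record offsets 1 and 2, not 0 and 1. A hedged sketch of the corrected decode (the meaning of byte 0 is not shown in this diff, so it is simply skipped here):

```c
/* sketch only: "rec" is the mc->ptr record from the hunk above */
static void pcan_usb_save_bec(struct pcan_usb *pdev, const u8 *rec)
{
	pdev->bec.rxerr = rec[1];	/* counters moved one byte right */
	pdev->bec.txerr = rec[2];
}
```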
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index b6e7ef0d5bc6..d1b83bd1b3cb 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -137,7 +137,8 @@ struct usb_8dev_priv {
u8 *cmd_msg_buffer;
struct mutex usb_8dev_cmd_lock;
-
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
/* tx frame */
@@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
}
buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev,
USB_8DEV_ENDP_DATA_RX),
@@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}
+ priv->rxbuf[i] = buf;
+ priv->rxbuf_dma[i] = buf_dma;
+
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
usb_kill_anchored_urbs(&priv->rx_submitted);
+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+ priv->rxbuf[i], priv->rxbuf_dma[i]);
+
usb_kill_anchored_urbs(&priv->tx_submitted);
atomic_set(&priv->active_tx_urbs, 0);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index af764b8445b7..ddb51dd132ef 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2176,7 +2176,7 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
int i, err;
if (!vid)
- return -EOPNOTSUPP;
+ return 0;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4a92ea7fa7d2..03b821897cf3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -12168,9 +12168,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, rc);
- if (!rc)
- bnxt_reenable_sriov(bp);
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index e33e311e2341..7f55ebbfd04b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -560,6 +560,12 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
+ if (time_after_eq(now, ptp->next_overflow_check)) {
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
+ }
return HZ;
}
@@ -713,6 +719,7 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp->cc.shift = 0;
ptp->cc.mult = 1;
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
ptp->ptp_info = bnxt_ptp_caps;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 88923346ab50..cc3cdbaab6cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -83,6 +83,10 @@ struct bnxt_ptp_cfg {
u64 current_time;
u64 old_time;
unsigned long next_period;
+ unsigned long next_overflow_check;
+ /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
+ #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+
u16 tx_seqid;
struct bnxt *bp;
atomic_t tx_avail;
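The 78-hour figure in the new comment follows directly from the clock setup in the bnxt_ptp.c hunk: with cc.shift = 0 and cc.mult = 1 the cyclecounter ticks in nanoseconds, so a 48-bit counter wraps after 2^48 ns. A quick standalone check of the arithmetic (a sketch, not driver code):

```c
#include <stdio.h>

int main(void)
{
	unsigned long long wrap_ns = 1ULL << 48;	/* 48-bit PHC, 1 ns/tick */
	double wrap_hours = wrap_ns / 1e9 / 3600.0;	/* ~78.19 hours */

	/* reading the timecounter every 19 hours keeps the delta between
	 * reads at roughly a quarter of the wrap period
	 */
	printf("wrap after %.2f hours; check period 19 h (~%.0f%% of wrap)\n",
	       wrap_hours, 19.0 / wrap_hours * 100);
	return 0;
}
```

Reading at least once per wrap period is the hard requirement for timecounter correctness; 19 hours leaves roughly a 4x margin.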
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 07a48f6bf0fa..85b99099c6b9 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
void __iomem *ioaddr;
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) return i;
pci_set_master(pdev);
@@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
if (!ioaddr)
- goto err_out_free_res;
+ goto err_out_netdev;
for (i = 0; i < 3; i++)
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_cleardev:
pci_iounmap(pdev, ioaddr);
-err_out_free_res:
- pci_release_regions(pdev);
err_out_netdev:
free_netdev (dev);
return -ENODEV;
@@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev)
if (dev) {
struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
- pci_release_regions(pdev);
pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
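The winbond-840 conversion (and the matching sis900 one further down) leans on managed PCI: once pcim_enable_device() marks the device as managed, later pci_request_regions() calls are tracked by devres and undone automatically on probe failure or removal, which is why the explicit pci_release_regions() calls and the err_out_free_res label can be dropped without leaking. A sketch of the resulting probe shape, with a hypothetical foo_probe():

```c
#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *ioaddr;
	int err;

	err = pcim_enable_device(pdev);	/* managed: auto-disable on detach */
	if (err)
		return err;
	pci_set_master(pdev);

	err = pci_request_regions(pdev, "foo");	/* managed too, after pcim_* */
	if (err)
		return err;

	/* the drivers above keep plain pci_iomap()/pci_iounmap(); the
	 * devres variant shown here drops even that manual unwind
	 */
	ioaddr = pcim_iomap(pdev, 0, 0);
	if (!ioaddr)
		return -ENOMEM;

	return 0;	/* no error labels: devres unwinds everything */
}
```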
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 3b1f84502e36..befa9bcc2f2f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -5,9 +5,27 @@
#include "hclge_main.h"
#include "hnae3.h"
+static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
+ HCLGE_PTP_CYCLE_QUO_MASK;
+ ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+
+ if (ptp->cycle.den == 0) {
+ dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
u64 adj_val, adj_base, diff;
unsigned long flags;
bool is_neg = false;
@@ -18,7 +36,7 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
is_neg = true;
}
- adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
+ adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
adj_val = adj_base * ppb;
diff = div_u64(adj_val, 1000000000ULL);
@@ -29,16 +47,16 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/* This clock cycle is defined by three parts: quotient, numerator
* and denominator. For example, for a 2.5 ns cycle the quotient is 2,
- * denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and numerator
- * is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
+ * denominator is fixed to ptp->cycle.den, and numerator
+ * is 0.5 * ptp->cycle.den.
*/
- quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
+ quo = div_u64_rem(adj_val, cycle->den, &numerator);
spin_lock_irqsave(&hdev->ptp->lock, flags);
- writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+ writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
- writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
- hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+ writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
writel(HCLGE_PTP_CYCLE_ADJ_EN,
hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
spin_unlock_irqrestore(&hdev->ptp->lock, flags);
@@ -475,6 +493,10 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_create_clock(hdev);
if (ret)
return ret;
+
+ ret = hclge_ptp_get_cycle(hdev);
+ if (ret)
+ return ret;
}
ret = hclge_ptp_int_en(hdev, true);
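With the hard-coded constants gone, the adjustment math works in units of 1/den ns, where den is now read from hardware: adj_base = quo * den + numer is the nominal cycle in those units, and the ppb-scaled result is split back into quotient and remainder for the registers. A worked example under assumed values (2.5 ns cycle, den = 100000000, +100 ppb), as a standalone sketch:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long quo = 2, numer = 50000000, den = 100000000;
	unsigned long long ppb = 100;	/* requested +100 ppb adjustment */

	/* nominal cycle in units of (1/den) ns: 2.5 ns -> 250000000 */
	unsigned long long adj_base = quo * den + numer;

	/* scale by (1e9 + ppb)/1e9, then split back into quo/numer;
	 * the driver handles the negative-ppb case by subtracting the
	 * same diff instead
	 */
	unsigned long long adj_val = adj_base + adj_base * ppb / 1000000000ULL;

	printf("adjusted quo=%llu numer=%llu (den=%llu)\n",
	       adj_val / den, adj_val % den, den);	/* quo=2 numer=50000025 */
	return 0;
}
```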
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 5a202b775471..dbf5f4c08019 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -29,6 +29,7 @@
#define HCLGE_PTP_TIME_ADJ_REG 0x60
#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
#define HCLGE_PTP_CYCLE_QUO_REG 0x64
+#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0)
#define HCLGE_PTP_CYCLE_DEN_REG 0x68
#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
#define HCLGE_PTP_CYCLE_CFG_REG 0x70
@@ -37,9 +38,7 @@
#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
-#define HCLGE_PTP_CYCLE_ADJ_BASE 2
#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
-#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000
#define HCLGE_PTP_SEC_H_OFFSET 32u
#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
@@ -47,6 +46,12 @@
#define HCLGE_PTP_FLAG_TX_EN 1
#define HCLGE_PTP_FLAG_RX_EN 2
+struct hclge_ptp_cycle {
+ u32 quo;
+ u32 numer;
+ u32 den;
+};
+
struct hclge_ptp {
struct hclge_dev *hdev;
struct ptp_clock *clock;
@@ -58,6 +63,7 @@ struct hclge_ptp {
spinlock_t lock; /* protects ptp registers */
u32 ptp_cfg;
u32 last_tx_seqid;
+ struct hclge_ptp_cycle cycle;
unsigned long tx_start;
unsigned long tx_cnt;
unsigned long tx_skipped;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3e822bad4851..2c9e4eeb7270 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
default:
/* if we got here and link is up something bad is afoot */
netdev_info(netdev,
- "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+ "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
hw_link_info->phy_type);
}
@@ -5294,6 +5294,10 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
+ case I40E_AQ_RC_EAGAIN:
+ dev_warn(&pf->pdev->dev,
+ "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+ return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %s, %s\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5b4012a09acb..97c78551395b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4457,11 +4457,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
}
/**
- * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * i40e_vsi_enable_tx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
@@ -4470,7 +4469,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q,
- false /*is xdp*/, enable);
+ false /*is xdp*/, true);
if (ret)
break;
@@ -4479,7 +4478,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q + vsi->alloc_queue_pairs,
- true /*is xdp*/, enable);
+ true /*is xdp*/, true);
if (ret)
break;
}
@@ -4577,32 +4576,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
- * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * i40e_vsi_enable_rx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+ ret = i40e_control_wait_rx_q(pf, pf_q, true);
if (ret) {
dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ "VSI seid %d Rx ring %d enable timeout\n",
+ vsi->seid, pf_q);
break;
}
}
- /* Due to HW errata, on Rx disable only, the register can indicate done
- * before it really is. Needs 50ms to be sure
- */
- if (!enable)
- mdelay(50);
-
return ret;
}
@@ -4615,29 +4607,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
int ret = 0;
/* do rx first for enable and last for disable */
- ret = i40e_vsi_control_rx(vsi, true);
+ ret = i40e_vsi_enable_rx(vsi);
if (ret)
return ret;
- ret = i40e_vsi_control_tx(vsi, true);
+ ret = i40e_vsi_enable_tx(vsi);
return ret;
}
+#define I40E_DISABLE_TX_GAP_MSEC 50
+
/**
* i40e_vsi_stop_rings - Stop a VSI's rings
* @vsi: the VSI being configured
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+ struct i40e_pf *pf = vsi->back;
+ int pf_q, err, q_end;
+
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- /* do rx first for enable and last for disable
- * Ignore return value, we need to shutdown whatever we can
- */
- i40e_vsi_control_tx(vsi, false);
- i40e_vsi_control_rx(vsi, false);
+ q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+ err = i40e_control_wait_rx_q(pf, pf_q, false);
+ if (err)
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d dissable timeout\n",
+ vsi->seid, pf_q);
+ }
+
+ msleep(I40E_DISABLE_TX_GAP_MSEC);
+ pf_q = vsi->base_queue;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+ i40e_vsi_wait_queues_disabled(vsi);
}
/**
@@ -7283,6 +7293,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
}
if (vsi->num_queue_pairs <
(mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to create traffic channel, insufficient number of queues.\n");
return -EINVAL;
}
if (sum_max_rate > i40e_get_link_speed(vsi)) {
@@ -13264,6 +13276,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
+ .ndo_select_queue = i40e_lan_select_queue,
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 38eb8151ee9a..3f25bd8c4924 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3631,6 +3631,56 @@ dma_error:
return -1;
}
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 num_tx_queues)
+{
+ u32 jhash_initval_salt = 0xd631614b;
+ u32 hash;
+
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16)skb->protocol ^ skb->hash;
+
+ hash = jhash_1word(hash, jhash_initval_salt);
+
+ return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
+u16 i40e_lan_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device __always_unused *sb_dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw;
+ u16 qoffset;
+ u16 qcount;
+ u8 tclass;
+ u16 hash;
+ u8 prio;
+
+ /* is DCB enabled at all? */
+ if (vsi->tc_config.numtc == 1)
+ return i40e_swdcb_skb_tx_hash(netdev, skb,
+ netdev->real_num_tx_queues);
+
+ prio = skb->priority;
+ hw = &vsi->back->hw;
+ tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+ /* sanity check */
+ if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+ tclass = 0;
+
+ /* select a queue assigned for the given TC */
+ qcount = vsi->tc_config.tc_info[tclass].qcount;
+ hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+ qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+ return qoffset + hash;
+}
+
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
* @xdpf: data to transmit
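The ((u64)hash * num_tx_queues) >> 32 step in i40e_swdcb_skb_tx_hash() is the usual multiply-shift range reduction: it maps a 32-bit hash onto [0, n) as floor(hash * n / 2^32), trading the modulo's division for a multiply and a shift. A standalone sketch of the behavior at the extremes:

```c
#include <stdint.h>
#include <stdio.h>

static uint16_t scale(uint32_t hash, uint16_t num_tx_queues)
{
	return (uint16_t)(((uint64_t)hash * num_tx_queues) >> 32);
}

int main(void)
{
	/* extremes land on the first and last queue, never out of range */
	printf("%u %u %u\n",
	       scale(0, 8),		/* 0 */
	       scale(0x80000000u, 8),	/* 4 */
	       scale(0xffffffffu, 8));	/* 7 */
	return 0;
}
```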
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 86fed05b4f19..bfc2845c99d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -451,6 +451,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 9169849881bf..544c96c8fe1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1504,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[lmac->lmac_id] = lmac;
- cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
}
return cgx_lmac_verify_fwi_version(cgx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 19bad9a59c8f..243cf8070e77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -151,7 +151,10 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+
enum npc_pkind_type {
+ NPC_RX_LBK_PKIND = 0ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
NPC_RX_CPT_HDR_PKIND,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 017163fb3cd5..5fe277e354f7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -391,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
/* Get numVFs attached to this PF and first HWVF */
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
- *numvfs = (cfg >> 12) & 0xFF;
- *hwvf = cfg & 0xFFF;
+ if (numvfs)
+ *numvfs = (cfg >> 12) & 0xFF;
+ if (hwvf)
+ *hwvf = cfg & 0xFFF;
}
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0933699a0d2d..4bfbbdf38770 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -196,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
- /*Sync all in flight RX packets to LLC/DRAM */
+ /* Sync all in flight RX packets to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
- dev_err(rvu->dev, "NIX RX software sync failed\n");
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+ /* SW_SYNC ensures all existing transactions are finished and pkts
+ * are written to LLC/DRAM; queues should be torn down only after a
+ * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
+ * an existing transaction might complete after the SW_SYNC operation.
+ * To ensure the operation is fully done, do the SW_SYNC twice.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
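Since the two sync rounds are identical apart from the log tag, the hunk could equally be read as calling a small helper twice; a hypothetical factoring, using the same register accessors as the code above:

```c
/* sketch of a possible factoring; the hunk above open-codes both rounds */
static void nix_rx_sw_sync_once(struct rvu *rvu, int blkaddr, const char *tag)
{
	int err;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "%s: NIX RX software sync failed\n", tag);
}

/* callers run it twice; the second round catches the rare transaction
 * that outlives the first SW_SYNC (HW erratum)
 */
```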
@@ -298,6 +309,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
@@ -3842,7 +3854,6 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
vlan = &nix_hw->txvlan;
kfree(vlan->rsrc.bmap);
mutex_destroy(&vlan->rsrc_lock);
- devm_kfree(rvu->dev, vlan->entry2pfvf_map);
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 1097291aaa45..52b255426c22 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1721,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int num_pkinds, num_kpus, idx;
- struct npc_pkind *pkind;
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -1739,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
- pkind = &hw->pkind;
num_pkinds = rvu->kpu.pkinds;
- num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1891,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
if (npc_const1 & BIT_ULL(63))
npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
- pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
hw->npc_intfs = npc_const & 0xFULL;
@@ -2002,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu)
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
+ /* Reserve PKIND#0 for LBKs. The power-on reset value of LBK_CH_PKIND
+ * is '0', so there is no need to configure a PKIND for each LBK separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
/* Allocate mem for pkind to PF and channel mapping info */
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 2e5379710aa5..820adf390b8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -71,8 +71,8 @@ static int rvu_switch_install_rules(struct rvu *rvu)
struct rvu_switch *rswitch = &rvu->rswitch;
u16 start = rswitch->start_entry;
struct rvu_hwinfo *hw = rvu->hw;
- int pf, vf, numvfs, hwvf;
u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
int err;
for (pf = 1; pf < hw->total_pfs; pf++) {
@@ -110,8 +110,8 @@ static int rvu_switch_install_rules(struct rvu *rvu)
rswitch->entry2pcifunc[entry++] = pcifunc;
- rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
- for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
rvu_get_nix_blkaddr(rvu, pcifunc);
@@ -198,7 +198,7 @@ void rvu_switch_disable(struct rvu *rvu)
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct rvu_hwinfo *hw = rvu->hw;
- int pf, vf, numvfs, hwvf;
+ int pf, vf, numvfs;
struct msg_rsp rsp;
u16 pcifunc;
int err;
@@ -217,7 +217,8 @@ void rvu_switch_disable(struct rvu *rvu)
"Reverting RX rule for PF%d failed(%d)\n",
pf, err);
- for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 7cccd802c4ed..70fcc1fd962f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -924,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
- /* Enable receive CQ backpressure */
- aq->cq.bp_ena = 1;
- aq->cq.bpid = pfvf->bpid[0];
+ if (!is_otx2_lbkvf(pfvf->pdev)) {
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
- /* Set backpressure level is same as cq pass level */
- aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ /* Set backpressure level the same as the cq pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ }
}
/* Fill AQ info */
@@ -1186,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
/* Enable backpressure for RQ aura */
- if (aura_id < pfvf->hw.rqpool_cnt) {
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
aq->aura.bp_ena = 0;
aq->aura.nix0_bpid = pfvf->bpid[0];
/* Set backpressure level for RQ's Aura */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 8df748e0677b..b906a0eb6e0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -298,15 +298,14 @@ static int otx2_set_channels(struct net_device *dev,
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
if (err)
- goto fail;
+ return err;
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
-fail:
if (if_up)
- dev->netdev_ops->ndo_open(dev);
+ err = dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -410,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->rqe_cnt = rx_count;
if (if_up)
- netdev->netdev_ops->ndo_open(netdev);
+ return netdev->netdev_ops->ndo_open(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 3f03bbdd8d04..22b7af029ebf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1662,6 +1662,7 @@ int otx2_open(struct net_device *netdev)
err_tx_stop_queues:
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
err_free_cints:
otx2_free_cints(pf, qidx);
vec = pci_irq_vector(pf->pdev,
@@ -1689,6 +1690,10 @@ int otx2_stop(struct net_device *netdev)
struct otx2_rss_info *rss;
int qidx, vec, wrk;
+ /* If the DOWN flag is set, resources are already freed */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 00c84656b2e7..28ac4693da3c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3535,6 +3535,7 @@ slave_start:
if (!SRIOV_VALID_STATE(dev->flags)) {
mlx4_err(dev, "Invalid SRIOV state\n");
+ err = -EINVAL;
goto err_close;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ceebfc20f65e..def2156e50ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1;
}
-/* This function is called with two flows:
- * 1. During initialization of mlx5_core_dev and we don't need to lock it.
- * 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
- */
+/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
struct auxiliary_device *adev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index fc602d85ca48..3cbb596821e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -483,6 +483,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
+static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+ MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+
+ return ro && params->lro_en ?
+ MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+}
+
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -520,7 +529,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 849ee3e147c4..8ff8b02c056f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
params->log_sq_size = orig->log_sq_size;
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
}
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ /* RQ */
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ params->vlan_strip_disable = orig->vlan_strip_disable;
mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+ }
}
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
@@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
int err;
rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
+ rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index afaf5b413066..d54607a42740 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
struct mlx5e_priv *priv = t->priv;
rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
+ rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c663811f210b..3e644d3955a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3223,7 +3223,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
- int err = 0;
+ int err;
int i;
for (i = 0; i < chs->num; i++) {
@@ -3231,6 +3231,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
if (err)
return err;
}
+ if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+ return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
return 0;
}
@@ -3668,6 +3670,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
+static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ features &= ~NETIF_F_HW_TLS_RX;
+ if (netdev->features & NETIF_F_HW_TLS_RX)
+ netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_HW_TLS_TX;
+ if (netdev->features & NETIF_F_HW_TLS_TX)
+ netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_NTUPLE;
+ if (netdev->features & NETIF_F_NTUPLE)
+ netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
+ return features;
+}
+
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -3699,15 +3719,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}
- if (mlx5e_is_uplink_rep(priv)) {
- features &= ~NETIF_F_HW_TLS_RX;
- if (netdev->features & NETIF_F_HW_TLS_RX)
- netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
-
- features &= ~NETIF_F_HW_TLS_TX;
- if (netdev->features & NETIF_F_HW_TLS_TX)
- netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
- }
+ if (mlx5e_is_uplink_rep(priv))
+ features = mlx5e_fix_uplink_rep_features(netdev, features);
mutex_unlock(&priv->state_lock);
@@ -4701,6 +4714,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (MLX5_CAP_ETH(mdev, scatter_fcs))
netdev->hw_features |= NETIF_F_RXFCS;
+ if (mlx5_qos_is_supported(mdev))
+ netdev->hw_features |= NETIF_F_HW_TC;
+
netdev->features = netdev->hw_features;
/* Defaults */
@@ -4721,8 +4737,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
- if (mlx5_qos_is_supported(mdev))
- netdev->features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0cee2fa76d65..2ef02fea119a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
+ struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_priv *priv;
- netdev = __dev_get_by_index(net, ifindex);
+ netdev = dev_get_by_index(net, ifindex);
+ if (!netdev)
+ return ERR_PTR(-ENODEV);
+
priv = netdev_priv(netdev);
- return priv->mdev;
+ mdev = priv->mdev;
+ dev_put(netdev);
+
+ /* Mirred tc action holds a refcount on the ifindex net_device (see
+ * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+ * after dev_put(netdev), while we're in the context of adding a tc flow.
+ *
+ * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+ * stored in a hairpin object, which exists until all flows that refer to it
+ * are removed.
+ *
+ * On the other hand, after a hairpin object has been created, the peer net_device may
+ * be removed/unbound while there are still some hairpin flows that are using it. This
+ * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+ * NETDEV_UNREGISTER event of the peer net_device.
+ */
+ return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@@ -640,6 +660,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
func_mdev = priv->mdev;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (IS_ERR(peer_mdev)) {
+ err = PTR_ERR(peer_mdev);
+ goto create_pair_err;
+ }
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
if (IS_ERR(pair)) {
@@ -778,6 +802,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
int err;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (IS_ERR(peer_mdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+ return PTR_ERR(peer_mdev);
+ }
+
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
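The switch from __dev_get_by_index() to dev_get_by_index() is the load-bearing part of this hunk: the double-underscore variant takes no reference and is only safe under RTNL or RCU, while dev_get_by_index() pins the device and must be paired with dev_put(). A minimal sketch of that discipline outside the hairpin code (hypothetical helper):

```c
#include <linux/netdevice.h>

static int read_peer_mtu(struct net *net, int ifindex)
{
	struct net_device *dev;
	int mtu;

	dev = dev_get_by_index(net, ifindex);	/* takes a reference, NULL if gone */
	if (!dev)
		return -ENODEV;

	mtu = dev->mtu;		/* safe to dereference while we hold the ref */
	dev_put(dev);		/* drop our reference */
	return mtu;
}
```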
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 48cac5bf606d..d562edf5b0bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
};
struct mlx5_vport_tbl_attr {
- u16 chain;
+ u32 chain;
u16 prio;
u16 vport;
const struct esw_vport_tbl_namespace *vport_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7579f3402776..011e766e4f67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -382,10 +382,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
- dest[dest_idx].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ }
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
if (pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -2367,6 +2368,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
+ if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
+ break;
+
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d7bf0a3e4a52..c0697e1b7118 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct fs_prio *prio)
{
- struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *next_ft, *first_ft;
int err = 0;
/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
- if (list_empty(&prio->node.children)) {
+ first_ft = list_first_entry_or_null(&prio->node.children,
+ struct mlx5_flow_table, node.list);
+ if (!first_ft || first_ft->level > ft->level) {
err = connect_prev_fts(dev, ft, prio);
if (err)
return err;
- next_ft = find_next_chained_ft(prio);
+ next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
err = connect_fwd_rules(dev, ft, next_ft);
if (err)
return err;
@@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
node.list) == ft))
return 0;
- next_ft = find_next_chained_ft(prio);
+ next_ft = find_next_ft(ft);
err = connect_fwd_rules(dev, next_ft, ft);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9ff163c5bcde..9abeb80ffa31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
fw_reporter_ctx.err_synd = health->synd;
fw_reporter_ctx.miss_counter = health->miss_counter;
- devlink_health_report(health->fw_fatal_reporter,
- "FW fatal error reported", &fw_reporter_ctx);
+ if (devlink_health_report(health->fw_fatal_reporter,
+ "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
+ /* If recovery wasn't performed, due to grace period,
+ * unload the driver. This ensures that the driver
+ * closes all its resources and it is not subjected to
+ * requests from the kernel.
+ */
+ mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+ mlx5_unload_one(dev);
+ }
}
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index f21f80cee188..f52c47a71f4b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -30,7 +30,7 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
*/
};
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
+static void ionic_lif_rx_mode(struct ionic_lif *lif);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
@@ -54,7 +54,19 @@ static void ionic_dim_work(struct work_struct *work)
cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
- qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+ new_coal = new_coal ? new_coal : 1;
+
+ if (qcq->intr.dim_coal_hw != new_coal) {
+ unsigned int qi = qcq->cq.bound_q->index;
+ struct ionic_lif *lif = qcq->q.lif;
+
+ qcq->intr.dim_coal_hw = new_coal;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[qi]->intr.index,
+ qcq->intr.dim_coal_hw);
+ }
+
dim->state = DIM_START_MEASURE;
}
@@ -78,7 +90,7 @@ static void ionic_lif_deferred_work(struct work_struct *work)
switch (w->type) {
case IONIC_DW_TYPE_RX_MODE:
- ionic_lif_rx_mode(lif, w->rx_mode);
+ ionic_lif_rx_mode(lif);
break;
case IONIC_DW_TYPE_RX_ADDR_ADD:
ionic_lif_addr_add(lif, w->addr);
@@ -1302,10 +1314,8 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return 0;
}
-static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
- bool can_sleep)
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
- struct ionic_deferred_work *work;
unsigned int nmfilters;
unsigned int nufilters;
@@ -1331,97 +1341,46 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
lif->nucast--;
}
- if (!can_sleep) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
- work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
- IONIC_DW_TYPE_RX_ADDR_DEL;
- memcpy(work->addr, addr, ETH_ALEN);
- netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
- add ? "add" : "del", addr);
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
- add ? "add" : "del", addr);
- if (add)
- return ionic_lif_addr_add(lif, addr);
- else
- return ionic_lif_addr_del(lif, addr);
- }
+ netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
+ add ? "add" : "del", addr);
+ if (add)
+ return ionic_lif_addr_add(lif, addr);
+ else
+ return ionic_lif_addr_del(lif, addr);
return 0;
}
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
-}
-
-static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
-{
- return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
+ return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
}
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
+ return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
-static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
+static void ionic_lif_rx_mode(struct ionic_lif *lif)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
-}
-
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_mode_set = {
- .opcode = IONIC_CMD_RX_MODE_SET,
- .lif_index = cpu_to_le16(lif->index),
- .rx_mode = cpu_to_le16(rx_mode),
- },
- };
+ struct net_device *netdev = lif->netdev;
+ unsigned int nfilters;
+ unsigned int nd_flags;
char buf[128];
- int err;
+ u16 rx_mode;
int i;
#define REMAIN(__x) (sizeof(buf) - (__x))
- i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
- lif->rx_mode, rx_mode);
- if (rx_mode & IONIC_RX_MODE_F_UNICAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
- if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
- if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
- if (rx_mode & IONIC_RX_MODE_F_PROMISC)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
- if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
- netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
-
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err)
- netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
- rx_mode, err);
- else
- lif->rx_mode = rx_mode;
-}
+ mutex_lock(&lif->config_lock);
-static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_deferred_work *work;
- unsigned int nfilters;
- unsigned int rx_mode;
+ /* grab the flags once for local use */
+ nd_flags = netdev->flags;
rx_mode = IONIC_RX_MODE_F_UNICAST;
- rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
- rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
- rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
- rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
+ rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
+ rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
+ rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
+ rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
/* sync unicast addresses
* next check to see if we're in an overflow state
@@ -1430,49 +1389,83 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
* we remove our overflow flag and check the netdev flags
* to see if we can disable NIC PROMISC
*/
- if (can_sleep)
- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
- else
- __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if (netdev_uc_count(netdev) + 1 > nfilters) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
lif->uc_overflow = true;
} else if (lif->uc_overflow) {
lif->uc_overflow = false;
- if (!(netdev->flags & IFF_PROMISC))
+ if (!(nd_flags & IFF_PROMISC))
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
}
/* same for multicast */
- if (can_sleep)
- __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
- else
- __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
if (netdev_mc_count(netdev) > nfilters) {
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
lif->mc_overflow = true;
} else if (lif->mc_overflow) {
lif->mc_overflow = false;
- if (!(netdev->flags & IFF_ALLMULTI))
+ if (!(nd_flags & IFF_ALLMULTI))
rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
}
+ i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+ lif->rx_mode, rx_mode);
+ if (rx_mode & IONIC_RX_MODE_F_UNICAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+ if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+ if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+ if (rx_mode & IONIC_RX_MODE_F_PROMISC)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+ if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+ if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
+ netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
+
if (lif->rx_mode != rx_mode) {
- if (!can_sleep) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "rxmode change dropped\n");
- return;
- }
- work->type = IONIC_DW_TYPE_RX_MODE;
- work->rx_mode = rx_mode;
- netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- ionic_lif_rx_mode(lif, rx_mode);
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_mode_set = {
+ .opcode = IONIC_CMD_RX_MODE_SET,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ int err;
+
+ ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
+ rx_mode, err);
+ else
+ lif->rx_mode = rx_mode;
+ }
+
+ mutex_unlock(&lif->config_lock);
+}
+
+static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_deferred_work *work;
+
+ if (!can_sleep) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "rxmode change dropped\n");
+ return;
}
+ work->type = IONIC_DW_TYPE_RX_MODE;
+ netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ ionic_lif_rx_mode(lif);
}
}
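The net effect of the ionic rework is a simpler deferral contract: atomic callers no longer precompute and carry an rx_mode payload; they queue a bare typed work item, and the worker recomputes everything under the new config_lock. A sketch of that pattern with hypothetical foo_* names (the real types and lock live in ionic_lif.h; the helper declarations are elided):

```c
/* hypothetical stand-ins for ionic_deferred_work / ionic_lif */
enum foo_dw_type { FOO_DW_TYPE_STATE };

struct foo_deferred_work {
	struct list_head list;
	enum foo_dw_type type;	/* no payload: the worker recomputes */
};

static void foo_set_state(struct foo_lif *lif, bool can_sleep)
{
	struct foo_deferred_work *work;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return;		/* change dropped, as in the hunk */
		work->type = FOO_DW_TYPE_STATE;
		foo_deferred_enqueue(&lif->deferred, work);
		return;
	}
	foo_apply_state(lif);	/* recomputes state under lif->config_lock */
}
```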
@@ -3074,6 +3067,7 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -3201,7 +3195,7 @@ static int ionic_station_set(struct ionic_lif *lif)
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
- ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -3218,7 +3212,7 @@ static int ionic_station_set(struct ionic_lif *lif)
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
return 0;
}
@@ -3241,6 +3235,7 @@ int ionic_lif_init(struct ionic_lif *lif)
lif->hw_index = le16_to_cpu(comp.hw_index);
mutex_init(&lif->queue_lock);
+ mutex_init(&lif->config_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 346506f01715..69ab59fedb6c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -108,7 +108,6 @@ struct ionic_deferred_work {
struct list_head list;
enum ionic_deferred_work_type type;
union {
- unsigned int rx_mode;
u8 addr[ETH_ALEN];
u8 fw_status;
};
@@ -179,6 +178,7 @@ struct ionic_lif {
unsigned int index;
unsigned int hw_index;
struct mutex queue_lock; /* lock for queue structures */
+ struct mutex config_lock; /* lock for config actions */
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
@@ -199,7 +199,7 @@ struct ionic_lif {
unsigned int nrxq_descs;
u32 rx_copybreak;
u64 rxq_features;
- unsigned int rx_mode;
+ u16 rx_mode;
u64 hw_features;
bool registered;
bool mc_overflow;
@@ -302,7 +302,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
int ionic_lif_size(struct ionic *ionic);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif);
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
@@ -311,10 +311,7 @@ void ionic_lif_unregister_phc(struct ionic_lif *lif);
void ionic_lif_alloc_phc(struct ionic_lif *lif);
void ionic_lif_free_phc(struct ionic_lif *lif);
#else
-static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
-{
- return -EOPNOTSUPP;
-}
+static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 736ebc5da0f7..afc45da399d4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -188,6 +188,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
+ if (!lif->phc || !lif->phc->ptp)
+ return -EOPNOTSUPP;
+
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -203,15 +206,16 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
return 0;
}
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
{
int err;
+ if (!lif->phc || !lif->phc->ptp)
+ return;
+
err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
if (err)
netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
-
- return err;
}
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2ba19246d763..37c39581b659 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -269,12 +269,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (likely(netdev->features & NETIF_F_RXCSUM)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
- stats->csum_complete++;
- }
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
+ stats->csum_complete++;
} else {
stats->csum_none++;
}
@@ -446,11 +445,12 @@ void ionic_rx_empty(struct ionic_queue *q)
q->tail_idx = 0;
}
-static void ionic_dim_update(struct ionic_qcq *qcq)
+static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
struct dim_sample dim_sample;
struct ionic_lif *lif;
unsigned int qi;
+ u64 pkts, bytes;
if (!qcq->intr.dim_coal_hw)
return;
@@ -458,14 +458,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
lif = qcq->q.lif;
qi = qcq->cq.bound_q->index;
- ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[qi]->intr.index,
- qcq->intr.dim_coal_hw);
+ switch (napi_mode) {
+ case IONIC_LIF_F_TX_DIM_INTR:
+ pkts = lif->txqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes;
+ break;
+ case IONIC_LIF_F_RX_DIM_INTR:
+ pkts = lif->rxqstats[qi].pkts;
+ bytes = lif->rxqstats[qi].bytes;
+ break;
+ default:
+ pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
+ break;
+ }
dim_update_sample(qcq->cq.bound_intr->rearm_count,
- lif->txqstats[qi].pkts,
- lif->txqstats[qi].bytes,
- &dim_sample);
+ pkts, bytes, &dim_sample);
net_dim(&qcq->dim, dim_sample);
}
@@ -486,7 +495,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
ionic_tx_service, NULL, NULL);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -525,7 +534,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -571,7 +580,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c59b72c90293..a2e4dfb5cb44 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -831,7 +831,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev)
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_vlan *vlan = NULL;
+ struct qede_vlan *vlan;
int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
@@ -842,7 +842,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
if (vlan->vid == vid)
break;
- if (!vlan || (vlan->vid != vid)) {
+ if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Vlan isn't configured\n");
goto out;
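The old "!vlan || vlan->vid != vid" test could never fire: list_for_each_entry() leaves the cursor pointing at the list head (not NULL) when no match is found, so dereferencing vlan->vid read past the head structure. A self-contained sketch of the idiom, re-implementing the kernel macros in userspace for illustration (the real list_for_each_entry infers the type via typeof instead of taking it as an argument):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_entry_is_head(pos, head, member) \
	(&(pos)->member == (head))
#define list_for_each_entry(pos, head, member, type)		\
	for (pos = list_entry((head)->next, type, member);	\
	     !list_entry_is_head(pos, head, member);		\
	     pos = list_entry(pos->member.next, type, member))

struct vlan { unsigned short vid; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct vlan v1 = { 10 }, *pos;

	/* insert v1 at the front of the circular list */
	v1.list.next = head.next; v1.list.prev = &head;
	head.next->prev = &v1.list; head.next = &v1.list;

	list_for_each_entry(pos, &head, list, struct vlan)
		if (pos->vid == 99)
			break;

	/* pos now aliases the head, not a real vlan: reading pos->vid
	 * here is exactly the out-of-bounds access the patch removes */
	if (list_entry_is_head(pos, &head, list))
		printf("vid 99 not configured\n");
	return 0;
}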
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2376b2729633..c00ad57575ea 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -154,7 +154,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
"driver lock acquired\n");
return 1;
}
- ssleep(1);
+ mdelay(1000);
} while (++i < 10);
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
@@ -3274,7 +3274,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
if ((value & ISP_CONTROL_SR) == 0)
break;
- ssleep(1);
+ mdelay(1000);
} while ((--max_wait_time));
/*
@@ -3310,7 +3310,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
ispControlStatus);
if ((value & ISP_CONTROL_FSR) == 0)
break;
- ssleep(1);
+ mdelay(1000);
} while ((--max_wait_time));
}
if (max_wait_time == 0)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ec6f7f993eb7..60a0c0e9ded2 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
#endif
/* setup various bits in PCI command register */
- ret = pci_enable_device(pci_dev);
+ ret = pcim_enable_device(pci_dev);
if(ret) return ret;
i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
@@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ioaddr) {
ret = -ENOMEM;
- goto err_out_cleardev;
+ goto err_out;
}
sis_priv = netdev_priv(net_dev);
@@ -581,8 +581,6 @@ err_unmap_tx:
sis_priv->tx_ring_dma);
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
- pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
return ret;
@@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
sis_priv->tx_ring_dma);
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
- pci_release_regions(pci_dev);
}
static int __maybe_unused sis900_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 67ba083eb90c..b21745368983 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1249,6 +1249,7 @@ const struct stmmac_ops dwmac410_ops = {
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
@@ -1300,6 +1301,7 @@ const struct stmmac_ops dwmac510_ops = {
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 006fd4237725..a95e99da1ae5 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
err = niu_pci_vpd_scan_props(np, here, end);
if (err < 0)
return err;
+ /* err == 1 is not an error */

if (err == 1)
- return -EINVAL;
+ return 0;
}
return 0;
}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 7bf3011b8e77..83aea5c5cd03 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -288,7 +288,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E)
+ BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 1df959532c7d..514f2c1124b6 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -136,6 +136,29 @@ static struct ieee80211_supported_band band_5ghz = {
/* Assigned at module init. Guaranteed locally-administered and unicast. */
static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
+static void virt_wifi_inform_bss(struct wiphy *wiphy)
+{
+ u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+ struct cfg80211_bss *informed_bss;
+ static const struct {
+ u8 tag;
+ u8 len;
+ u8 ssid[8];
+ } __packed ssid = {
+ .tag = WLAN_EID_SSID,
+ .len = 8,
+ .ssid = "VirtWifi",
+ };
+
+ informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+ CFG80211_BSS_FTYPE_PRESP,
+ fake_router_bssid, tsf,
+ WLAN_CAPABILITY_ESS, 0,
+ (void *)&ssid, sizeof(ssid),
+ DBM_TO_MBM(-50), GFP_KERNEL);
+ cfg80211_put_bss(wiphy, informed_bss);
+}
+
/* Called with the rtnl lock held. */
static int virt_wifi_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
@@ -156,28 +179,13 @@ static int virt_wifi_scan(struct wiphy *wiphy,
/* Acquires and releases the rdev BSS lock. */
static void virt_wifi_scan_result(struct work_struct *work)
{
- struct {
- u8 tag;
- u8 len;
- u8 ssid[8];
- } __packed ssid = {
- .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi",
- };
- struct cfg80211_bss *informed_bss;
struct virt_wifi_wiphy_priv *priv =
container_of(work, struct virt_wifi_wiphy_priv,
scan_result.work);
struct wiphy *wiphy = priv_to_wiphy(priv);
struct cfg80211_scan_info scan_info = { .aborted = false };
- u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
- informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
- CFG80211_BSS_FTYPE_PRESP,
- fake_router_bssid, tsf,
- WLAN_CAPABILITY_ESS, 0,
- (void *)&ssid, sizeof(ssid),
- DBM_TO_MBM(-50), GFP_KERNEL);
- cfg80211_put_bss(wiphy, informed_bss);
+ virt_wifi_inform_bss(wiphy);
/* Schedules work which acquires and releases the rtnl lock. */
cfg80211_scan_done(priv->scan_request, &scan_info);
@@ -225,10 +233,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
if (!could_schedule)
return -EBUSY;
- if (sme->bssid)
+ if (sme->bssid) {
ether_addr_copy(priv->connect_requested_bss, sme->bssid);
- else
+ } else {
+ virt_wifi_inform_bss(wiphy);
eth_zero_addr(priv->connect_requested_bss);
+ }
wiphy_debug(wiphy, "connect\n");
@@ -241,11 +251,13 @@ static void virt_wifi_connect_complete(struct work_struct *work)
struct virt_wifi_netdev_priv *priv =
container_of(work, struct virt_wifi_netdev_priv, connect.work);
u8 *requested_bss = priv->connect_requested_bss;
- bool has_addr = !is_zero_ether_addr(requested_bss);
bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
u16 status = WLAN_STATUS_SUCCESS;
- if (!priv->is_up || (has_addr && !right_addr))
+ if (is_zero_ether_addr(requested_bss))
+ requested_bss = NULL;
+
+ if (!priv->is_up || (requested_bss && !right_addr))
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
else
priv->is_connected = true;
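Factoring the BSS advertisement into virt_wifi_inform_bss() lets the connect path (when no BSSID was requested) reuse the same packed SSID information element as the scan path. A sketch of that IE's wire layout, using a GCC-style packed attribute in place of the kernel's __packed macro:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct __attribute__((packed)) ssid_ie {
	uint8_t tag;	/* WLAN_EID_SSID == 0 */
	uint8_t len;
	uint8_t ssid[8];
};

int main(void)
{
	struct ssid_ie ie = { .tag = 0, .len = 8 };
	const uint8_t *raw = (const uint8_t *)&ie;

	memcpy(ie.ssid, "VirtWifi", 8);
	/* no padding: tag, length, then SSID bytes back to back */
	printf("IE: tag=%u len=%u ssid=%.8s (total %zu bytes)\n",
	       (unsigned)raw[0], (unsigned)raw[1],
	       (const char *)&raw[2], sizeof(ie));
	return 0;
}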
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index 15754671eb4d..85bf8d586c70 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
if (!IS_ERR(skb))
dev_kfree_skb(skb);
-
- skb = ERR_PTR(-ENODEV);
+ return;
}
dev->cb(dev->nfc_digital_dev, dev->arg, skb);
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 1421ffd46d9a..1af7a1e632cf 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -422,7 +422,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot allocate shash (code=%d)\n", ret);
+ "Cannot allocate shash (code=%pe)\n", tfm);
return PTR_ERR(tfm);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 11779be42186..dfd9dec0c1f6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -900,7 +900,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- cmnd->write_zeroes.control = 0;
+ if (nvme_ns_has_pi(ns))
+ cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
+ else
+ cmnd->write_zeroes.control = 0;
return BLK_STS_OK;
}
@@ -3807,6 +3810,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
static void nvme_ns_remove(struct nvme_ns *ns)
{
+ bool last_path = false;
+
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
@@ -3815,8 +3820,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
- if (list_empty(&ns->head->list))
- list_del_init(&ns->head->entry);
mutex_unlock(&ns->ctrl->subsys->lock);
synchronize_rcu(); /* guarantee not available in head->list */
@@ -3836,7 +3839,15 @@ static void nvme_ns_remove(struct nvme_ns *ns)
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);
- nvme_mpath_check_last_path(ns);
+ /* Synchronize with nvme_init_ns_head() */
+ mutex_lock(&ns->head->subsys->lock);
+ if (list_empty(&ns->head->list)) {
+ list_del_init(&ns->head->entry);
+ last_path = true;
+ }
+ mutex_unlock(&ns->head->subsys->lock);
+ if (last_path)
+ nvme_mpath_shutdown_disk(ns->head);
nvme_put_ns(ns);
}
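The reworked removal path records the last-path decision while holding subsys->lock and performs the heavier nvme_mpath_shutdown_disk() call only after dropping it, closing the race with nvme_init_ns_head(). A toy of that decide-under-lock, act-outside-lock pattern, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int paths = 1;

static void remove_path(void)
{
	int last_path = 0;

	pthread_mutex_lock(&lock);
	if (--paths == 0)
		last_path = 1;	/* record the decision under the lock */
	pthread_mutex_unlock(&lock);

	if (last_path)		/* heavy teardown runs unlocked */
		printf("last path gone: shutting down shared disk\n");
}

int main(void)
{
	remove_path();
	return 0;
}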
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0ea5298469c3..3f32c5e86bfc 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -760,14 +760,21 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
#endif
}
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
+ kblockd_schedule_work(&head->requeue_work);
if (head->disk->flags & GENHD_FL_UP) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
}
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 18ef8dd03a90..5cd1fa3b8464 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -716,14 +716,7 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-{
- struct nvme_ns_head *head = ns->head;
-
- if (head->disk && list_empty(&head->list))
- kblockd_schedule_work(&head->requeue_work);
-}
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
static inline void nvme_trace_bio_complete(struct request *req)
{
@@ -772,7 +765,7 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 320051f5a3dd..51852085239e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2631,7 +2631,9 @@ static void nvme_reset_work(struct work_struct *work)
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+ if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+ dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+ dev->ctrl.state);
result = -ENODEV;
goto out;
}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index daaf700eae79..35bac7a25422 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -56,7 +56,7 @@ TRACE_EVENT(nvme_setup_cmd,
__field(u8, fctype)
__field(u16, cid)
__field(u32, nsid)
- __field(u64, metadata)
+ __field(bool, metadata)
__array(u8, cdw10, 24)
),
TP_fast_assign(
@@ -66,13 +66,13 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->flags = cmd->common.flags;
__entry->cid = cmd->common.command_id;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
- __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ __entry->metadata = !!blk_integrity_rq(req);
__entry->fctype = cmd->fabrics.fctype;
__assign_disk_name(__entry->disk, req->rq_disk);
memcpy(__entry->cdw10, &cmd->common.cdw10,
sizeof(__entry->cdw10));
),
- TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
__entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index b9da58ee9b1e..3481479a2942 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -46,34 +46,79 @@
#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
#define AMD_PMC_RESULT_FAILED 0xFF
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
/* List of supported CPU ids */
#define AMD_CPU_ID_RV 0x15D0
#define AMD_CPU_ID_RN 0x1630
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC 0x14B5
-#define AMD_SMU_FW_VERSION 0x0
#define PMC_MSG_DELAY_MIN_US 100
#define RESPONSE_REGISTER_LOOP_MAX 200
+#define SOC_SUBSYSTEM_IP_MAX 12
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
enum amd_pmc_def {
MSG_TEST = 0x01,
MSG_OS_HINT_PCO,
MSG_OS_HINT_RN,
};
+struct amd_pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"ACP", BIT(4)},
+ {"VCN", BIT(5)},
+ {"ISP", BIT(6)},
+ {"NBIO", BIT(7)},
+ {"DF", BIT(8)},
+ {"USB0", BIT(9)},
+ {"USB1", BIT(10)},
+ {"LAPIC", BIT(11)},
+ {}
+};
+
struct amd_pmc_dev {
void __iomem *regbase;
- void __iomem *smu_base;
+ void __iomem *smu_virt_addr;
+ void __iomem *fch_virt_addr;
u32 base_addr;
u32 cpu_id;
+ u32 active_ips;
struct device *dev;
+ struct mutex lock; /* serializes SMU mailbox accesses */
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
};
static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -85,18 +130,77 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
+struct smu_metrics {
+ u32 table_version;
+ u32 hint_count;
+ u32 s0i3_cyclecount;
+ u32 timein_s0i2;
+ u64 timeentering_s0i3_lastcapture;
+ u64 timeentering_s0i3_totaltime;
+ u64 timeto_resume_to_os_lastcapture;
+ u64 timeto_resume_to_os_totaltime;
+ u64 timein_s0i3_lastcapture;
+ u64 timein_s0i3_totaltime;
+ u64 timein_swdrips_lastcapture;
+ u64 timein_swdrips_totaltime;
+ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
- u32 value;
+ struct smu_metrics table;
+ int idx;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -EINVAL;
+
+ memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+ seq_puts(s, "\n=== SMU Statistics ===\n");
+ seq_printf(s, "Table Version: %d\n", table.table_version);
+ seq_printf(s, "Hint Count: %d\n", table.hint_count);
+ seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+ seq_puts(s, "\n=== Active time (in us) ===\n");
+ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ table.timecondition_notmet_lastcapture[idx]);
+ }
- value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
- seq_printf(s, "SMU FW Info: %x\n", value);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ u64 entry_time, exit_time, residency;
+
+ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+ /* The counter runs at 48 MHz; convert ticks to microseconds */
+ residency = exit_time - entry_time;
+ do_div(residency, 48);
+
+ seq_puts(s, "=== S0ix statistics ===\n");
+ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+ seq_printf(s, "Residency Time: %lld\n", residency);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
{
debugfs_remove_recursive(dev->dbgfs_dir);
@@ -107,6 +211,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
&smu_fw_info_fops);
+ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+ &s0ix_stats_fops);
}
#else
static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -118,6 +224,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
}
#endif /* CONFIG_DEBUG_FS */
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 smu_phys_addr;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -EINVAL;
+
+ /* Get Active devices list from SMU */
+ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+ /* Get DRAM address */
+ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+ if (!dev->smu_virt_addr)
+ return -ENOMEM;
+
+ /* Start the logging */
+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+ return 0;
+}
+
static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
u32 value;
@@ -132,19 +264,19 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
}
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
{
int rc;
- u8 msg;
u32 val;
+ mutex_lock(&dev->lock);
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
- val, val > 0, PMC_MSG_DELAY_MIN_US,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- return rc;
+ goto out_unlock;
}
/* Write zero to response register */
@@ -154,34 +286,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
/* Write message ID to message ID register */
- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
- return 0;
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMC_RESULT_OK:
+ if (ret) {
+ /* PMFW may take longer to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+ }
+ break;
+ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMC_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmc_dump_registers(dev);
+ return rc;
+}
+
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PCO:
+ return MSG_OS_HINT_PCO;
+ case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_YC:
+ return MSG_OS_HINT_RN;
+ }
+ return -EINVAL;
}
static int __maybe_unused amd_pmc_suspend(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
int rc;
+ u8 msg;
+
+ /* Reset and start SMU logging to monitor the s0i3 stats */
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
- rc = amd_pmc_send_cmd(pdev, 1);
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "suspend failed\n");
- amd_pmc_dump_registers(pdev);
- return 0;
+ return rc;
}
static int __maybe_unused amd_pmc_resume(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
int rc;
+ u8 msg;
+
+ /* Let SMU know that we are looking for stats */
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
- rc = amd_pmc_send_cmd(pdev, 0);
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "resume failed\n");
- amd_pmc_dump_registers(pdev);
return 0;
}
@@ -190,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
};
static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -201,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
struct pci_dev *rdev;
- u32 base_addr_lo;
- u32 base_addr_hi;
- u64 base_addr;
+ u32 base_addr_lo, base_addr_hi;
+ u64 base_addr, fch_phys_addr;
int err;
u32 val;
@@ -248,16 +437,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
pci_dev_put(rdev);
base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
- dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
- if (!dev->smu_base)
- return -ENOMEM;
-
dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
AMD_PMC_MAPPING_SIZE);
if (!dev->regbase)
return -ENOMEM;
- amd_pmc_dump_registers(dev);
+ mutex_init(&dev->lock);
+
+ /* Use FCH registers to get the S0ix stats */
+ base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+ base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+ fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+ if (!dev->fch_virt_addr)
+ return -ENOMEM;
+
+ /* Use SMU to get the s0i3 debug stats */
+ err = amd_pmc_setup_smu_logging(dev);
+ if (err)
+ dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
@@ -269,11 +467,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
amd_pmc_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
return 0;
}
static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0005", 0},
+ {"AMDI0006", 0},
+ {"AMDI0007", 0},
{"AMD0004", 0},
{ }
};
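The s0ix_stats_show() hunk combines two 32-bit FCH counter halves into a 64-bit value and divides by 48 to turn 48 MHz ticks into microseconds. A worked userspace example of that arithmetic, with made-up register values:

#include <stdint.h>
#include <stdio.h>

#define TICKS_PER_US 48	/* FCH free-running counter: 48 MHz */

static uint64_t combine_hi_lo(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* pretend values read from the ENTRY/EXIT time registers */
	uint64_t entry = combine_hi_lo(0x0, 0x02dc6c00); /* 48e6 ticks */
	uint64_t exit  = combine_hi_lo(0x0, 0x05b8d800); /* 96e6 ticks */
	uint64_t residency_us = (exit - entry) / TICKS_PER_US;

	printf("residency: %llu us (~%llu s)\n",
	       (unsigned long long)residency_us,
	       (unsigned long long)(residency_us / 1000000));
	return 0;
}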
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 5529d7b0abea..fbb224a82e34 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 078648a9201b..e5fbe017f8e1 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -25,6 +25,7 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INT33D5", 0},
{"INTC1051", 0},
{"INTC1054", 0},
+ {"INTC1070", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 3671b5d20613..6cfed4427fb0 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -571,6 +571,11 @@ static ssize_t current_value_store(struct kobject *kobj,
else
ret = tlmi_save_bios_settings("");
+ if (!ret && !tlmi_priv.pending_changes) {
+ tlmi_priv.pending_changes = true;
+ /* let userland know it may need to check reboot pending again */
+ kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ }
out:
kfree(auth_str);
kfree(set_str);
@@ -647,6 +652,14 @@ static struct kobj_type tlmi_pwd_setting_ktype = {
.sysfs_ops = &tlmi_kobj_sysfs_ops,
};
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tlmi_priv.pending_changes);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
/* ---- Initialisation --------------------------------------------------------- */
static void tlmi_release_attr(void)
{
@@ -659,6 +672,7 @@ static void tlmi_release_attr(void)
kobject_put(&tlmi_priv.setting[i]->kobj);
}
}
+ sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
kset_unregister(tlmi_priv.attribute_kset);
/* Authentication structures */
@@ -709,8 +723,8 @@ static int tlmi_sysfs_init(void)
/* Build attribute */
tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
- ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
- NULL, "%s", tlmi_priv.setting[i]->display_name);
+ ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+ "%s", tlmi_priv.setting[i]->display_name);
if (ret)
goto fail_create_attr;
@@ -719,6 +733,10 @@ static int tlmi_sysfs_init(void)
goto fail_create_attr;
}
+ ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+ if (ret)
+ goto fail_create_attr;
+
/* Create authentication entries */
tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
&tlmi_priv.class_dev->kobj);
@@ -727,8 +745,7 @@ static int tlmi_sysfs_init(void)
goto fail_create_attr;
}
tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
- ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
- NULL, "%s", "Admin");
+ ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
if (ret)
goto fail_create_attr;
@@ -737,8 +754,7 @@ static int tlmi_sysfs_init(void)
goto fail_create_attr;
tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
- ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
- NULL, "%s", "System");
+ ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System");
if (ret)
goto fail_create_attr;
@@ -818,6 +834,7 @@ static int tlmi_analyze(void)
pr_info("Error retrieving possible values for %d : %s\n",
i, setting->display_name);
}
+ kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
tlmi_priv.setting[i] = setting;
tlmi_priv.settings_count++;
kfree(item);
@@ -844,10 +861,12 @@ static int tlmi_analyze(void)
if (pwdcfg.password_state & TLMI_PAP_PWD)
tlmi_priv.pwd_admin->valid = true;
+ kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype);
+
tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
if (!tlmi_priv.pwd_power) {
ret = -ENOMEM;
- goto fail_clear_attr;
+ goto fail_free_pwd_admin;
}
strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
@@ -859,11 +878,19 @@ static int tlmi_analyze(void)
if (pwdcfg.password_state & TLMI_POP_PWD)
tlmi_priv.pwd_power->valid = true;
+ kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype);
+
return 0;
+fail_free_pwd_admin:
+ kfree(tlmi_priv.pwd_admin);
fail_clear_attr:
- for (i = 0; i < TLMI_SETTINGS_COUNT; ++i)
- kfree(tlmi_priv.setting[i]);
+ for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+ if (tlmi_priv.setting[i]) {
+ kfree(tlmi_priv.setting[i]->possible_values);
+ kfree(tlmi_priv.setting[i]);
+ }
+ }
return ret;
}
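The think-lmi changes split kobject_init_and_add() into an early kobject_init() (at allocation time, in tlmi_analyze()) and a later kobject_add() (in tlmi_sysfs_init()), so every error path can uniformly drop the reference. A toy model of that two-phase pattern; the names are illustrative, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct kobj { int refcount; int added; };

static void kobj_init(struct kobj *k) { k->refcount = 1; k->added = 0; }
static int  kobj_add(struct kobj *k)  { k->added = 1; return 0; }
static void kobj_put(struct kobj *k)
{
	if (--k->refcount == 0) {
		printf("released %p\n", (void *)k);
		free(k);
	}
}

int main(void)
{
	struct kobj *k = malloc(sizeof(*k));

	if (!k)
		return 1;
	kobj_init(k);		/* phase 1: at allocation time */
	if (kobj_add(k)) {	/* phase 2: may fail much later */
		kobj_put(k);	/* error path: put, never a raw free() */
		return 1;
	}
	kobj_put(k);		/* normal teardown */
	return 0;
}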
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index 6fa8da7af6c7..eb598846628a 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -60,6 +60,7 @@ struct think_lmi {
bool can_get_bios_selections;
bool can_set_bios_password;
bool can_get_password_settings;
+ bool pending_changes;
struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
struct device *class_dev;
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
index b010e4ca3383..11c60a273446 100644
--- a/drivers/platform/x86/wireless-hotkey.c
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -78,7 +78,7 @@ static int wl_add(struct acpi_device *device)
err = wireless_input_setup();
if (err)
- pr_err("Failed to setup hp wireless hotkeys\n");
+ pr_err("Failed to setup wireless hotkeys\n");
return err;
}
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 84fc7a0c6ff4..4a84599ff491 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2642,6 +2642,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
//#endif
clear_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x7), host->busyluns);
+ fallthrough;
/*
* We found the command, and cleared it out. Either
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 6baa9b36367d..9c4458a99025 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -1375,6 +1375,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case IS_COMPLETE:
break;
}
+ break;
default:
break;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index c39955239d1c..19b1c0cf5f2a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2983,13 +2983,13 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_free_irq - free irq
+ * mpt3sas_base_free_irq - free irq
* @ioc: per adapter object
*
* Freeing respective reply_queue from the list.
*/
-static void
-_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
struct adapter_reply_queue *reply_q, *next;
@@ -3191,12 +3191,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _base_disable_msix - disables msix
+ * mpt3sas_base_disable_msix - disables msix
* @ioc: per adapter object
*
*/
-static void
-_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
if (!ioc->msix_enable)
return;
@@ -3304,8 +3304,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->reply_queue_count; i++) {
r = _base_request_irq(ioc, i);
if (r) {
- _base_free_irq(ioc);
- _base_disable_msix(ioc);
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
goto try_ioapic;
}
}
@@ -3342,8 +3342,8 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
- _base_free_irq(ioc);
- _base_disable_msix(ioc);
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
kfree(ioc->replyPostRegisterIndex);
ioc->replyPostRegisterIndex = NULL;
@@ -7613,14 +7613,14 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_make_ioc_ready - put controller in READY state
+ * mpt3sas_base_make_ioc_ready - put controller in READY state
* @ioc: per adapter object
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
* Return: 0 for success, non-zero for failure.
*/
-static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
u32 ioc_state;
int rc;
@@ -7897,7 +7897,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
if (ioc->chip_phys && ioc->chip) {
mpt3sas_base_mask_interrupts(ioc);
ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, SOFT_RESET);
+ mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
}
@@ -8017,7 +8017,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg_mpi = &_base_build_sg;
ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
- r = _base_make_ioc_ready(ioc, SOFT_RESET);
+ r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
if (r)
goto out_free_resources;
@@ -8471,7 +8471,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
mpt3sas_base_mask_interrupts(ioc);
- r = _base_make_ioc_ready(ioc, type);
+ r = mpt3sas_base_make_ioc_ready(ioc, type);
if (r)
goto out;
_base_clear_outstanding_commands(ioc);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index d4834c8ee9c0..0c6c3df0038d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1730,6 +1730,10 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \
status, mpi_request, sz); } while (0)
int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 866d118f7931..8e64a6f14542 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -11295,7 +11295,12 @@ scsih_shutdown(struct pci_dev *pdev)
_scsih_ir_shutdown(ioc);
_scsih_nvme_shutdown(ioc);
- mpt3sas_base_detach(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b07105ae7c91..d8b05d8b5470 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -439,39 +439,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
struct iscsi_transport *t = iface->transport;
- int param;
- int param_type;
+ int param = -1;
if (attr == &dev_attr_iface_enabled.attr)
param = ISCSI_NET_PARAM_IFACE_ENABLE;
- else if (attr == &dev_attr_iface_vlan_id.attr)
- param = ISCSI_NET_PARAM_VLAN_ID;
- else if (attr == &dev_attr_iface_vlan_priority.attr)
- param = ISCSI_NET_PARAM_VLAN_PRIORITY;
- else if (attr == &dev_attr_iface_vlan_enabled.attr)
- param = ISCSI_NET_PARAM_VLAN_ENABLED;
- else if (attr == &dev_attr_iface_mtu.attr)
- param = ISCSI_NET_PARAM_MTU;
- else if (attr == &dev_attr_iface_port.attr)
- param = ISCSI_NET_PARAM_PORT;
- else if (attr == &dev_attr_iface_ipaddress_state.attr)
- param = ISCSI_NET_PARAM_IPADDR_STATE;
- else if (attr == &dev_attr_iface_delayed_ack_en.attr)
- param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
- else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
- param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
- else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
- param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
- else if (attr == &dev_attr_iface_tcp_wsf.attr)
- param = ISCSI_NET_PARAM_TCP_WSF;
- else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
- param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
- else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
- param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
- else if (attr == &dev_attr_iface_cache_id.attr)
- param = ISCSI_NET_PARAM_CACHE_ID;
- else if (attr == &dev_attr_iface_redirect_en.attr)
- param = ISCSI_NET_PARAM_REDIRECT_EN;
else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
else if (attr == &dev_attr_iface_header_digest.attr)
@@ -508,6 +479,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
else if (attr == &dev_attr_iface_initiator_name.attr)
param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
+
+ if (param != -1)
+ return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
+
+ if (attr == &dev_attr_iface_vlan_id.attr)
+ param = ISCSI_NET_PARAM_VLAN_ID;
+ else if (attr == &dev_attr_iface_vlan_priority.attr)
+ param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+ else if (attr == &dev_attr_iface_vlan_enabled.attr)
+ param = ISCSI_NET_PARAM_VLAN_ENABLED;
+ else if (attr == &dev_attr_iface_mtu.attr)
+ param = ISCSI_NET_PARAM_MTU;
+ else if (attr == &dev_attr_iface_port.attr)
+ param = ISCSI_NET_PARAM_PORT;
+ else if (attr == &dev_attr_iface_ipaddress_state.attr)
+ param = ISCSI_NET_PARAM_IPADDR_STATE;
+ else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+ param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+ else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF;
+ else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_iface_cache_id.attr)
+ param = ISCSI_NET_PARAM_CACHE_ID;
+ else if (attr == &dev_attr_iface_redirect_en.attr)
+ param = ISCSI_NET_PARAM_REDIRECT_EN;
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
param = ISCSI_NET_PARAM_IPV4_ADDR;
@@ -598,32 +601,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
return 0;
}
- switch (param) {
- case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
- case ISCSI_IFACE_PARAM_HDRDGST_EN:
- case ISCSI_IFACE_PARAM_DATADGST_EN:
- case ISCSI_IFACE_PARAM_IMM_DATA_EN:
- case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
- case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
- case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
- case ISCSI_IFACE_PARAM_ERL:
- case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
- case ISCSI_IFACE_PARAM_FIRST_BURST:
- case ISCSI_IFACE_PARAM_MAX_R2T:
- case ISCSI_IFACE_PARAM_MAX_BURST:
- case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
- case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
- case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
- case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
- case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
- case ISCSI_IFACE_PARAM_INITIATOR_NAME:
- param_type = ISCSI_IFACE_PARAM;
- break;
- default:
- param_type = ISCSI_NET_PARAM;
- }
-
- return t->attr_is_visible(param_type, param);
+ return t->attr_is_visible(ISCSI_NET_PARAM, param);
}
static struct attribute *iscsi_iface_attrs[] = {
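The reordering above matters because attr_is_visible() must be called with the matching parameter-type namespace: iface-level parameters are resolved first and returned early as ISCSI_IFACE_PARAM, leaving the fall-through path to report everything else as ISCSI_NET_PARAM. A compact sketch of that two-pass classification, with stand-in names for the transport API:

#include <stdio.h>
#include <string.h>

enum param_type { NET_PARAM, IFACE_PARAM };

static int attr_is_visible(enum param_type type, int param)
{
	/* a real transport would consult hardware capabilities here */
	return type == IFACE_PARAM || param < 8;
}

static int classify(const char *attr)
{
	int param = -1;

	if (!strcmp(attr, "header_digest"))
		param = 1;	/* iface-level parameter */
	if (param != -1)	/* first pass resolved it: early return */
		return attr_is_visible(IFACE_PARAM, param);

	if (!strcmp(attr, "vlan_id"))
		param = 2;	/* network-level parameter */
	return attr_is_visible(NET_PARAM, param);
}

int main(void)
{
	printf("header_digest visible: %d\n", classify("header_digest"));
	printf("vlan_id visible: %d\n", classify("vlan_id"));
	return 0;
}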
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index b32f4ee88e79..ca1b2312d6e7 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -25,7 +25,7 @@
#include "target_core_alua.h"
static sense_reason_t
-sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
static sense_reason_t
@@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
}
static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
{
struct se_device *dev = cmd->se_dev;
sector_t end_lba = dev->transport->get_blocks(dev) + 1;
unsigned int sectors = sbc_get_write_same_sectors(cmd);
sense_reason_t ret;
- if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+ if ((flags & 0x04) || (flags & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
@@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
}
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
- if (flags[0] & 0x10) {
+ if (flags & 0x10) {
pr_warn("WRITE SAME with ANCHOR not supported\n");
return TCM_INVALID_CDB_FIELD;
}
@@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
*/
- if (flags[0] & 0x08) {
+ if (flags & 0x08) {
if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
if (!ops->execute_write_same)
return TCM_UNSUPPORTED_SCSI_OPCODE;
- ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+ ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
if (ret)
return ret;
@@ -717,10 +717,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_
}
static sense_reason_t
-sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
u32 sectors, bool is_write)
{
- u8 protect = cdb[1] >> 5;
int sp_ops = cmd->se_sess->sup_prot_ops;
int pi_prot_type = dev->dev_attrib.pi_prot_type;
bool fabric_prot = false;
@@ -768,7 +767,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
fallthrough;
default:
pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
- "PROTECT: 0x%02x\n", cdb[0], protect);
+ "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
return TCM_INVALID_CDB_FIELD;
}
@@ -843,7 +842,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@@ -857,7 +856,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@@ -871,7 +870,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@@ -892,7 +891,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@@ -921,7 +920,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@@ -980,7 +979,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+ ret = sbc_setup_write_same(cmd, cdb[10], ops);
if (ret)
return ret;
break;
@@ -1079,7 +1078,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
@@ -1097,7 +1096,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
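Passing CDB byte 1 (or byte 10 for WRITE SAME (32)) by value avoids handing callees a pointer into the CDB; the PROTECT field is simply the top three bits of that byte. A small example of the bit layout assumed by "flags >> 5":

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cdb1 = 0xF8;		/* WRPROTECT=7, DPO=1, FUA=1 */
	uint8_t protect = cdb1 >> 5;	/* top three bits: PROTECT field */
	uint8_t dpo = (cdb1 >> 4) & 1;
	uint8_t fua = (cdb1 >> 3) & 1;

	printf("protect=%u dpo=%u fua=%u\n",
	       (unsigned)protect, (unsigned)dpo, (unsigned)fua);
	return 0;
}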
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7e35eddd9eb7..26ceabe34de5 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -886,7 +886,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
- if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+ if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
cpu = cmd->cpuid;
else
cpu = wwn->cmd_compl_affinity;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index fdf79bcf7eb0..35d5908b5478 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
};
/* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN
+#ifdef CONFIG_WWAN_CORE
static int wdm_wwan_port_start(struct wwan_port *port)
{
struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
/* inbuf has been copied, it is safe to check for outstanding data */
schedule_work(&desc->service_outs_intr);
}
-#else /* CONFIG_WWAN */
+#else /* CONFIG_WWAN_CORE */
static void wdm_wwan_init(struct wdm_device *desc) {}
static void wdm_wwan_deinit(struct wdm_device *desc) {}
static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN */
+#endif /* CONFIG_WWAN_CORE */
/* --- error handling --- */
static void wdm_rxwork(struct work_struct *work)
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index b97464498763..9618ba622a2d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1133,7 +1133,7 @@ static int do_proc_control(struct usb_dev_state *ps,
"wIndex=%04x wLength=%04x\n",
ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
ctrl->wIndex, ctrl->wLength);
- if (ctrl->bRequestType & 0x80) {
+ if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) {
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d1efc7141333..86658a81d284 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -48,6 +48,7 @@
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
+#define USB_PING_RESPONSE_TIME 400 /* ns */
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
@@ -182,8 +183,9 @@ int usb_device_supports_lpm(struct usb_device *udev)
}
/*
- * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
- * either U1 or U2.
+ * Set the Maximum Exit Latency (MEL) for the host to wake up the path from
+ * U1/U2, send a PING to the device and receive a PING_RESPONSE.
+ * See USB 3.1 section C.1.5.2
*/
static void usb_set_lpm_mel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params,
@@ -193,35 +195,37 @@ static void usb_set_lpm_mel(struct usb_device *udev,
unsigned int hub_exit_latency)
{
unsigned int total_mel;
- unsigned int device_mel;
- unsigned int hub_mel;
/*
- * Calculate the time it takes to transition all links from the roothub
- * to the parent hub into U0. The parent hub must then decode the
- * packet (hub header decode latency) to figure out which port it was
- * bound for.
- *
- * The Hub Header decode latency is expressed in 0.1us intervals (0x1
- * means 0.1us). Multiply that by 100 to get nanoseconds.
+ * tMEL1. Time to transition the path from host to device into U0.
+ * MEL for parent already contains the delay up to parent, so only add
+ * the exit latency for the last link (pick the slower exit latency),
+ * and the hub header decode latency. See USB 3.1 section C 2.2.1
+ * Store MEL in nanoseconds
*/
total_mel = hub_lpm_params->mel +
- (hub->descriptor->u.ss.bHubHdrDecLat * 100);
+ max(udev_exit_latency, hub_exit_latency) * 1000 +
+ hub->descriptor->u.ss.bHubHdrDecLat * 100;
/*
- * How long will it take to transition the downstream hub's port into
- * U0? The greater of either the hub exit latency or the device exit
- * latency.
- *
- * The BOS U1/U2 exit latencies are expressed in 1us intervals.
- * Multiply that by 1000 to get nanoseconds.
+ * tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for
+ * each link + wHubDelay for each hub. Add only for last link.
+ * tMEL4, the time for PING_RESPONSE to traverse upstream is similar.
+ * Multiply by 2 to include it as well.
*/
- device_mel = udev_exit_latency * 1000;
- hub_mel = hub_exit_latency * 1000;
- if (device_mel > hub_mel)
- total_mel += device_mel;
- else
- total_mel += hub_mel;
+ total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) +
+ USB_TP_TRANSMISSION_DELAY) * 2;
+
+ /*
+ * tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE
+ * after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4
+ * to cover the delay if the PING_RESPONSE is queued behind a Max Packet
+ * Size DP.
+ * Note these delays should be added only once for the entire path, so
+ * add them to the MEL of the device connected to the roothub.
+ */
+ if (!hub->hdev->parent)
+ total_mel += USB_PING_RESPONSE_TIME + 2100;
udev_lpm_params->mel = total_mel;
}
@@ -4113,6 +4117,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
}
/*
+ * Don't allow device-initiated U1/U2 if the system exit latency + one bus
+ * interval is greater than the minimum service interval of any active
+ * periodic endpoint. See USB 3.2 section 9.4.9
+ */
+static bool usb_device_may_initiate_lpm(struct usb_device *udev,
+ enum usb3_link_state state)
+{
+ unsigned int sel; /* us */
+ int i, j;
+
+ if (state == USB3_LPM_U1)
+ sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
+ else if (state == USB3_LPM_U2)
+ sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
+ else
+ return false;
+
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ struct usb_interface *intf;
+ struct usb_endpoint_descriptor *desc;
+ unsigned int interval;
+
+ intf = udev->actconfig->interface[i];
+ if (!intf)
+ continue;
+
+ for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
+ desc = &intf->cur_altsetting->endpoint[j].desc;
+
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ interval = (1 << (desc->bInterval - 1)) * 125;
+ if (sel + 125 > interval)
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/*
* Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
* U1/U2 entry.
*
@@ -4184,20 +4229,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
* U1/U2_ENABLE
*/
if (udev->actconfig &&
- usb_set_device_initiated_lpm(udev, state, true) == 0) {
- if (state == USB3_LPM_U1)
- udev->usb3_lpm_u1_enabled = 1;
- else if (state == USB3_LPM_U2)
- udev->usb3_lpm_u2_enabled = 1;
- } else {
- /* Don't request U1/U2 entry if the device
- * cannot transition to U1/U2.
- */
- usb_set_lpm_timeout(udev, state, 0);
- hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+ usb_device_may_initiate_lpm(udev, state)) {
+ if (usb_set_device_initiated_lpm(udev, state, true)) {
+ /*
+ * Request to enable device initiated U1/U2 failed,
+ * better to turn off lpm in this case.
+ */
+ usb_set_lpm_timeout(udev, state, 0);
+ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+ return;
+ }
}
-}
+ if (state == USB3_LPM_U1)
+ udev->usb3_lpm_u1_enabled = 1;
+ else if (state == USB3_LPM_U2)
+ udev->usb3_lpm_u2_enabled = 1;
+}
/*
* Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
* U1/U2 entry.
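The rewritten usb_set_lpm_mel() sums tMEL1 (the slower link exit latency plus hub header decode), tMEL2 and tMEL4 (PING and PING_RESPONSE transit delays, hence the factor of two), and, once per path at the roothub, tMEL3 plus the 2100 ns queuing allowance. A worked example of the formula with made-up latencies:

#include <stdio.h>

#define USB_TP_TRANSMISSION_DELAY 40	/* ns */
#define USB_PING_RESPONSE_TIME    400	/* ns */

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int parent_mel = 0;	/* roothub: nothing upstream */
	unsigned int udev_el_us = 3;	/* device U1 exit latency, us */
	unsigned int hub_el_us = 1;	/* port U1 exit latency, us */
	unsigned int hdr_dec_lat = 4;	/* bHubHdrDecLat, 0.1 us units */
	unsigned int hub_delay_ns = 100; /* wHubDelay */
	int at_roothub = 1;

	unsigned int mel = parent_mel
		+ max_u(udev_el_us, hub_el_us) * 1000	/* tMEL1 */
		+ hdr_dec_lat * 100
		+ (hub_delay_ns + USB_TP_TRANSMISSION_DELAY) * 2; /* tMEL2+4 */
	if (at_roothub)
		mel += USB_PING_RESPONSE_TIME + 2100;	/* tMEL3 */

	printf("MEL = %u ns\n", mel); /* 3000 + 400 + 280 + 2500 = 6180 */
	return 0;
}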
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 6114cf83bb44..8239fe7129dd 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -501,10 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
- /* Fibocom L850-GL LTE Modem */
- { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index ab6b815e0089..483de2bbfaab 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -383,6 +383,9 @@ enum dwc2_ep0_state {
* 0 - No (default)
* 1 - Partial power down
* 2 - Hibernation
+ * @no_clock_gating: Specifies whether to avoid clock gating feature.
+ * 0 - No (use clock gating)
+ * 1 - Yes (avoid it)
* @lpm: Enable LPM support.
* 0 - No
* 1 - Yes
@@ -480,6 +483,7 @@ struct dwc2_core_params {
#define DWC2_POWER_DOWN_PARAM_NONE 0
#define DWC2_POWER_DOWN_PARAM_PARTIAL 1
#define DWC2_POWER_DOWN_PARAM_HIBERNATION 2
+ bool no_clock_gating;
bool lpm;
bool lpm_clock_gating;
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index a5ab03808da6..a5c52b237e72 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -556,7 +556,8 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
* If neither hibernation nor partial power down is supported,
* clock gating is used to save power.
*/
- dwc2_gadget_enter_clock_gating(hsotg);
+ if (!hsotg->params.no_clock_gating)
+ dwc2_gadget_enter_clock_gating(hsotg);
}
/*
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index c581ee41ac81..3146df6e6510 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2749,12 +2749,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
return;
}
- /* Zlp for all endpoints, for ep0 only in DATA IN stage */
+ /* ZLP for all endpoints in non-DDMA mode, for ep0 only in DATA IN stage */
if (hs_ep->send_zlp) {
- dwc2_hsotg_program_zlp(hsotg, hs_ep);
hs_ep->send_zlp = 0;
- /* transfer will be completed on next complete interrupt */
- return;
+ if (!using_desc_dma(hsotg)) {
+ dwc2_hsotg_program_zlp(hsotg, hs_ep);
+ /* transfer will be completed on next complete interrupt */
+ return;
+ }
}
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
@@ -3900,9 +3902,27 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
__func__);
}
} else {
+ /* Mask GINTSTS_GOUTNAKEFF interrupt */
+ dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
+
if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
+ if (!using_dma(hsotg)) {
+ /* Wait for GINTSTS_RXFLVL interrupt */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_RXFLVL, 100)) {
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
+ __func__);
+ } else {
+ /*
+ * Pop GLOBAL OUT NAK status packet from RxFIFO
+ * to assert GOUTNAKEFF interrupt
+ */
+ dwc2_readl(hsotg, GRXSTSP);
+ }
+ }
+
/* Wait for global nak to take effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
GINTSTS_GOUTNAKEFF, 100))
@@ -4348,6 +4368,9 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
epctl = dwc2_readl(hs, epreg);
if (value) {
+ /* Unmask GOUTNAKEFF interrupt */
+ dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
+
if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
// STALL bit will be set in GOUTNAKEFF interrupt handler
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 035d4911a3c3..2a7828971d05 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3338,7 +3338,8 @@ int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
* If neither hibernation nor partial power down is supported,
* clock gating is used to save power.
*/
- dwc2_host_enter_clock_gating(hsotg);
+ if (!hsotg->params.no_clock_gating)
+ dwc2_host_enter_clock_gating(hsotg);
break;
}
@@ -4402,7 +4403,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
* If neither hibernation nor partial power down is supported,
* clock gating is used to save power.
*/
- dwc2_host_enter_clock_gating(hsotg);
+ if (!hsotg->params.no_clock_gating)
+ dwc2_host_enter_clock_gating(hsotg);
/* After entering suspend, hardware is not accessible */
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 67c5eb140232..59e119345994 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -76,6 +76,7 @@ static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
struct dwc2_core_params *p = &hsotg->params;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->no_clock_gating = true;
p->phy_utmi_width = 8;
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index dccdf13b5f9e..5991766239ba 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1279,6 +1279,7 @@ struct dwc3 {
unsigned dis_metastability_quirk:1;
unsigned dis_split_quirk:1;
+ unsigned async_callbacks:1;
u16 imod_interval;
};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3cd294264372..2f9e45eed228 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -597,11 +597,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
- int ret;
+ int ret = -EINVAL;
- spin_unlock(&dwc->lock);
- ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
- spin_lock(&dwc->lock);
+ if (dwc->async_callbacks) {
+ spin_unlock(&dwc->lock);
+ ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
+ spin_lock(&dwc->lock);
+ }
return ret;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index af6d7f157989..45f2bc0807e8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2585,6 +2585,16 @@ static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
return ret;
}
+static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->async_callbacks = enable;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
static const struct usb_gadget_ops dwc3_gadget_ops = {
.get_frame = dwc3_gadget_get_frame,
.wakeup = dwc3_gadget_wakeup,
@@ -2596,6 +2606,7 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
.udc_set_ssp_rate = dwc3_gadget_set_ssp_rate,
.get_config_params = dwc3_gadget_config_params,
.vbus_draw = dwc3_gadget_vbus_draw,
+ .udc_async_callbacks = dwc3_gadget_async_callbacks,
};
/* -------------------------------------------------------------------------- */
@@ -3231,7 +3242,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
- if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+ if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->disconnect(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3240,7 +3251,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
- if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+ if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->suspend(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3249,7 +3260,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc)
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
- if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ if (dwc->async_callbacks && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3261,7 +3272,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
if (!dwc->gadget_driver)
return;
- if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
+ if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
spin_unlock(&dwc->lock);
usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
spin_lock(&dwc->lock);
@@ -3585,7 +3596,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
* implemented.
*/
- if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ if (dwc->async_callbacks && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index bffef8e47dac..281ca766698a 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1198,7 +1198,7 @@ void gserial_free_line(unsigned char port_num)
struct gs_port *port;
mutex_lock(&ports[port_num].lock);
- if (WARN_ON(!ports[port_num].port)) {
+ if (!ports[port_num].port) {
mutex_unlock(&ports[port_num].lock);
return;
}
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index a54d1cef17db..c0ca7144e512 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3853,6 +3853,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
return 0;
free_eps:
+ pm_runtime_disable(&pdev->dev);
tegra_xudc_free_eps(xudc);
free_event_ring:
tegra_xudc_free_event_ring(xudc);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 36f5bf6a0752..10b0365f3439 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -703,24 +703,28 @@ EXPORT_SYMBOL_GPL(ehci_setup);
static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- u32 status, masked_status, pcd_status = 0, cmd;
+ u32 status, current_status, masked_status, pcd_status = 0;
+ u32 cmd;
int bh;
spin_lock(&ehci->lock);
- status = ehci_readl(ehci, &ehci->regs->status);
+ status = 0;
+ current_status = ehci_readl(ehci, &ehci->regs->status);
+restart:
/* e.g. cardbus physical eject */
- if (status == ~(u32) 0) {
+ if (current_status == ~(u32) 0) {
ehci_dbg (ehci, "device removed\n");
goto dead;
}
+ status |= current_status;
/*
* We don't use STS_FLR, but some controllers don't like it to
* remain on, so mask it out along with the other status bits.
*/
- masked_status = status & (INTR_MASK | STS_FLR);
+ masked_status = current_status & (INTR_MASK | STS_FLR);
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
@@ -730,6 +734,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* clear (just) interrupts */
ehci_writel(ehci, masked_status, &ehci->regs->status);
+
+ /* For edge interrupts, don't race with an interrupt bit being raised */
+ current_status = ehci_readl(ehci, &ehci->regs->status);
+ if (current_status & INTR_MASK)
+ goto restart;
+
cmd = ehci_readl(ehci, &ehci->regs->command);
bh = 0;
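
The restart loop above guards against losing edge-triggered events: once the observed bits are acked, a bit raised in the meantime produces no new edge on the IRQ line, so the handler re-reads the status and loops while anything is pending. A standalone sketch of the pattern, where read_status()/clear_status() are hypothetical stand-ins for the ehci_readl()/ehci_writel() accessors and the late-arriving bit is simulated:

#include <stdio.h>

static unsigned int hw_status = 0x5;
static int late_event = 1;	/* one event arrives while we are acking */

static unsigned int read_status(void)
{
	return hw_status;
}

static void clear_status(unsigned int bits)
{
	hw_status &= ~bits;
	if (late_event) {		/* simulate a bit raised mid-ack */
		hw_status |= 0x8;
		late_event = 0;
	}
}

static unsigned int handle_events(void)
{
	unsigned int status = 0;
	unsigned int current_status = read_status();

restart:
	status |= current_status;	/* accumulate everything seen */
	clear_status(current_status);	/* ack only what was observed */

	/* On an edge-triggered line a bit raised between the read and the
	 * ack fires no new edge, so re-read and loop while bits remain. */
	current_status = read_status();
	if (current_status)
		goto restart;

	return status;
}

int main(void)
{
	printf("handled %#x\n", handle_events());	/* prints 0xd */
	return 0;
}
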
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index e7a8e0609853..59cc1bc7f12f 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -153,8 +153,6 @@ struct max3421_hcd {
*/
struct urb *curr_urb;
enum scheduling_pass sched_pass;
- struct usb_device *loaded_dev; /* dev that's loaded into the chip */
- int loaded_epnum; /* epnum whose toggles are loaded */
int urb_done; /* > 0 -> no errors, < 0: errno */
size_t curr_len;
u8 hien;
@@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
* Caller must NOT hold HCD spinlock.
*/
static void
-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
- int force_toggles)
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
{
- struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
- int old_epnum, same_ep, rcvtog, sndtog;
- struct usb_device *old_dev;
+ int rcvtog, sndtog;
u8 hctl;
- old_dev = max3421_hcd->loaded_dev;
- old_epnum = max3421_hcd->loaded_epnum;
-
- same_ep = (dev == old_dev && epnum == old_epnum);
- if (same_ep && !force_toggles)
- return;
-
- if (old_dev && !same_ep) {
- /* save the old end-points toggles: */
- u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
-
- rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
- sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
-
- /* no locking: HCD (i.e., we) own toggles, don't we? */
- usb_settoggle(old_dev, old_epnum, 0, rcvtog);
- usb_settoggle(old_dev, old_epnum, 1, sndtog);
- }
/* setup new endpoint's toggle bits: */
rcvtog = usb_gettoggle(dev, epnum, 0);
sndtog = usb_gettoggle(dev, epnum, 1);
hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
- max3421_hcd->loaded_epnum = epnum;
spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
/*
@@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
* address-assignment so it's best to just always load the
* address whenever the end-point changed/was forced.
*/
- max3421_hcd->loaded_dev = dev;
spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
}
@@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb, *curr_urb = NULL;
struct max3421_ep *max3421_ep;
- int epnum, force_toggles = 0;
+ int epnum;
struct usb_host_endpoint *ep;
struct list_head *pos;
unsigned long flags;
@@ -777,7 +752,6 @@ done:
usb_settoggle(urb->dev, epnum, 0, 1);
usb_settoggle(urb->dev, epnum, 1, 1);
max3421_ep->pkt_state = PKT_STATE_SETUP;
- force_toggles = 1;
} else
max3421_ep->pkt_state = PKT_STATE_TRANSFER;
}
@@ -785,7 +759,7 @@ done:
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
max3421_ep->last_active = max3421_hcd->frame_number;
- max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+ max3421_set_address(hcd, urb->dev, epnum);
max3421_set_speed(hcd, urb->dev);
max3421_next_transfer(hcd, 0);
return 1;
@@ -1379,6 +1353,16 @@ max3421_urb_done(struct usb_hcd *hcd)
status = 0;
urb = max3421_hcd->curr_urb;
if (urb) {
+ /* save the old end-points toggles: */
+ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+ int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+ int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+ int epnum = usb_endpoint_num(&urb->ep->desc);
+
+ /* no locking: HCD (i.e., we) own toggles, don't we? */
+ usb_settoggle(urb->dev, epnum, 0, rcvtog);
+ usb_settoggle(urb->dev, epnum, 1, sndtog);
+
max3421_hcd->curr_urb = NULL;
spin_lock_irqsave(&max3421_hcd->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
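
With this change the data toggles are captured once, at URB completion, straight from the HRSL result register, instead of being cached per loaded endpoint. A tiny sketch of the bit extraction, with illustrative bit positions standing in for the driver's MAX3421_HRSL_*_BIT definitions:

#include <stdio.h>

/* Illustrative positions; the real values come from the MAX3421E
 * HRSL register definitions in the driver. */
#define HRSL_RCVTOGRD_BIT 4
#define HRSL_SNDTOGRD_BIT 5

int main(void)
{
	unsigned char hrsl = 0x30;	/* pretend result of spi_rd8(HRSL) */
	int rcvtog = (hrsl >> HRSL_RCVTOGRD_BIT) & 1;
	int sndtog = (hrsl >> HRSL_SNDTOGRD_BIT) & 1;

	/* these would be fed back via usb_settoggle() in the driver */
	printf("rcvtog=%d sndtog=%d\n", rcvtog, sndtog);
	return 0;
}
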
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e9b18fc17617..151e93c4bd57 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1638,11 +1638,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
* Inform the usbcore about resume-in-progress by returning
* a non-zero value even if there are no status changes.
*/
+ spin_lock_irqsave(&xhci->lock, flags);
+
status = bus_state->resuming_ports;
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
- spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
temp = readl(ports[i]->addr);
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 1da647961c25..5923844ed821 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -207,8 +207,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
return 0;
case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
- dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
- break;
+ return 0;
case RENESAS_ROM_STATUS_ERROR: /* Error State */
default: /* All other states are marked as "Reserved states" */
@@ -225,12 +224,13 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
u8 fw_state;
int err;
- /*
- * Only if device has ROM and loaded FW we can skip loading and
- * return success. Otherwise (even unknown state), attempt to load FW.
- */
- if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev))
- return 0;
+ /* Check if the device has a ROM with firmware loaded; if so, skip everything */
+ err = renesas_check_rom(pdev);
+ if (err) { /* we have rom */
+ err = renesas_check_rom_state(pdev);
+ if (!err)
+ return err;
+ }
/*
* Test if the device actually needs the firmware. As most
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 18c2bbddf080..1c9a7957c45c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -636,7 +636,14 @@ static const struct pci_device_id pci_ids[] = {
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/*
+ * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
+ * load firmware, so don't encumber the xhci-pci driver with it.
+ */
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
MODULE_FIRMWARE("renesas_usb_fw.mem");
+#endif
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 83ed5089475a..1b24492bb4e5 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -86,10 +86,10 @@ static struct usb_phy *__device_to_usb_phy(struct device *dev)
list_for_each_entry(usb_phy, &phy_list, head) {
if (usb_phy->dev == dev)
- break;
+ return usb_phy;
}
- return usb_phy;
+ return NULL;
}
static void usb_phy_set_default_current(struct usb_phy *usb_phy)
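
The lookup now returns the matching entry from inside the loop and NULL otherwise; with list_for_each_entry() the loop cursor after a full traversal is not a valid entry but an address computed from the list head itself. A simplified sketch of the corrected shape (plain singly-linked list, not the kernel macros):

#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *next; };

/* Return the match directly; fall off the loop only when nothing
 * matched, so the caller gets an unambiguous NULL. */
static struct node *find(struct node *head, int id)
{
	for (struct node *n = head; n; n = n->next)
		if (n->id == id)
			return n;
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("found 2: %p\n", (void *)find(&a, 2));
	printf("found 9: %p\n", (void *)find(&a, 9));
	return 0;
}
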
@@ -150,8 +150,14 @@ static int usb_phy_uevent(struct device *dev, struct kobj_uevent_env *env)
struct usb_phy *usb_phy;
char uchger_state[50] = { 0 };
char uchger_type[50] = { 0 };
+ unsigned long flags;
+ spin_lock_irqsave(&phy_lock, flags);
usb_phy = __device_to_usb_phy(dev);
+ spin_unlock_irqrestore(&phy_lock, flags);
+
+ if (!usb_phy)
+ return -ENODEV;
snprintf(uchger_state, ARRAY_SIZE(uchger_state),
"USB_CHARGER_STATE=%s", usb_chger_state[usb_phy->chg_state]);
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b5e7991dc7d9..a3c2b01ccf7b 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
+static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
+static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
if (chan) {
dmaengine_terminate_all(chan);
usbhsf_dma_unmap(pkt);
+ } else {
+ if (usbhs_pipe_is_dir_in(pipe))
+ usbhsf_rx_irq_ctrl(pipe, 0);
+ else
+ usbhsf_tx_irq_ctrl(pipe, 0);
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 09b845d0da41..3c80bfbf3bec 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -202,8 +203,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
- { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
- { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
+ { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */
+ { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */
{ USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7608584ef4fe..0fbe253dc570 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
+#define UBLOX_PRODUCT_R6XX 0x90fa
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = {
/* u-blox products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+ .driver_info = RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index f9677a5ec31b..c35a6db993f1 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Julian Sikorski <belegdol@gmail.com> */
+UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
+ "LaCie",
+ "Rugged USB3-FW",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
index 6eaeba9b096e..e7745d1c2a5c 100644
--- a/drivers/usb/typec/stusb160x.c
+++ b/drivers/usb/typec/stusb160x.c
@@ -686,6 +686,15 @@ static int stusb160x_probe(struct i2c_client *client)
return -ENODEV;
/*
+ * This fwnode has a "compatible" property, but is never populated as a
+ * struct device. Instead we simply parse it to read the properties.
+ * This breaks fw_devlink=on. To maintain backward compatibility
+ * with existing DT files, we work around this by deleting any
+ * fwnode_links to/from this fwnode.
+ */
+ fw_devlink_purge_absent_suppliers(fwnode);
+
+ /*
* When both VDD and VSYS power supplies are present, the low power
* supply VSYS is selected when VSYS voltage is above 3.1 V.
* Otherwise VDD is selected.
@@ -739,10 +748,6 @@ static int stusb160x_probe(struct i2c_client *client)
typec_set_pwr_opmode(chip->port, chip->pwr_opmode);
if (client->irq) {
- ret = stusb160x_irq_init(chip, client->irq);
- if (ret)
- goto port_unregister;
-
chip->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(chip->role_sw)) {
ret = PTR_ERR(chip->role_sw);
@@ -752,6 +757,10 @@ static int stusb160x_probe(struct i2c_client *client)
ret);
goto port_unregister;
}
+
+ ret = stusb160x_irq_init(chip, client->irq);
+ if (ret)
+ goto role_sw_put;
} else {
/*
* If Source or Dual power role, need to enable VDD supply
@@ -775,6 +784,9 @@ static int stusb160x_probe(struct i2c_client *client)
return 0;
+role_sw_put:
+ if (chip->role_sw)
+ usb_role_switch_put(chip->role_sw);
port_unregister:
typec_unregister_port(chip->port);
all_reg_disable:
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 938219bc1b4b..21b3ae25c76d 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -629,6 +629,15 @@ static int tps6598x_probe(struct i2c_client *client)
if (!fwnode)
return -ENODEV;
+ /*
+ * This fwnode has a "compatible" property, but is never populated as a
+ * struct device. Instead we simply parse it to read the properties.
+ * This breaks fw_devlink=on. To maintain backward compatibility
+ * with existing DT files, we work around this by deleting any
+ * fwnode_links to/from this fwnode.
+ */
+ fw_devlink_purge_absent_suppliers(fwnode);
+
tps->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(tps->role_sw)) {
ret = PTR_ERR(tps->role_sw);
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 06fb7a93a1bd..4d5ae61580aa 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -168,21 +168,6 @@ config OSF4_COMPAT
with v4 shared libraries freely available from Compaq. If you're
going to use shared libraries from Tru64 version 5.0 or later, say N.
-config BINFMT_EM86
- tristate "Kernel support for Linux/Intel ELF binaries"
- depends on ALPHA
- help
- Say Y here if you want to be able to execute Linux/Intel ELF
- binaries just like native Alpha binaries on your Alpha machine. For
- this to work, you need to have the emulator /usr/bin/em86 in place.
-
- You can get the same functionality by saying N here and saying Y to
- "Kernel support for MISC binaries".
-
- You may answer M to compile the emulation support as a module and
- later load the module when you want to use a Linux/Intel binary. The
- module will be called binfmt_em86. If unsure, say Y.
-
config BINFMT_MISC
tristate "Kernel support for MISC binaries"
help
diff --git a/fs/Makefile b/fs/Makefile
index 9c708e1fbe8f..f98f3e691c37 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FS_VERITY) += verity/
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
-obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
obj-$(CONFIG_BINFMT_SCRIPT) += binfmt_script.o
obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
deleted file mode 100644
index 06b9b9fddf70..000000000000
--- a/fs/binfmt_em86.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/fs/binfmt_em86.c
- *
- * Based on linux/fs/binfmt_script.c
- * Copyright (C) 1996 Martin von Löwis
- * original #!-checking implemented by tytso.
- *
- * em86 changes Copyright (C) 1997 Jim Paradis
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/binfmts.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/errno.h>
-
-
-#define EM86_INTERP "/usr/bin/em86"
-#define EM86_I_NAME "em86"
-
-static int load_em86(struct linux_binprm *bprm)
-{
- const char *i_name, *i_arg;
- char *interp;
- struct file * file;
- int retval;
- struct elfhdr elf_ex;
-
- /* Make sure this is a Linux/Intel ELF executable... */
- elf_ex = *((struct elfhdr *)bprm->buf);
-
- if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
- return -ENOEXEC;
-
- /* First of all, some simple consistency checks */
- if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
- (!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) ||
- !bprm->file->f_op->mmap) {
- return -ENOEXEC;
- }
-
- /* Need to be able to load the file after exec */
- if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
- return -ENOENT;
-
- /* Unlike in the script case, we don't have to do any hairy
- * parsing to find our interpreter... it's hardcoded!
- */
- interp = EM86_INTERP;
- i_name = EM86_I_NAME;
- i_arg = NULL; /* We reserve the right to add an arg later */
-
- /*
- * Splice in (1) the interpreter's name for argv[0]
- * (2) (optional) argument to interpreter
- * (3) filename of emulated file (replace argv[0])
- *
- * This is done in reverse order, because of how the
- * user environment and arguments are stored.
- */
- remove_arg_zero(bprm);
- retval = copy_string_kernel(bprm->filename, bprm);
- if (retval < 0) return retval;
- bprm->argc++;
- if (i_arg) {
- retval = copy_string_kernel(i_arg, bprm);
- if (retval < 0) return retval;
- bprm->argc++;
- }
- retval = copy_string_kernel(i_name, bprm);
- if (retval < 0) return retval;
- bprm->argc++;
-
- /*
- * OK, now restart the process with the interpreter's inode.
- * Note that we use open_exec() as the name is now in kernel
- * space, and we don't need to copy it.
- */
- file = open_exec(interp);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- bprm->interpreter = file;
- return 0;
-}
-
-static struct linux_binfmt em86_format = {
- .module = THIS_MODULE,
- .load_binary = load_em86,
-};
-
-static int __init init_em86_binfmt(void)
-{
- register_binfmt(&em86_format);
- return 0;
-}
-
-static void __exit exit_em86_binfmt(void)
-{
- unregister_binfmt(&em86_format);
-}
-
-core_initcall(init_em86_binfmt);
-module_exit(exit_em86_binfmt);
-MODULE_LICENSE("GPL");
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0c424a0cadaa..9ef4f1fc2cb0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -812,6 +812,8 @@ static void bdev_free_inode(struct inode *inode)
free_percpu(bdev->bd_stats);
kfree(bdev->bd_meta_info);
+ if (!bdev_is_partition(bdev))
+ kfree(bdev->bd_disk);
kmem_cache_free(bdev_cachep, BDEV_I(inode));
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 7a8a2fc19533..78b202d198b8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1488,15 +1488,15 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist **roots,
- bool ignore_offset)
+ bool ignore_offset, bool skip_commit_root_sem)
{
int ret;
- if (!trans)
+ if (!trans && !skip_commit_root_sem)
down_read(&fs_info->commit_root_sem);
ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
time_seq, roots, ignore_offset);
- if (!trans)
+ if (!trans && !skip_commit_root_sem)
up_read(&fs_info->commit_root_sem);
return ret;
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 17abde7f794c..ff5f07f9940b 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -47,7 +47,8 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
const u64 *extent_item_pos, bool ignore_offset);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots, bool ignore_offset);
+ u64 time_seq, struct ulist **roots, bool ignore_offset,
+ bool skip_commit_root_sem);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
u32 name_len, unsigned long name_off,
struct extent_buffer *eb_in, u64 parent,
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9a023ae0f98b..30d82cdf128c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -352,7 +352,7 @@ static void end_compressed_bio_write(struct bio *bio)
btrfs_record_physical_zoned(inode, cb->start, bio);
btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
cb->start, cb->start + cb->len - 1,
- bio->bi_status == BLK_STS_OK);
+ !cb->errors);
end_compressed_writeback(inode, cb);
/* note, our inode could be gone now */
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 06bc842ecdb3..ca848b183474 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -974,7 +974,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
if (qrecord_inserted)
- btrfs_qgroup_trace_extent_post(fs_info, record);
+ btrfs_qgroup_trace_extent_post(trans, record);
return 0;
}
@@ -1069,7 +1069,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return btrfs_qgroup_trace_extent_post(trans, record);
return 0;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b117dd3b8172..a59ab7b9aea0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -209,7 +209,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
- const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
+ const int num_pages = num_extent_pages(buf);
const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
char *kaddr;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d296483d148f..268ce58d4569 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6019,6 +6019,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
mutex_lock(&fs_info->fs_devices->device_list_mutex);
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ continue;
+
ret = btrfs_trim_free_extents(device, &group_trimmed);
if (ret) {
dev_failed++;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8f60314c36c5..0117d867ecf8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2992,7 +2992,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
- if (ordered_extent->disk)
+ if (ordered_extent->bdev)
btrfs_rewrite_logical_zoned(ordered_extent);
btrfs_free_io_failure_record(inode, start, end);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6eb41b7c0c84..5c0f8481e25e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -190,8 +190,6 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
entry->truncated_len = (u64)-1;
entry->qgroup_rsv = ret;
entry->physical = (u64)-1;
- entry->disk = NULL;
- entry->partno = (u8)-1;
ASSERT(type == BTRFS_ORDERED_REGULAR ||
type == BTRFS_ORDERED_NOCOW ||
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 566472004edd..b2d88aba8420 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -145,8 +145,7 @@ struct btrfs_ordered_extent {
* command in a workqueue context
*/
u64 physical;
- struct gendisk *disk;
- u8 partno;
+ struct block_device *bdev;
};
/*
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 07ec06d4e972..0fa121171ca1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1704,17 +1704,39 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord)
{
struct ulist *old_root;
u64 bytenr = qrecord->bytenr;
int ret;
- ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
+ /*
+ * We are always called in a context where we are already holding a
+ * transaction handle. Often we are called when adding a data delayed
+ * reference from btrfs_truncate_inode_items() (truncating or unlinking),
+ * in which case we will be holding a write lock on an extent buffer from a
+ * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
+ * acquire fs_info->commit_root_sem, because that is a higher level lock
+ * that must be acquired before locking any extent buffers.
+ *
+ * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
+ * but we can't pass it a non-NULL transaction handle, because otherwise
+ * it would not use commit roots and would lock extent buffers, causing
+ * a deadlock if it ends up trying to read lock the same extent buffer
+ * that was previously write locked at btrfs_truncate_inode_items().
+ *
+ * So pass a NULL transaction handle to btrfs_find_all_roots() and
+ * explicitly tell it to not acquire the commit_root_sem - if we are
+ * holding a transaction handle we don't need its protection.
+ */
+ ASSERT(trans != NULL);
+
+ ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
+ false, true);
if (ret < 0) {
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_warn(fs_info,
+ trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ btrfs_warn(trans->fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
ret);
return 0;
@@ -1758,7 +1780,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
kfree(record);
return 0;
}
- return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return btrfs_qgroup_trace_extent_post(trans, record);
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
@@ -2629,7 +2651,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
/* Search commit root to find old_roots */
ret = btrfs_find_all_roots(NULL, fs_info,
record->bytenr, 0,
- &record->old_roots, false);
+ &record->old_roots, false, false);
if (ret < 0)
goto cleanup;
}
@@ -2645,7 +2667,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
* current root. It's safe inside commit_transaction().
*/
ret = btrfs_find_all_roots(trans, fs_info,
- record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
+ record->bytenr, BTRFS_SEQ_LAST, &new_roots, false, false);
if (ret < 0)
goto cleanup;
if (qgroup_to_skip) {
@@ -3179,7 +3201,7 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
num_bytes = found.offset;
ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
- &roots, false);
+ &roots, false, false);
if (ret < 0)
goto out;
/* For rescan, just pass old_roots as NULL */
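
The new skip_commit_root_sem flag is a lock-ordering escape hatch: a caller that already holds a transaction handle must not let the helper acquire the outer commit_root_sem, which ranks above the extent-buffer locks the caller may hold. A minimal pthread sketch of that pattern, with stand-in names (find_all_roots(), walk_roots(), the rwlock) rather than btrfs internals:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;

static int walk_roots(void)
{
	return 0;	/* stands in for the expensive backref walk */
}

static int find_all_roots(int have_trans, int skip_commit_root_sem)
{
	int take_lock = !have_trans && !skip_commit_root_sem;
	int ret;

	if (take_lock)
		pthread_rwlock_rdlock(&commit_root_sem);
	ret = walk_roots();
	if (take_lock)
		pthread_rwlock_unlock(&commit_root_sem);
	return ret;
}

int main(void)
{
	/* caller holds a handle: NULL-trans semantics, but skip the lock */
	printf("ret=%d\n", find_all_roots(0, 1));
	return 0;
}

The decision to take the outer lock is thus made once, from the caller's context, and skipped whenever a higher-ranked lock is already held.
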
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 7283e4f549af..880e9df0dac1 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -298,7 +298,7 @@ int btrfs_qgroup_trace_extent_nolock(
* using current root, then we can move all expensive backref walk out of
* transaction committing, but not now as qgroup accounting will be wrong again.
*/
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord);
/*
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index f3137285a9e2..98b5aaba46f1 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -224,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
* quota.
*/
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -237,7 +237,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -261,7 +261,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
new_roots = NULL;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -273,7 +273,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return -EINVAL;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -325,7 +325,7 @@ static int test_multiple_refs(struct btrfs_root *root,
}
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -338,7 +338,7 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -360,7 +360,7 @@ static int test_multiple_refs(struct btrfs_root *root,
}
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -373,7 +373,7 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -401,7 +401,7 @@ static int test_multiple_refs(struct btrfs_root *root,
}
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -414,7 +414,7 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index dc6eb088d73e..e6430ac9bbe8 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5526,16 +5526,29 @@ log_extents:
spin_lock(&inode->lock);
inode->logged_trans = trans->transid;
/*
- * Don't update last_log_commit if we logged that an inode exists
- * after it was loaded to memory (full_sync bit set).
- * This is to prevent data loss when we do a write to the inode,
- * then the inode gets evicted after all delalloc was flushed,
- * then we log it exists (due to a rename for example) and then
- * fsync it. This last fsync would do nothing (not logging the
- * extents previously written).
+ * Don't update last_log_commit if we logged that an inode exists.
+ * We do this for two reasons:
+ *
+ * 1) We might have had buffered writes to this inode that were
+ * flushed and had their ordered extents completed in this
+ * transaction, but we did not previously log the inode with
+ * LOG_INODE_ALL. Later the inode was evicted and after that
+ * it was loaded again and this LOG_INODE_EXISTS log operation
+ * happened. We must make sure that if an explicit fsync against
+ * the inode is performed later, it logs the new extents, an
+ * updated inode item, etc, and syncs the log. The same logic
+ * applies to direct IO writes instead of buffered writes.
+ *
+ * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
+ * is logged with an i_size of 0 or whatever value was logged
+ * before. If later the i_size of the inode is increased by a
+ * truncate operation, the log is synced through an fsync of
+ * some other inode and then finally an explicit fsync against
+ * this inode is made, we must make sure this fsync logs the
+ * inode with the new i_size, the hole between old i_size and
+ * the new i_size, and syncs the log.
*/
- if (inode_only != LOG_INODE_EXISTS ||
- !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+ if (inode_only != LOG_INODE_EXISTS)
inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
}
@@ -6490,8 +6503,8 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* if this inode hasn't been logged and directory we're renaming it
* from hasn't been logged, we don't need to log it
*/
- if (inode->logged_trans < trans->transid &&
- (!old_dir || old_dir->logged_trans < trans->transid))
+ if (!inode_logged(trans, inode) &&
+ (!old_dir || !inode_logged(trans, old_dir)))
return;
/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1e4d43ffe38b..70f94b75f25a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1078,6 +1078,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
list_del_init(&device->dev_alloc_list);
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 297c0b1c0634..907c2cc45c9c 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1349,8 +1349,7 @@ void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
return;
ordered->physical = physical;
- ordered->disk = bio->bi_bdev->bd_disk;
- ordered->partno = bio->bi_bdev->bd_partno;
+ ordered->bdev = bio->bi_bdev;
btrfs_put_ordered_extent(ordered);
}
@@ -1362,18 +1361,16 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
struct extent_map_tree *em_tree;
struct extent_map *em;
struct btrfs_ordered_sum *sum;
- struct block_device *bdev;
u64 orig_logical = ordered->disk_bytenr;
u64 *logical = NULL;
int nr, stripe_len;
/* Zoned devices should not have partitions, so the bdev is never a partition */
- ASSERT(ordered->partno == 0);
- bdev = bdgrab(ordered->disk->part0);
- if (WARN_ON(!bdev))
+ ASSERT(!bdev_is_partition(ordered->bdev));
+ if (WARN_ON(!ordered->bdev))
return;
- if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
+ if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
ordered->physical, &logical, &nr,
&stripe_len)))
goto out;
@@ -1402,7 +1399,6 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
out:
kfree(logical);
- bdput(bdev);
}
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a818213c972f..9db1b39df773 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4456,7 +4456,7 @@ bool check_session_state(struct ceph_mds_session *s)
break;
case CEPH_MDS_SESSION_CLOSING:
/* Should never reach this when we're unmounting */
- WARN_ON_ONCE(true);
+ WARN_ON_ONCE(s->s_ttl);
fallthrough;
case CEPH_MDS_SESSION_NEW:
case CEPH_MDS_SESSION_RESTARTING:
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index f72e3b3dca69..65d1a65bfc37 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -873,8 +873,11 @@ PsxDelete:
InformationLevel) - 4;
offset = param_offset + params;
- /* Setup pointer to Request Data (inode type) */
- pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
+ /* Setup pointer to Request Data (inode type).
+ * Note that SMB offsets are from the beginning of the SMB, which starts
+ * 4 bytes in, after the RFC1001 length field.
+ */
+ pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4);
pRqD->type = cpu_to_le16(type);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
@@ -1081,7 +1084,8 @@ PsxCreat:
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
- pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
+ /* SMB offsets are from the beginning of the SMB, which starts 4 bytes in, after the RFC1001 length field */
+ pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4);
pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
pdata->Permissions = cpu_to_le64(mode);
pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 1b04d6ec14dd..3781eee9360a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -220,7 +220,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
#ifdef CONFIG_CIFS_DFS_UPCALL
struct super_block *sb = NULL;
struct cifs_sb_info *cifs_sb = NULL;
- struct dfs_cache_tgt_list tgt_list = {0};
+ struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
struct dfs_cache_tgt_iterator *tgt_it = NULL;
#endif
@@ -3130,7 +3130,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
{
int rc;
char *npath = NULL;
- struct dfs_cache_tgt_list tgt_list = {0};
+ struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
struct dfs_cache_tgt_iterator *tgt_it = NULL;
struct smb3_fs_context tmp_ctx = {NULL};
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 7c1769714609..283745592844 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -19,6 +19,7 @@
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
+#include "dns_resolve.h"
#include "dfs_cache.h"
@@ -911,6 +912,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
err_free_it:
list_for_each_entry_safe(it, nit, head, it_list) {
+ list_del(&it->it_list);
kfree(it->it_name);
kfree(it);
}
@@ -1293,6 +1295,194 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
return 0;
}
+static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+{
+ char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+ const char *host;
+ size_t hostlen;
+ char *ip = NULL;
+ struct sockaddr sa;
+ bool match;
+ int rc;
+
+ if (strcasecmp(s1, s2))
+ return false;
+
+ /*
+ * Resolve the share's hostname and check if the server address matches. Otherwise
+ * just ignore it, as we may lack an upcall to resolve the hostname or may have
+ * failed to convert the ip address.
+ */
+ match = true;
+ extract_unc_hostname(s1, &host, &hostlen);
+ scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
+
+ rc = dns_resolve_server_name_to_ip(unc, &ip, NULL);
+ if (rc < 0) {
+ cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
+ __func__, (int)hostlen, host);
+ return true;
+ }
+
+ if (!cifs_convert_address(&sa, ip, strlen(ip))) {
+ cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
+ __func__, ip);
+ } else {
+ mutex_lock(&server->srv_mutex);
+ match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
+ mutex_unlock(&server->srv_mutex);
+ }
+
+ kfree(ip);
+ return match;
+}
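
After the case-insensitive name match, target_share_equal() resolves the share's host and compares it against the connected server's address, deliberately treating a failed resolution as a match. A userspace sketch of the resolve-and-compare step, with getaddrinfo() standing in for the kernel DNS upcall and only the first returned address compared:

#include <netdb.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool hosts_match(const char *h1, const char *h2)
{
	struct addrinfo *a = NULL, *b = NULL;
	bool match = true;	/* unresolvable: assume match, as above */

	if (getaddrinfo(h1, NULL, NULL, &a) || getaddrinfo(h2, NULL, NULL, &b))
		goto out;

	match = a->ai_addrlen == b->ai_addrlen &&
		memcmp(a->ai_addr, b->ai_addr, a->ai_addrlen) == 0;
out:
	if (a)
		freeaddrinfo(a);
	if (b)
		freeaddrinfo(b);
	return match;
}

int main(void)
{
	printf("match=%d\n", hosts_match("localhost", "localhost"));
	return 0;
}
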
+
+/*
+ * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
+ * target shares in @refs.
+ */
+static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
+ const struct dfs_info3_param *refs, int numrefs)
+{
+ struct dfs_cache_tgt_iterator *it;
+ int i;
+
+ for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
+ for (i = 0; i < numrefs; i++) {
+ if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
+ refs[i].node_name))
+ return;
+ }
+ }
+
+ cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
+ for (i = 0; i < tcon->ses->chan_count; i++) {
+ spin_lock(&GlobalMid_Lock);
+ if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
+ tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
+ }
+}
+
+/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+{
+ const char *path = tcon->dfs_path + 1;
+ struct cifs_ses *ses;
+ struct cache_entry *ce;
+ struct dfs_info3_param *refs = NULL;
+ int numrefs = 0;
+ bool needs_refresh = false;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ int rc = 0;
+ unsigned int xid;
+
+ ses = find_ipc_from_server_path(sessions, path);
+ if (IS_ERR(ses)) {
+ cifs_dbg(FYI, "%s: could not find ipc session\n", __func__);
+ return PTR_ERR(ses);
+ }
+
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+ if (!IS_ERR(ce)) {
+ rc = get_targets(ce, &tl);
+ if (rc)
+ cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+ }
+ up_read(&htable_rw_lock);
+
+ if (!needs_refresh) {
+ rc = 0;
+ goto out;
+ }
+
+ xid = get_xid();
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ free_xid(xid);
+
+ /* Create or update a cache entry with the new referral */
+ if (!rc) {
+ dump_refs(refs, numrefs);
+
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ if (IS_ERR(ce))
+ add_cache_entry_locked(refs, numrefs);
+ else if (force_refresh || cache_entry_expired(ce))
+ update_cache_entry_locked(ce, refs, numrefs);
+ up_write(&htable_rw_lock);
+
+ mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+ }
+
+out:
+ dfs_cache_free_tgts(&tl);
+ free_dfs_info_array(refs, numrefs);
+ return rc;
+}
+
+/**
+ * dfs_cache_remount_fs - remount a DFS share
+ *
+ * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
+ * match any of the new targets, mark it for reconnect.
+ *
+ * @cifs_sb: cifs superblock.
+ *
+ * Return zero if remounted, otherwise non-zero.
+ */
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+{
+ struct cifs_tcon *tcon;
+ struct mount_group *mg;
+ struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
+ int rc;
+
+ if (!cifs_sb || !cifs_sb->master_tlink)
+ return -EINVAL;
+
+ tcon = cifs_sb_master_tcon(cifs_sb);
+ if (!tcon->dfs_path) {
+ cifs_dbg(FYI, "%s: not a dfs tcon\n", __func__);
+ return 0;
+ }
+
+ if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+ cifs_dbg(FYI, "%s: tcon has no dfs mount group id\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mount_group_list_lock);
+ mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
+ if (IS_ERR(mg)) {
+ mutex_unlock(&mount_group_list_lock);
+ cifs_dbg(FYI, "%s: tcon has ipc session to refresh referral\n", __func__);
+ return PTR_ERR(mg);
+ }
+ kref_get(&mg->refcount);
+ mutex_unlock(&mount_group_list_lock);
+
+ spin_lock(&mg->lock);
+ memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
+ spin_unlock(&mg->lock);
+
+ /*
+ * After reconnecting to a different server, unique ids won't match anymore, so we disable
+ * serverino. This prevents dentry revalidation from treating dentries as stale (ESTALE).
+ */
+ cifs_autodisable_serverino(cifs_sb);
+ /*
+ * Force the use of prefix path to support failover on DFS paths that resolve to targets
+ * that have different prefix paths.
+ */
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = refresh_tcon(sessions, tcon, true);
+
+ kref_put(&mg->refcount, mount_group_release);
+ return rc;
+}
+
/*
* Refresh all active dfs mounts regardless of whether they are in cache or not.
* (cache can be cleared)
@@ -1303,7 +1493,6 @@ static void refresh_mounts(struct cifs_ses **sessions)
struct cifs_ses *ses;
struct cifs_tcon *tcon, *ntcon;
struct list_head tcons;
- unsigned int xid;
INIT_LIST_HEAD(&tcons);
@@ -1321,44 +1510,8 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
- const char *path = tcon->dfs_path + 1;
- struct cache_entry *ce;
- struct dfs_info3_param *refs = NULL;
- int numrefs = 0;
- bool needs_refresh = false;
- int rc = 0;
-
list_del_init(&tcon->ulist);
-
- ses = find_ipc_from_server_path(sessions, path);
- if (IS_ERR(ses))
- goto next_tcon;
-
- down_read(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- needs_refresh = IS_ERR(ce) || cache_entry_expired(ce);
- up_read(&htable_rw_lock);
-
- if (!needs_refresh)
- goto next_tcon;
-
- xid = get_xid();
- rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
- free_xid(xid);
-
- /* Create or update a cache entry with the new referral */
- if (!rc) {
- down_write(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- if (IS_ERR(ce))
- add_cache_entry_locked(refs, numrefs);
- else if (cache_entry_expired(ce))
- update_cache_entry_locked(ce, refs, numrefs);
- up_write(&htable_rw_lock);
- }
-
-next_tcon:
- free_dfs_info_array(refs, numrefs);
+ refresh_tcon(sessions, tcon, false);
cifs_put_tcon(tcon);
}
}
diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
index b29d3ae64829..52070d1df189 100644
--- a/fs/cifs/dfs_cache.h
+++ b/fs/cifs/dfs_cache.h
@@ -13,6 +13,8 @@
#include <linux/uuid.h>
#include "cifsglob.h"
+#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
+
struct dfs_cache_tgt_list {
int tl_numtgts;
struct list_head tl_list;
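
DFS_CACHE_TGT_LIST_INIT exists because the previous `= {0}` initializer left tl_list with NULL pointers, while a circular list head must point at itself before first use. A small userspace sketch of the difference, with simplified stand-ins for the kernel's list_head and LIST_HEAD_INIT:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct tgt_list {
	int nr;
	struct list_head list;
};

#define TGT_LIST_INIT(var) { .nr = 0, .list = LIST_HEAD_INIT((var).list) }

int main(void)
{
	struct tgt_list bad = {0};			/* next/prev are NULL */
	struct tgt_list good = TGT_LIST_INIT(good);	/* points at itself */

	printf("bad.next=%p good.next-is-self=%d\n",
	       (void *)bad.list.next, good.list.next == &good.list);
	return 0;
}
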
@@ -44,6 +46,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
static inline struct dfs_cache_tgt_iterator *
dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 553adfbcc22a..9a59d7ff9a11 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -13,6 +13,9 @@
#include <linux/magic.h>
#include <linux/security.h>
#include <net/net_namespace.h>
+#ifdef CONFIG_CIFS_DFS_UPCALL
+#include "dfs_cache.h"
+#endif
*/
#include <linux/ctype.h>
@@ -779,6 +782,10 @@ static int smb3_reconfigure(struct fs_context *fc)
smb3_cleanup_fs_context_contents(cifs_sb->ctx);
rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
smb3_update_mnt_flags(cifs_sb);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (!rc)
+ rc = dfs_cache_remount_fs(cifs_sb);
+#endif
return rc;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index ba3c58e1f725..23d6f4d71649 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -3617,7 +3617,7 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
char *buf)
{
struct cifs_io_parms io_parms = {0};
- int nbytes;
+ int rc, nbytes;
struct kvec iov[2];
io_parms.netfid = cfile->fid.netfid;
@@ -3625,13 +3625,25 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
io_parms.tcon = tcon;
io_parms.persistent_fid = cfile->fid.persistent_fid;
io_parms.volatile_fid = cfile->fid.volatile_fid;
- io_parms.offset = off;
- io_parms.length = len;
- /* iov[0] is reserved for smb header */
- iov[1].iov_base = buf;
- iov[1].iov_len = io_parms.length;
- return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+ while (len) {
+ io_parms.offset = off;
+ io_parms.length = len;
+ if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
+ io_parms.length = SMB2_MAX_BUFFER_SIZE;
+ /* iov[0] is reserved for smb header */
+ iov[1].iov_base = buf;
+ iov[1].iov_len = io_parms.length;
+ rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+ if (rc)
+ break;
+ if (nbytes > len)
+ return -EINVAL;
+ buf += nbytes;
+ off += nbytes;
+ len -= nbytes;
+ }
+ return rc;
}
static int smb3_simple_fallocate_range(unsigned int xid,
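
The rewritten helper above is the classic bounded-chunk write loop: clamp each request to the transport's maximum buffer size, advance by the number of bytes actually written, and stop on the first error. A userspace sketch of the same loop, with MAX_CHUNK standing in for SMB2_MAX_BUFFER_SIZE and write(2) for SMB2_write():

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MAX_CHUNK 4	/* tiny so the loop is visible; stands in for
			 * SMB2_MAX_BUFFER_SIZE */

static int write_range(int fd, const char *buf, size_t len)
{
	int rc = 0;

	while (len) {
		size_t want = len > MAX_CHUNK ? MAX_CHUNK : len;
		ssize_t n = write(fd, buf, want);

		if (n < 0) {
			rc = -errno;	/* stop on the first error */
			break;
		}
		if ((size_t)n > len)
			return -EINVAL;	/* defensive, as in the patch */
		buf += n;
		len -= n;
	}
	return rc;
}

int main(void)
{
	const char *msg = "chunked write demo\n";

	return write_range(STDOUT_FILENO, msg, strlen(msg)) ? 1 : 0;
}

Initializing rc to 0 also covers the len == 0 case, which the loop body never enters.
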
@@ -3655,11 +3667,6 @@ static int smb3_simple_fallocate_range(unsigned int xid,
(char **)&out_data, &out_data_len);
if (rc)
goto out;
- /*
- * It is already all allocated
- */
- if (out_data_len == 0)
- goto out;
buf = kzalloc(1024 * 1024, GFP_KERNEL);
if (buf == NULL) {
@@ -3782,6 +3789,24 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
goto out;
}
+ if (keep_size == true) {
+ /*
+ * We can not preallocate pages beyond the end of the file
+ * in SMB2
+ */
+ if (off >= i_size_read(inode)) {
+ rc = 0;
+ goto out;
+ }
+ /*
+ * For fallocates that are partially beyond the end of file,
+ * clamp len so we only fallocate up to the end of file.
+ */
+ if (off + len > i_size_read(inode)) {
+ len = i_size_read(inode) - off;
+ }
+ }
+
if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
/*
* At this point, we are trying to fallocate an internal
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 14292dba3a12..2c2f179b6977 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -106,12 +106,11 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
return err;
}
-static bool ext2_check_page(struct page *page, int quiet)
+static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
{
struct inode *dir = page->mapping->host;
struct super_block *sb = dir->i_sb;
unsigned chunk_size = ext2_chunk_size(dir);
- char *kaddr = page_address(page);
u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
unsigned offs, rec_len;
unsigned limit = PAGE_SIZE;
@@ -205,7 +204,8 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n,
if (!IS_ERR(page)) {
*page_addr = kmap_local_page(page);
if (unlikely(!PageChecked(page))) {
- if (PageError(page) || !ext2_check_page(page, quiet))
+ if (PageError(page) || !ext2_check_page(page, quiet,
+ *page_addr))
goto fail;
}
}
@@ -584,10 +584,10 @@ out_unlock:
* ext2_delete_entry deletes a directory entry by merging it with the
* previous entry. Page is up-to-date.
*/
-int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
+ char *kaddr)
{
struct inode *inode = page->mapping->host;
- char *kaddr = page_address(page);
unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
unsigned to = ((char *)dir - kaddr) +
ext2_rec_len_from_disk(dir->rec_len);
@@ -607,7 +607,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
de = ext2_next_entry(de);
}
if (pde)
- from = (char*)pde - (char*)page_address(page);
+ from = (char *)pde - kaddr;
pos = page_offset(page) + from;
lock_page(page);
err = ext2_prepare_chunk(page, pos, to - from);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index b0a694820cb7..e512630cb63e 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -740,7 +740,8 @@ extern int ext2_inode_by_name(struct inode *dir,
extern int ext2_make_empty(struct inode *, struct inode *);
extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *,
struct page **, void **res_page_addr);
-extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page,
+ char *kaddr);
extern int ext2_empty_dir (struct inode *);
extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa);
extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *,
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 1f69b81655b6..5f6b7560eb3f 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -293,7 +293,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
goto out;
}
- err = ext2_delete_entry (de, page);
+ err = ext2_delete_entry (de, page, page_addr);
ext2_put_page(page, page_addr);
if (err)
goto out;
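
The ext2 hunks above thread the address returned by kmap_local_page() down to ext2_check_page() and ext2_delete_entry() instead of recomputing it with page_address(), which is not reliable for pages mapped with kmap_local on highmem configurations. A toy user-space model of the pattern, with all names invented for illustration:

    #include <stdlib.h>

    /* Toy model: a "page" with no permanent kernel address. Mapping it
     * hands back a temporary pointer, so a callee cannot recompute the
     * address from the page; the mapping must be passed down, which is
     * exactly what the ext2 hunks above do. */
    struct page { char data[4096]; };

    static char *map_page_local(struct page *page) { return page->data; }
    static void unmap_page_local(char *addr) { (void)addr; }

    static int check_entries(struct page *page, const char *kaddr)
    {
        (void)page;
        return kaddr[0] == '\0';   /* work through the caller's mapping */
    }

    int main(void)
    {
        struct page *page = calloc(1, sizeof(*page));
        char *kaddr;
        int ok;

        if (!page)
            return 1;
        kaddr = map_page_local(page);
        ok = check_entries(page, kaddr);   /* pass kaddr down, never
                                            * recompute it from page */
        unmap_page_local(kaddr);
        free(page);
        return ok ? 0 : 1;
    }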
@@ -397,7 +397,7 @@ static int ext2_rename (struct user_namespace * mnt_userns,
old_inode->i_ctime = current_time(old_inode);
mark_inode_dirty(old_inode);
- ext2_delete_entry(old_de, old_page);
+ ext2_delete_entry(old_de, old_page, old_page_addr);
if (dir_de) {
if (old_dir != new_dir)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 06d04a74ab6c..4c3370548982 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -521,6 +521,9 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
*/
smp_mb();
+ if (IS_DAX(inode))
+ return false;
+
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 926eeb9bf4eb..cdfb1ae78a3f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -77,7 +77,7 @@ enum hugetlb_param {
static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
fsparam_u32 ("gid", Opt_gid),
fsparam_string("min_size", Opt_min_size),
- fsparam_u32 ("mode", Opt_mode),
+ fsparam_u32oct("mode", Opt_mode),
fsparam_string("nr_inodes", Opt_nr_inodes),
fsparam_string("pagesize", Opt_pagesize),
fsparam_string("size", Opt_size),
diff --git a/fs/internal.h b/fs/internal.h
index 3ce8edbaa3ca..82e8eb32ff3d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -61,7 +61,6 @@ extern void __init chrdev_init(void);
*/
extern const struct fs_context_operations legacy_fs_context_ops;
extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
extern void vfs_clean_context(struct fs_context *fc);
extern int finish_clean_context(struct fs_context *fc);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 843d4a7bcd6e..cf086b01c6c6 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -731,7 +731,12 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
int work_flags;
unsigned long flags;
- if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
+ /*
+ * If io-wq is exiting for this task, or if the request has explicitly
+ * been marked as one that should not get executed, cancel it here.
+ */
+ if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
+ (work->flags & IO_WQ_WORK_CANCEL)) {
io_run_cancel(work, wqe);
return;
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0cac361bf6b8..bf548af0426c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1279,8 +1279,17 @@ static void io_prep_async_link(struct io_kiocb *req)
{
struct io_kiocb *cur;
- io_for_each_link(cur, req)
- io_prep_async_work(cur);
+ if (req->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
+ io_for_each_link(cur, req)
+ io_prep_async_work(cur);
+ spin_unlock_irq(&ctx->completion_lock);
+ } else {
+ io_for_each_link(cur, req)
+ io_prep_async_work(cur);
+ }
}
static void io_queue_async_work(struct io_kiocb *req)
@@ -1294,6 +1303,17 @@ static void io_queue_async_work(struct io_kiocb *req)
/* init ->work of the whole link before punting */
io_prep_async_link(req);
+
+ /*
+ * Not expected to happen, but if we do have a bug where this _can_
+ * happen, catch it here and ensure the request is marked as
+ * canceled. That will make io-wq go through the usual work cancel
+ * procedure rather than attempt to run this request (or create a new
+ * worker for it).
+ */
+ if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+ req->work.flags |= IO_WQ_WORK_CANCEL;
+
trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
&req->work, req->flags);
io_wq_enqueue(tctx->io_wq, &req->work);
@@ -1939,9 +1959,13 @@ static void tctx_task_work(struct callback_head *cb)
node = next;
}
if (wq_list_empty(&tctx->task_list)) {
+ spin_lock_irq(&tctx->task_lock);
clear_bit(0, &tctx->task_state);
- if (wq_list_empty(&tctx->task_list))
+ if (wq_list_empty(&tctx->task_list)) {
+ spin_unlock_irq(&tctx->task_lock);
break;
+ }
+ spin_unlock_irq(&tctx->task_lock);
/* another tctx_task_work() is enqueued, yield */
if (test_and_set_bit(0, &tctx->task_state))
break;
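
The tctx_task_work() change closes a lost-wakeup window: clearing the task_state bit and rechecking the list now happen under task_lock, the same lock producers hold while enqueueing. A compact sketch of the consumer side, assuming a producer that appends to the queue and sets `scheduled` under the same mutex (all names invented):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool scheduled;
    static int queued;          /* stand-in for the work list */

    /* Clearing the flag and rechecking the queue must happen under the
     * same lock, or an item enqueued between the two steps is lost. */
    static bool try_retire_worker(void)
    {
        bool done;

        pthread_mutex_lock(&lock);
        scheduled = false;       /* clear first ...                  */
        done = (queued == 0);    /* ... then recheck, still locked   */
        if (!done)
            scheduled = true;    /* more work slipped in; keep going */
        pthread_mutex_unlock(&lock);
        return done;
    }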
@@ -2036,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
io_req_task_work_add(req);
}
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+ req->io_task_work.func = io_queue_async_work;
+ io_req_task_work_add(req);
+}
+
static inline void io_queue_next(struct io_kiocb *req)
{
struct io_kiocb *nxt = io_req_find_next(req);
@@ -2205,7 +2235,7 @@ static inline bool io_run_task_work(void)
* Find and free completed poll iocbs
*/
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
- struct list_head *done)
+ struct list_head *done, bool resubmit)
{
struct req_batch rb;
struct io_kiocb *req;
@@ -2220,11 +2250,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- if (READ_ONCE(req->result) == -EAGAIN &&
+ if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
!(req->flags & REQ_F_DONT_REISSUE)) {
req->iopoll_completed = 0;
req_ref_get(req);
- io_queue_async_work(req);
+ io_req_task_queue_reissue(req);
continue;
}
@@ -2244,7 +2274,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
- long min)
+ long min, bool resubmit)
{
struct io_kiocb *req, *tmp;
LIST_HEAD(done);
@@ -2287,7 +2317,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
if (!list_empty(&done))
- io_iopoll_complete(ctx, nr_events, &done);
+ io_iopoll_complete(ctx, nr_events, &done, resubmit);
return ret;
}
@@ -2305,7 +2335,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
while (!list_empty(&ctx->iopoll_list)) {
unsigned int nr_events = 0;
- io_do_iopoll(ctx, &nr_events, 0);
+ io_do_iopoll(ctx, &nr_events, 0, false);
/* let it sleep and repeat later if can't complete a request */
if (nr_events == 0)
@@ -2367,7 +2397,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
list_empty(&ctx->iopoll_list))
break;
}
- ret = io_do_iopoll(ctx, &nr_events, min);
+ ret = io_do_iopoll(ctx, &nr_events, min, true);
} while (!ret && nr_events < min && !need_resched());
out:
mutex_unlock(&ctx->uring_lock);
@@ -2417,6 +2447,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
*/
if (percpu_ref_is_dying(&ctx->refs))
return false;
+ /*
+ * Play it safe and assume it is not safe to re-import and reissue
+ * if we're not in the original thread group, or not in task context.
+ */
+ if (!same_thread_group(req->task, current) || !in_task())
+ return false;
return true;
}
#else
@@ -2747,7 +2783,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
req_ref_get(req);
- io_queue_async_work(req);
+ io_req_task_queue_reissue(req);
} else {
int cflags = 0;
@@ -4802,6 +4838,7 @@ IO_NETOP_FN(recv);
struct io_poll_table {
struct poll_table_struct pt;
struct io_kiocb *req;
+ int nr_entries;
int error;
};
@@ -4902,7 +4939,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
if (req->poll.events & EPOLLONESHOT)
flags = 0;
if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
- io_poll_remove_waitqs(req);
req->poll.done = true;
flags = 0;
}
@@ -4925,6 +4961,7 @@ static void io_poll_task_func(struct io_kiocb *req)
done = io_poll_complete(req, req->result);
if (done) {
+ io_poll_remove_double(req);
hash_del(&req->hash_node);
} else {
req->result = 0;
@@ -4995,11 +5032,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
struct io_kiocb *req = pt->req;
/*
- * If poll->head is already set, it's because the file being polled
- * uses multiple waitqueues for poll handling (eg one for read, one
- * for write). Setup a separate io_poll_iocb if this happens.
+ * The file being polled uses multiple waitqueues for poll handling
+ * (e.g. one for read, one for write). Setup a separate io_poll_iocb
+ * if this happens.
*/
- if (unlikely(poll->head)) {
+ if (unlikely(pt->nr_entries)) {
struct io_poll_iocb *poll_one = poll;
/* already have a 2nd entry, fail a third attempt */
@@ -5027,7 +5064,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
*poll_ptr = poll;
}
- pt->error = 0;
+ pt->nr_entries++;
poll->head = head;
if (poll->events & EPOLLEXCLUSIVE)
@@ -5104,11 +5141,16 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
ipt->pt._key = mask;
ipt->req = req;
- ipt->error = -EINVAL;
+ ipt->error = 0;
+ ipt->nr_entries = 0;
mask = vfs_poll(req->file, &ipt->pt) & poll->events;
+ if (unlikely(!ipt->nr_entries) && !ipt->error)
+ ipt->error = -EINVAL;
spin_lock_irq(&ctx->completion_lock);
+ if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
+ io_poll_remove_double(req);
if (likely(poll->head)) {
spin_lock(&poll->head->lock);
if (unlikely(list_empty(&poll->wait.entry))) {
@@ -5179,7 +5221,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
io_async_wake);
if (ret || ipt.error) {
- io_poll_remove_double(req);
spin_unlock_irq(&ctx->completion_lock);
if (ret)
return IO_APOLL_READY;
@@ -6792,7 +6833,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
mutex_lock(&ctx->uring_lock);
if (!list_empty(&ctx->iopoll_list))
- io_do_iopoll(ctx, &nr_events, 0);
+ io_do_iopoll(ctx, &nr_events, 0, true);
/*
* Don't submit if refs are dying, good for io_uring_register(),
@@ -7899,15 +7940,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
struct io_wq_data data;
unsigned int concurrency;
+ mutex_lock(&ctx->uring_lock);
hash = ctx->hash_map;
if (!hash) {
hash = kzalloc(sizeof(*hash), GFP_KERNEL);
- if (!hash)
+ if (!hash) {
+ mutex_unlock(&ctx->uring_lock);
return ERR_PTR(-ENOMEM);
+ }
refcount_set(&hash->refs, 1);
init_waitqueue_head(&hash->wait);
ctx->hash_map = hash;
}
+ mutex_unlock(&ctx->uring_lock);
data.hash = hash;
data.task = task;
@@ -7981,9 +8026,11 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
f = fdget(p->wq_fd);
if (!f.file)
return -ENXIO;
- fdput(f);
- if (f.file->f_op != &io_uring_fops)
+ if (f.file->f_op != &io_uring_fops) {
+ fdput(f);
return -EINVAL;
+ }
+ fdput(f);
}
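
The fdget() hunk fixes a put-before-use: the reference was dropped with fdput() and f.file dereferenced afterwards. The rule is to keep the reference alive across every access, with each exit path doing its own put; a toy refcount example of the same ordering (names invented):

    #include <stdlib.h>

    struct obj { int refs; int type; };

    static struct obj *obj_get(struct obj *o) { o->refs++; return o; }

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)
            free(o);            /* o is invalid after this point */
    }

    static int check_type(struct obj *o, int want)
    {
        struct obj *ref = obj_get(o);
        int ok = (ref->type == want);  /* use while the ref is held */

        obj_put(ref);                  /* put only after last use   */
        return ok ? 0 : -1;
    }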
if (ctx->flags & IORING_SETUP_SQPOLL) {
struct task_struct *tsk;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 775657943057..54d7843c0211 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
}
}
+/*
+ * Zero out partial blocks of one cluster.
+ *
+ * start: file offset where zeroing starts; it will be rounded up to the
+ * next block boundary.
+ * len: trimmed so that "start + len" does not run past the end of the
+ * current cluster.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+ u64 start, u64 len)
+{
+ int ret;
+ u64 start_block, end_block, nr_blocks;
+ u64 p_block, offset;
+ u32 cluster, p_cluster, nr_clusters;
+ struct super_block *sb = inode->i_sb;
+ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+ if (start + len < end)
+ end = start + len;
+
+ start_block = ocfs2_blocks_for_bytes(sb, start);
+ end_block = ocfs2_blocks_for_bytes(sb, end);
+ nr_blocks = end_block - start_block;
+ if (!nr_blocks)
+ return 0;
+
+ cluster = ocfs2_bytes_to_clusters(sb, start);
+ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+ &nr_clusters, NULL);
+ if (ret)
+ return ret;
+ if (!p_cluster)
+ return 0;
+
+ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
static int ocfs2_zero_partial_clusters(struct inode *inode,
u64 start, u64 len)
{
@@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int csize = osb->s_clustersize;
handle_t *handle;
+ loff_t isize = i_size_read(inode);
/*
* The "start" and "end" values are NOT necessarily part of
@@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
goto out;
+ /* No page cache for EOF blocks, issue zero out to disk. */
+ if (end > isize) {
+ /*
+ * Zero out the EOF blocks in the last cluster starting from
+ * "isize", even when "start" > "isize". Zeroing exactly at
+ * "start" would be complicated because "start" may not be
+ * block aligned; that would require a buffer write, and
+ * buffered writes beyond EOF are not supported.
+ */
+ ret = ocfs2_zeroout_partial_cluster(inode, isize,
+ end - isize);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ if (start >= isize)
+ goto out;
+ end = isize;
+ }
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@@ -1856,45 +1916,6 @@ out:
}
/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- * is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
- u64 start, u64 len)
-{
- int ret;
- u64 start_block, end_block, nr_blocks;
- u64 p_block, offset;
- u32 cluster, p_cluster, nr_clusters;
- struct super_block *sb = inode->i_sb;
- u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
- if (start + len < end)
- end = start + len;
-
- start_block = ocfs2_blocks_for_bytes(sb, start);
- end_block = ocfs2_blocks_for_bytes(sb, end);
- nr_blocks = end_block - start_block;
- if (!nr_blocks)
- return 0;
-
- cluster = ocfs2_bytes_to_clusters(sb, start);
- ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
- &nr_clusters, NULL);
- if (ret)
- return ret;
- if (!p_cluster)
- return 0;
-
- offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
- p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
- return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
-/*
* Parts of this function taken from xfs_change_file_space()
*/
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
@@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
- orig_isize = i_size_read(inode);
switch (sr->l_whence) {
case 0: /*SEEK_SET*/
break;
@@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
sr->l_start += f_pos;
break;
case 2: /*SEEK_END*/
- sr->l_start += orig_isize;
+ sr->l_start += i_size_read(inode);
break;
default:
ret = -EINVAL;
@@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
ret = -EINVAL;
}
+ orig_isize = i_size_read(inode);
/* zeroout eof blocks in the cluster. */
if (!ret && change_size && orig_isize < size) {
ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
diff --git a/fs/pipe.c b/fs/pipe.c
index bfd946a9ad01..9ef4231cce61 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -429,20 +429,20 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
#endif
/*
- * Only wake up if the pipe started out empty, since
- * otherwise there should be no readers waiting.
+ * Epoll nonsensically wants a wakeup whether the pipe
+ * was already empty or not.
*
* If it wasn't empty we try to merge new data into
* the last buffer.
*
* That naturally merges small writes, but it also
- * page-aligs the rest of the writes for large writes
+ * page-aligns the rest of the writes for large writes
* spanning multiple pages.
*/
head = pipe->head;
- was_empty = pipe_empty(head, pipe->tail);
+ was_empty = true;
chars = total_len & (PAGE_SIZE-1);
- if (chars && !was_empty) {
+ if (chars && !pipe_empty(head, pipe->tail)) {
unsigned int mask = pipe->ring_size - 1;
struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
int offset = buf->offset + buf->len;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 476a7ff49482..ef42729216d1 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -387,6 +387,24 @@ void pathrelse(struct treepath *search_path)
search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}
+static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih)
+{
+ struct reiserfs_de_head *deh;
+ int i;
+
+ deh = B_I_DEH(bh, ih);
+ for (i = 0; i < ih_entry_count(ih); i++) {
+ if (deh_location(&deh[i]) > ih_item_len(ih)) {
+ reiserfs_warning(NULL, "reiserfs-5094",
+ "directory entry location seems wrong %h",
+ &deh[i]);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
{
struct block_head *blkh;
@@ -454,11 +472,14 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
"(second one): %h", ih);
return 0;
}
- if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
- reiserfs_warning(NULL, "reiserfs-5093",
- "item entry count seems wrong %h",
- ih);
- return 0;
+ if (is_direntry_le_ih(ih)) {
+ if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) {
+ reiserfs_warning(NULL, "reiserfs-5093",
+ "item entry count seems wrong %h",
+ ih);
+ return 0;
+ }
+ return has_valid_deh_location(bh, ih);
}
prev_location = ih_location(ih);
}
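
has_valid_deh_location() refuses directory entries whose stored offset points outside the item that contains them, so later lookups on a corrupted or crafted image cannot index out of bounds. The same validate-before-use shape for on-disk offsets, with an invented record layout:

    #include <stdint.h>

    /* Invented on-disk record: a header with offsets into a payload. */
    struct rec_head {
        uint16_t entry_count;
        uint16_t item_len;
        uint16_t entry_off[];   /* offsets relative to the item start */
    };

    /* Return 1 only if every stored offset stays inside the item. */
    static int offsets_valid(const struct rec_head *h)
    {
        for (uint16_t i = 0; i < h->entry_count; i++) {
            if (h->entry_off[i] > h->item_len)
                return 0;       /* corrupt or malicious: refuse it */
        }
        return 1;
    }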
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 3ffafc73acf0..58481f8d63d5 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2082,6 +2082,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
unlock_new_inode(root_inode);
}
+ if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) ||
+ !root_inode->i_size) {
+ SWARN(silent, s, "", "corrupt root inode, run fsck");
+ iput(root_inode);
+ errval = -EUCLEAN;
+ goto error;
+ }
+
s->s_root = d_make_root(root_inode);
if (!s->s_root)
goto error;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f6e0f0c0d0e5..5c2d806e6ae5 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1236,23 +1236,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
}
static __always_inline int validate_range(struct mm_struct *mm,
- __u64 *start, __u64 len)
+ __u64 start, __u64 len)
{
__u64 task_size = mm->task_size;
- *start = untagged_addr(*start);
-
- if (*start & ~PAGE_MASK)
+ if (start & ~PAGE_MASK)
return -EINVAL;
if (len & ~PAGE_MASK)
return -EINVAL;
if (!len)
return -EINVAL;
- if (*start < mmap_min_addr)
+ if (start < mmap_min_addr)
return -EINVAL;
- if (*start >= task_size)
+ if (start >= task_size)
return -EINVAL;
- if (len > task_size - *start)
+ if (len > task_size - start)
return -EINVAL;
return 0;
}
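
Two details of validate_range() carry over unchanged: alignment is tested by masking, and the length check is written as len > task_size - start, which (given that start < task_size is already known) cannot wrap the way start + len > task_size could. A compact user-space restatement (PAGE_SIZE and the function name are placeholders):

    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Validate [start, start + len) against [min_addr, limit); mirrors
     * the ordering above so that `limit - start` cannot underflow. */
    static int range_valid(uint64_t start, uint64_t len,
                           uint64_t min_addr, uint64_t limit)
    {
        if (start & ~PAGE_MASK)          /* start must be page aligned */
            return 0;
        if ((len & ~PAGE_MASK) || !len)  /* len too, and non-empty     */
            return 0;
        if (start < min_addr || start >= limit)
            return 0;
        return len <= limit - start;     /* no start + len overflow    */
    }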
@@ -1316,7 +1314,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vm_flags |= VM_UFFD_MINOR;
}
- ret = validate_range(mm, &uffdio_register.range.start,
+ ret = validate_range(mm, uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
@@ -1522,7 +1520,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
goto out;
- ret = validate_range(mm, &uffdio_unregister.start,
+ ret = validate_range(mm, uffdio_unregister.start,
uffdio_unregister.len);
if (ret)
goto out;
@@ -1671,7 +1669,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
goto out;
- ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
+ ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
if (ret)
goto out;
@@ -1711,7 +1709,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
sizeof(uffdio_copy)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
+ ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
if (ret)
goto out;
/*
@@ -1768,7 +1766,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
sizeof(uffdio_zeropage)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
+ ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
if (ret)
goto out;
@@ -1818,7 +1816,7 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
sizeof(struct uffdio_writeprotect)))
return -EFAULT;
- ret = validate_range(ctx->mm, &uffdio_wp.range.start,
+ ret = validate_range(ctx->mm, uffdio_wp.range.start,
uffdio_wp.range.len);
if (ret)
return ret;
@@ -1866,7 +1864,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
sizeof(uffdio_continue) - (sizeof(__s64))))
goto out;
- ret = validate_range(ctx->mm, &uffdio_continue.range.start,
+ ret = validate_range(ctx->mm, uffdio_continue.range.start,
uffdio_continue.range.len);
if (ret)
goto out;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1ae993fee4a5..13d93371790e 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -707,11 +707,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
* The caller is responsible for invoking acpi_dev_put() on the returned device.
- *
- * FIXME: Due to above requirement there is a window that may invalidate @adev
- * and next iteration will use a dangling pointer, e.g. in the case of a
- * hotplug event. That said, the caller should ensure that this will never
- * happen.
*/
#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \
@@ -725,7 +720,8 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
static inline void acpi_dev_put(struct acpi_device *adev)
{
- put_device(&adev->dev);
+ if (adev)
+ put_device(&adev->dev);
}
struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 10100a4bbe2a..afb27cb6a7bd 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n)
#define DRM_MAJOR 226
/**
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3177181c4326..d3afea47ade6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -57,7 +57,7 @@ struct blk_keyslot_manager;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 5
+#define BLKCG_MAX_POLS 6
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index a9db1eae6796..ae3ac3a2018c 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -134,4 +134,5 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b847e1ccd10f..5424124dbe36 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -354,8 +354,8 @@ struct bpf_insn_aux_data {
};
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int sanitize_stack_off; /* stack slot to be cleared */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+ bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
@@ -429,6 +429,7 @@ struct bpf_verifier_env {
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ bool explore_alu_limits;
bool allow_ptr_leaks;
bool allow_uninit_stack;
bool allow_ptr_to_map_access;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ba36989f711a..ff698c9d1c94 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -73,6 +73,11 @@ struct ctl_table_header;
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC 0xc0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = 0 })
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC() \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_NOSPEC, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
/* Internal classic blocks for direct assignment */
#define __BPF_STMT(CODE, K) \
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index e2bc16300c82..6b54982fc5f3 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -141,6 +141,7 @@ extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param_source(struct fs_context *fc,
struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
/*
* sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 8c6e8e996c87..d9a606a9fc64 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -318,14 +318,16 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
VM_BUG_ON(offset + len > PAGE_SIZE);
memcpy(to + offset, from, len);
+ flush_dcache_page(page);
kunmap_local(to);
}
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
- char *addr = kmap_atomic(page);
+ char *addr = kmap_local_page(page);
memset(addr + offset, 0, len);
- kunmap_atomic(addr);
+ flush_dcache_page(page);
+ kunmap_local(addr);
}
#endif /* _LINUX_HIGHMEM_H */
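
With the flush folded in, callers of memcpy_to_page() and memzero_page() need no kmap or flush_dcache_page() calls of their own; for instance, zeroing the tail of a partially filled page (sketch, kernel context, `page` and `valid` assumed):

    memzero_page(page, valid, PAGE_SIZE - valid);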
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 25e2b4e80502..aee8ff4739b1 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -81,6 +81,8 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
/* Get the device * from ishtp device instance */
struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
/* Trace interface for clients */
ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
/* Get device pointer of PCI device for DMA acces */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index cbf46f56d105..4a53c3ca86bd 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -209,7 +209,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_NONE, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
/**
* for_each_mem_range_rev - reverse iterate through memblock areas from
@@ -220,7 +220,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range_rev(i, p_start, p_end) \
__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_NONE, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
/**
* for_each_reserved_mem_range - iterate over all reserved memblock areas
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 96f319099744..14ab0c0bc924 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
+}
+
+static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ kfree_skb(skb);
+}
+
+static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg)
+{
+ if (msg->skb)
+ sock_drop(psock->sk, msg->skb);
+ kfree(msg);
+}
+
static inline void sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
spin_lock_bh(&psock->ingress_lock);
- list_add_tail(&msg->list, &psock->ingress_msg);
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ list_add_tail(&msg->list, &psock->ingress_msg);
+ else
+ drop_sk_msg(psock, msg);
spin_unlock_bh(&psock->ingress_lock);
}
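
Queueing is now gated on SK_PSOCK_TX_ENABLED under ingress_lock: once teardown clears the bit (also under the lock), no new message can land on the list, and anything racing in is dropped instead of leaked. The general shape, as a small sketch with invented names:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ingress_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool enabled = true;

    /* Producer-side gate checked under the same lock teardown takes
     * to flip `enabled`; `free_msg` stands in for drop_sk_msg(). */
    static void queue_msg(void *msg, void (*enqueue)(void *),
                          void (*free_msg)(void *))
    {
        pthread_mutex_lock(&ingress_lock);
        if (enabled)
            enqueue(msg);       /* normal path                         */
        else
            free_msg(msg);      /* teardown raced us: drop, don't leak */
        pthread_mutex_unlock(&ingress_lock);
    }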
@@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk,
psock->psock_update_sk_prot(sk, psock, true);
}
-static inline void sk_psock_set_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- set_bit(bit, &psock->state);
-}
-
-static inline void sk_psock_clear_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- clear_bit(bit, &psock->state);
-}
-
-static inline bool sk_psock_test_state(const struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- return test_bit(bit, &psock->state);
-}
-
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
struct sk_psock *psock;
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index c0f0a13ed818..49aa79c7b278 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -15,9 +15,11 @@
#include <linux/if_ether.h>
/* Lengths of frame formats */
-#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
-#define LLC_PDU_LEN_S 4
-#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
/* Known SAP addresses */
#define LLC_GLOBAL_SAP 0xFF
#define LLC_NULL_SAP 0x00 /* not network-layer visible */
@@ -50,9 +52,10 @@
#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
#define LLC_PDU_TYPE_MASK 0x03
-#define LLC_PDU_TYPE_I 0 /* first bit */
-#define LLC_PDU_TYPE_S 1 /* first two bits */
-#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */
#define LLC_PDU_TYPE_IS_I(pdu) \
((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
u8 ssap, u8 dsap, u8 cr)
{
- const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+ int hlen = 4; /* default value for I and S types */
struct llc_pdu_un *pdu;
+ switch (type) {
+ case LLC_PDU_TYPE_U:
+ hlen = 3;
+ break;
+ case LLC_PDU_TYPE_U_XID:
+ hlen = 6;
+ break;
+ }
+
skb_push(skb, hlen);
skb_reset_network_header(skb);
pdu = llc_pdu_un_hdr(skb);
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
xid_info->type = svcs_supported;
xid_info->rw = rx_window << 1; /* size of receive window */
- skb_put(skb, sizeof(struct llc_xid_info));
+
+ /* no need to push/put since llc_pdu_header_init() has already
+ * pushed 3 + 3 bytes
+ */
}
/**
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 32fc4a309df5..651bba654d77 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -984,6 +984,7 @@ struct sctp_transport {
} cacc;
struct {
+ __u32 last_rtx_chunks;
__u16 pmtu;
__u16 probe_size;
__u16 probe_high;
@@ -1024,8 +1025,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
-void sctp_transport_pl_send(struct sctp_transport *t);
-void sctp_transport_pl_recv(struct sctp_transport *t);
+bool sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);
/* This is the structure we use to queue packets as they come into
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 675849d07284..8e6dd8a257c5 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -712,6 +712,12 @@ struct snd_soc_dai_link {
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int ignore:1;
+ /* If set, reorder the stop sequence: stop the DMA controller
+ * first, then run the CPU DAI driver stop sequence.
+ */
+ unsigned int stop_dma_first:1;
+
#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj; /* For topology */
#endif
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index e33997b4d750..edc346a77c91 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _USR_IDXD_H_
#define _USR_IDXD_H_
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index 26b638a7ad97..a7085e092d34 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
/*
* Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 9b1577498373..b1a5fc04492b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -32,6 +32,8 @@
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
+
+#include <asm/barrier.h>
#include <asm/unaligned.h>
/* Registers */
@@ -1377,6 +1379,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
/* Non-UAPI available opcodes. */
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+ [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1621,7 +1624,21 @@ out:
COND_JMP(s, JSGE, >=)
COND_JMP(s, JSLE, <=)
#undef COND_JMP
- /* STX and ST and LDX*/
+ /* ST, STX and LDX*/
+ ST_NOSPEC:
+ /* Speculation barrier for mitigating Speculative Store Bypass.
+ * In case of arm64, we rely on the firmware mitigation as
+ * controlled via the ssbd kernel parameter. Whenever the
+ * mitigation is enabled, it works for all of the kernel code
+ * with no need to provide any additional instructions here.
+ * In case of x86, we use 'lfence' insn for mitigation. We
+ * reuse preexisting logic from Spectre v1 mitigation that
+ * happens to produce the required code on x86 for v4 as well.
+ */
+#ifdef CONFIG_X86
+ barrier_nospec();
+#endif
+ CONT;
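
For context, the gadget this instruction defuses is a store to a stack slot followed by a dependent load from it; under Speculative Store Bypass the load may observe the stale earlier value (for example a spilled pointer) before the store retires. A schematic, not taken from the patch, of where the verifier now places the barrier, using the insn macros from include/linux/filter.h (kernel context):

    /* Schematic only: the verifier appends BPF_ST_NOSPEC() after a
     * stack store it cannot prove safe, so the following load cannot
     * speculatively bypass that store and see stale slot contents. */
    struct bpf_insn prog[] = {
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),          /* *(fp-8) = 0  */
        BPF_ST_NOSPEC(),                                /* v4 barrier   */
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_FP, -8), /* r0 = *(fp-8) */
        BPF_EXIT_INSN(),
    };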
#define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index bbfc6bb79240..ca3cd9aaa6ce 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
verbose(cbs->private_data, "BUG_%02x\n", insn->code);
}
} else if (class == BPF_ST) {
- if (BPF_MODE(insn->code) != BPF_MEM) {
+ if (BPF_MODE(insn->code) == BPF_MEM) {
+ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
+ insn->code,
+ bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+ insn->dst_reg,
+ insn->off, insn->imm);
+ } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
+ verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
+ } else {
verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
- return;
}
- verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
- insn->code,
- bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
- insn->dst_reg,
- insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 475c28e21128..5ea2238a6656 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2667,6 +2667,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0)
reg = &cur->regs[value_regno];
+ if (!env->bypass_spec_v4) {
+ bool sanitize = reg && is_spillable_regtype(reg->type);
+
+ for (i = 0; i < size; i++) {
+ if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+ sanitize = true;
+ break;
+ }
+ }
+
+ if (sanitize)
+ env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+ }
if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
@@ -2689,47 +2702,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
-
if (state != cur && reg->type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
-
- if (!env->bypass_spec_v4) {
- bool sanitize = false;
-
- if (state->stack[spi].slot_type[0] == STACK_SPILL &&
- register_is_const(&state->stack[spi].spilled_ptr))
- sanitize = true;
- for (i = 0; i < BPF_REG_SIZE; i++)
- if (state->stack[spi].slot_type[i] == STACK_MISC) {
- sanitize = true;
- break;
- }
- if (sanitize) {
- int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
- int soff = (-spi - 1) * BPF_REG_SIZE;
-
- /* detected reuse of integer stack slot with a pointer
- * which means either llvm is reusing stack slot or
- * an attacker is trying to exploit CVE-2018-3639
- * (speculative store bypass)
- * Have to sanitize that slot with preemptive
- * store of zero.
- */
- if (*poff && *poff != soff) {
- /* disallow programs where single insn stores
- * into two different stack slots, since verifier
- * cannot sanitize them
- */
- verbose(env,
- "insn %d cannot access two stack slots fp%d and fp%d",
- insn_idx, *poff, soff);
- return -EINVAL;
- }
- *poff = soff;
- }
- }
save_register_state(state, spi, reg);
} else {
u8 type = STACK_MISC;
@@ -6804,6 +6780,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+ /* Limit pruning on unknown scalars to enable deep search for
+ * potential masking differences from other program paths.
+ */
+ if (!off_is_imm)
+ env->explore_alu_limits = true;
}
err = update_alu_sanitation_state(aux, alu_state, alu_limit);
@@ -10207,8 +10189,8 @@ next:
}
/* Returns true if (rold safe implies rcur safe) */
-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
- struct bpf_id_pair *idmap)
+static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+ struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
{
bool equal;
@@ -10234,6 +10216,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
switch (rold->type) {
case SCALAR_VALUE:
+ if (env->explore_alu_limits)
+ return false;
if (rcur->type == SCALAR_VALUE) {
if (!rold->precise && !rcur->precise)
return true;
@@ -10324,9 +10308,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
}
-static bool stacksafe(struct bpf_func_state *old,
- struct bpf_func_state *cur,
- struct bpf_id_pair *idmap)
+static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+ struct bpf_func_state *cur, struct bpf_id_pair *idmap)
{
int i, spi;
@@ -10371,9 +10354,8 @@ static bool stacksafe(struct bpf_func_state *old,
continue;
if (old->stack[spi].slot_type[0] != STACK_SPILL)
continue;
- if (!regsafe(&old->stack[spi].spilled_ptr,
- &cur->stack[spi].spilled_ptr,
- idmap))
+ if (!regsafe(env, &old->stack[spi].spilled_ptr,
+ &cur->stack[spi].spilled_ptr, idmap))
/* when explored and current stack slot are both storing
* spilled registers, check that stored pointers types
* are the same as well.
@@ -10430,10 +10412,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
for (i = 0; i < MAX_BPF_REG; i++)
- if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+ if (!regsafe(env, &old->regs[i], &cur->regs[i],
+ env->idmap_scratch))
return false;
- if (!stacksafe(old, cur, env->idmap_scratch))
+ if (!stacksafe(env, old, cur, env->idmap_scratch))
return false;
if (!refsafe(old, cur))
@@ -12198,35 +12181,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
bpf_convert_ctx_access_t convert_ctx_access;
+ bool ctx_access;
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
- insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
+ insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
type = BPF_READ;
- else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
- insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
- insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
- insn->code == (BPF_STX | BPF_MEM | BPF_DW))
+ ctx_access = true;
+ } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
type = BPF_WRITE;
- else
+ ctx_access = BPF_CLASS(insn->code) == BPF_STX;
+ } else {
continue;
+ }
if (type == BPF_WRITE &&
- env->insn_aux_data[i + delta].sanitize_stack_off) {
+ env->insn_aux_data[i + delta].sanitize_stack_spill) {
struct bpf_insn patch[] = {
- /* Sanitize suspicious stack slot with zero.
- * There are no memory dependencies for this store,
- * since it's only using frame pointer and immediate
- * constant of zero
- */
- BPF_ST_MEM(BPF_DW, BPF_REG_FP,
- env->insn_aux_data[i + delta].sanitize_stack_off,
- 0),
- /* the original STX instruction will immediately
- * overwrite the same stack slot with appropriate value
- */
*insn,
+ BPF_ST_NOSPEC(),
};
cnt = ARRAY_SIZE(patch);
@@ -12240,6 +12221,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
continue;
}
+ if (!ctx_access)
+ continue;
+
switch (env->insn_aux_data[i + delta].ptr_type) {
case PTR_TO_CTX:
if (!ops->convert_ctx_access)
@@ -13093,37 +13077,6 @@ static void free_states(struct bpf_verifier_env *env)
}
}
-/* The verifier is using insn_aux_data[] to store temporary data during
- * verification and to store information for passes that run after the
- * verification like dead code sanitization. do_check_common() for subprogram N
- * may analyze many other subprograms. sanitize_insn_aux_data() clears all
- * temporary data after do_check_common() finds that subprogram N cannot be
- * verified independently. pass_cnt counts the number of times
- * do_check_common() was run and insn->aux->seen tells the pass number
- * insn_aux_data was touched. These variables are compared to clear temporary
- * data from failed pass. For testing and experiments do_check_common() can be
- * run multiple times even when prior attempt to verify is unsuccessful.
- *
- * Note that special handling is needed on !env->bypass_spec_v1 if this is
- * ever called outside of error path with subsequent program rejection.
- */
-static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
-{
- struct bpf_insn *insn = env->prog->insnsi;
- struct bpf_insn_aux_data *aux;
- int i, class;
-
- for (i = 0; i < env->prog->len; i++) {
- class = BPF_CLASS(insn[i].code);
- if (class != BPF_LDX && class != BPF_STX)
- continue;
- aux = &env->insn_aux_data[i];
- if (aux->seen != env->pass_cnt)
- continue;
- memset(aux, 0, offsetof(typeof(*aux), orig_idx));
- }
-}
-
static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
@@ -13200,9 +13153,6 @@ out:
if (!ret && pop_log)
bpf_vlog_reset(&env->log, 0);
free_states(env);
- if (ret)
- /* clean aux data in case subprog was rejected */
- sanitize_insn_aux_data(env);
return ret;
}
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 8d6bf56ed77a..de2c432dee20 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1221,9 +1221,7 @@ int cgroup1_get_tree(struct fs_context *fc)
ret = cgroup_do_get_tree(fc);
if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
- struct super_block *sb = fc->root->d_sb;
- dput(fc->root);
- deactivate_locked_super(sb);
+ fc_drop_locked(fc);
ret = 1;
}
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index 910ae69cae77..af4a6ef48ce0 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -5,6 +5,13 @@
*/
#include <linux/dma-map-ops.h>
+static struct page *dma_common_vaddr_to_page(void *cpu_addr)
+{
+ if (is_vmalloc_addr(cpu_addr))
+ return vmalloc_to_page(cpu_addr);
+ return virt_to_page(cpu_addr);
+}
+
/*
* Create scatter-list for the already allocated DMA buffer.
*/
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page = dma_common_vaddr_to_page(cpu_addr);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
+ struct page *page = dma_common_vaddr_to_page(cpu_addr);
int ret = -ENXIO;
vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return -ENXIO;
return remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+ page_to_pfn(page) + vma->vm_pgoff,
user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
return -ENXIO;
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index e4163042c4d6..cf6acab78538 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -47,7 +47,7 @@ void __init idle_thread_set_boot_cpu(void)
*
* Creates the thread if it does not exist.
*/
-static inline void idle_init(unsigned int cpu)
+static __always_inline void idle_init(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 29a5e54e6e10..517be7fd175e 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
if (!p)
goto out;
+ /* Protect timer list r/w in arm_timer() */
+ sighand = lock_task_sighand(p, &flags);
+ if (unlikely(sighand == NULL))
+ goto out;
+
/*
* Fetch the current sample and update the timer's expiry time.
*/
@@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
bump_cpu_timer(timer, now);
- /* Protect timer list r/w in arm_timer() */
- sighand = lock_task_sighand(p, &flags);
- if (unlikely(sighand == NULL))
- goto out;
-
/*
* Now re-arm for the new expiry time.
*/
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3fadb58fc9d7..9eb11c2209e5 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -207,6 +207,7 @@ struct timer_base {
unsigned int cpu;
bool next_expiry_recalc;
bool is_idle;
+ bool timers_pending;
DECLARE_BITMAP(pending_map, WHEEL_SIZE);
struct hlist_head vectors[WHEEL_SIZE];
} ____cacheline_aligned;
@@ -595,6 +596,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
* can reevaluate the wheel:
*/
base->next_expiry = bucket_expiry;
+ base->timers_pending = true;
base->next_expiry_recalc = false;
trigger_dyntick_cpu(base, timer);
}
@@ -1582,6 +1584,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
}
base->next_expiry_recalc = false;
+ base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
return next;
}
@@ -1633,7 +1636,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
u64 expires = KTIME_MAX;
unsigned long nextevt;
- bool is_max_delta;
/*
* Pretend that there is no timer pending if the cpu is offline.
@@ -1646,7 +1648,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (base->next_expiry_recalc)
base->next_expiry = __next_timer_interrupt(base);
nextevt = base->next_expiry;
- is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
/*
* We have a fresh next event. Check whether we can forward the
@@ -1664,7 +1665,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
expires = basem;
base->is_idle = false;
} else {
- if (!is_max_delta)
+ if (base->timers_pending)
expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/*
* If we expect to sleep more than a tick, mark the base idle.
@@ -1947,6 +1948,7 @@ int timers_prepare_cpu(unsigned int cpu)
base = per_cpu_ptr(&timer_bases[b], cpu);
base->clk = jiffies;
base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+ base->timers_pending = false;
base->is_idle = false;
}
return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e6fb3e6e1ffc..7b180f61e6d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5985,7 +5985,8 @@ ftrace_graph_release(struct inode *inode, struct file *file)
* infrastructure to do the synchronization, thus we must do it
* ourselves.
*/
- synchronize_rcu_tasks_rude();
+ if (old_hash != EMPTY_HASH)
+ synchronize_rcu_tasks_rude();
free_ftrace_hash(old_hash);
}
@@ -7544,7 +7545,7 @@ int ftrace_is_dead(void)
*/
int register_ftrace_function(struct ftrace_ops *ops)
{
- int ret = -1;
+ int ret;
ftrace_ops_init(ops);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d1463eac11a3..e592d1df6f88 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3880,10 +3880,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
if (unlikely(!head))
return true;
- return reader->read == rb_page_commit(reader) &&
- (commit == reader ||
- (commit == head &&
- head->read == rb_page_commit(commit)));
+ /* Reader should exhaust content in reader page */
+ if (reader->read != rb_page_commit(reader))
+ return false;
+
+ /*
+ * If writers are committing on the reader page, knowing all
+ * committed content has been read, the ring buffer is empty.
+ */
+ if (commit == reader)
+ return true;
+
+ /*
+ * If writers are committing on a page other than reader page
+ * and head page, there should always be content to read.
+ */
+ if (commit != head)
+ return false;
+
+ /*
+ * Writers are committing on the head page; the buffer is empty
+ * only if nothing has been committed there yet. When there is
+ * data, the reader will swap the reader page with the head page
+ * to read it.
+ */
+ return rb_page_commit(commit) == 0;
}
/**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8b80b5bab71..c59dd35a6da5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5609,6 +5609,10 @@ static const char readme_msg[] =
"\t [:name=histname1]\n"
"\t [:<handler>.<action>]\n"
"\t [if <filter>]\n\n"
+ "\t Note, special fields can be used as well:\n"
+ "\t common_timestamp - to record current timestamp\n"
+ "\t common_cpu - to record the CPU the event happened on\n"
+ "\n"
"\t When a matching event is hit, an entry is added to a hash\n"
"\t table using the key(s) and value(s) named, and the value of a\n"
"\t sum called 'hitcount' is incremented. Keys and values\n"
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 16a9dfc9fffc..34325f41ebc0 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1111,7 +1111,7 @@ static const char *hist_field_name(struct hist_field *field,
field->flags & HIST_FIELD_FL_ALIAS)
field_name = hist_field_name(field->operands[0], ++level);
else if (field->flags & HIST_FIELD_FL_CPU)
- field_name = "cpu";
+ field_name = "common_cpu";
else if (field->flags & HIST_FIELD_FL_EXPR ||
field->flags & HIST_FIELD_FL_VAR_REF) {
if (field->system) {
@@ -1991,14 +1991,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
hist_data->enable_timestamps = true;
if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
hist_data->attrs->ts_in_usecs = true;
- } else if (strcmp(field_name, "cpu") == 0)
+ } else if (strcmp(field_name, "common_cpu") == 0)
*flags |= HIST_FIELD_FL_CPU;
else {
field = trace_find_event_field(file->event_call, field_name);
if (!field || !field->size) {
- hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
- field = ERR_PTR(-EINVAL);
- goto out;
+ /*
+ * For backward compatibility, if field_name
+ * was "cpu", then we treat this the same as
+ * common_cpu.
+ */
+ if (strcmp(field_name, "cpu") == 0) {
+ *flags |= HIST_FIELD_FL_CPU;
+ } else {
+ hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
+ errpos(field_name));
+ field = ERR_PTR(-EINVAL);
+ goto out;
+ }
}
}
out:
@@ -5085,7 +5095,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
seq_printf(m, "%s=", hist_field->var.name);
if (hist_field->flags & HIST_FIELD_FL_CPU)
- seq_puts(m, "cpu");
+ seq_puts(m, "common_cpu");
else if (field_name) {
if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
hist_field->flags & HIST_FIELD_FL_ALIAS)
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 2ac75eb6aa86..9315fc03e303 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -893,15 +893,13 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields,
dyn_event_init(&event->devent, &synth_event_ops);
for (i = 0, j = 0; i < n_fields; i++) {
+ fields[i]->field_pos = i;
event->fields[i] = fields[i];
- if (fields[i]->is_dynamic) {
- event->dynamic_fields[j] = fields[i];
- event->dynamic_fields[j]->field_pos = i;
+ if (fields[i]->is_dynamic)
event->dynamic_fields[j++] = fields[i];
- event->n_dynamic_fields++;
- }
}
+ event->n_dynamic_fields = j;
event->n_fields = n_fields;
out:
return event;
diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h
index 6e146b959dcd..4007fe95cf42 100644
--- a/kernel/trace/trace_synth.h
+++ b/kernel/trace/trace_synth.h
@@ -14,10 +14,10 @@ struct synth_field {
char *name;
size_t size;
unsigned int offset;
+ unsigned int field_pos;
bool is_signed;
bool is_string;
bool is_dynamic;
- bool field_pos;
};
struct synth_event {
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 976bf8ce8039..fc32821f8240 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -299,8 +299,8 @@ static int tracepoint_add_func(struct tracepoint *tp,
* a pointer to it. This array is referenced by __DO_TRACE from
* include/linux/tracepoint.h using rcu_dereference_sched().
*/
- rcu_assign_pointer(tp->funcs, tp_funcs);
tracepoint_update_call(tp, tp_funcs, false);
+ rcu_assign_pointer(tp->funcs, tp_funcs);
static_key_enable(&tp->key);
release_probes(old);
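
The hunk above makes the static-call target valid before the callback array is published: a reader that dereferences tp->funcs must never see an array the call site is not yet prepared to iterate. A minimal userspace sketch of the same publish-after-initialize discipline, with C11 release/acquire standing in for rcu_assign_pointer()/rcu_dereference_sched(); all names here are invented for illustration, not the kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct funcs {
	int nr;
	void (**cb)(void);
};

static _Atomic(struct funcs *) published;

static void hello(void) { puts("hello"); }

/* Writer: finish all initialization, then publish with release
 * semantics so readers cannot observe a half-built array. */
static void publish(struct funcs *f)
{
	atomic_store_explicit(&published, f, memory_order_release);
}

/* Reader: the acquire load pairs with the release store above. */
static void call_all(void)
{
	struct funcs *f = atomic_load_explicit(&published, memory_order_acquire);

	if (!f)
		return;
	for (int i = 0; i < f->nr; i++)
		f->cb[i]();
}

int main(void)
{
	struct funcs *f = malloc(sizeof(*f));

	f->cb = malloc(sizeof(*f->cb));
	f->cb[0] = hello;
	f->nr = 1;
	publish(f);	/* only now may readers see it */
	call_all();
	return 0;
}
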
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 50142fc08902..f148eacda55a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
unbound_release_work);
struct workqueue_struct *wq = pwq->wq;
struct worker_pool *pool = pwq->pool;
- bool is_last;
+ bool is_last = false;
- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
- return;
+ /*
+	 * When @pwq is not linked, it does not hold any reference to @wq,
+	 * which may already have been freed, so @wq must not be accessed.
+ */
+ if (!list_empty(&pwq->pwqs_node)) {
+ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+ return;
- mutex_lock(&wq->mutex);
- list_del_rcu(&pwq->pwqs_node);
- is_last = list_empty(&wq->pwqs);
- mutex_unlock(&wq->mutex);
+ mutex_lock(&wq->mutex);
+ list_del_rcu(&pwq->pwqs_node);
+ is_last = list_empty(&wq->pwqs);
+ mutex_unlock(&wq->mutex);
+ }
mutex_lock(&wq_pool_mutex);
put_unbound_pool(pool);
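
The workqueue fix above encodes an ownership rule: an unbound pwq pins its workqueue only while it is linked on wq->pwqs, so the release path may dereference @wq only after confirming the pwq is still linked. A toy, single-threaded sketch of that rule with invented names (no claim to match the real locking):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct parent {
	int refs;
};

struct child {
	struct parent *parent;
	bool linked;	/* on the parent's list, i.e. holding a ref */
};

/* Release path: the parent may only be dereferenced while the child
 * still holds the reference that being linked implies. */
static void child_release(struct child *c)
{
	if (c->linked) {
		c->linked = false;
		c->parent->refs--;
		printf("parent refs now %d\n", c->parent->refs);
	}
	/* From here on c->parent may already be gone: don't touch it. */
	free(c);
}

int main(void)
{
	struct parent p = { .refs = 1 };
	struct child *c = malloc(sizeof(*c));

	c->parent = &p;
	c->linked = true;
	child_release(c);
	return 0;
}
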
diff --git a/lib/Kconfig b/lib/Kconfig
index d241fe476fda..5c9c0687f76d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -683,9 +683,6 @@ config PARMAN
config OBJAGG
tristate "objagg" if COMPILE_TEST
-config STRING_SELFTEST
- tristate "Test string functions"
-
endmenu
config GENERIC_IOREMAP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 831212722924..5ddd575159fb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2180,6 +2180,9 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
+config STRING_SELFTEST
+ tristate "Test string functions at runtime"
+
config TEST_STRING_HELPERS
tristate "Test functions located in the string_helpers module at runtime"
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 271f2ca862c8..f5561ea7d90a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -398,12 +398,12 @@ static void cgwb_release_workfn(struct work_struct *work)
blkcg_unpin_online(blkcg);
fprop_local_destroy_percpu(&wb->memcg_completions);
- percpu_ref_exit(&wb->refcnt);
spin_lock_irq(&cgwb_lock);
list_del(&wb->offline_node);
spin_unlock_irq(&cgwb_lock);
+ percpu_ref_exit(&wb->refcnt);
wb_exit(wb);
WARN_ON_ONCE(!list_empty(&wb->b_attached));
kfree_rcu(wb, rcu);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index d7666ace9d2e..575c685aa642 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -734,6 +734,22 @@ void kfence_shutdown_cache(struct kmem_cache *s)
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
/*
+ * Perform size check before switching kfence_allocation_gate, so that
+ * we don't disable KFENCE without making an allocation.
+ */
+ if (size > PAGE_SIZE)
+ return NULL;
+
+ /*
+ * Skip allocations from non-default zones, including DMA. We cannot
+ * guarantee that pages in the KFENCE pool will have the requested
+ * properties (e.g. reside in DMAable memory).
+ */
+ if ((flags & GFP_ZONEMASK) ||
+ (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+ return NULL;
+
+ /*
* allocation_gate only needs to become non-zero, so it doesn't make
* sense to continue writing to it and pay the associated contention
* cost, in case we have a large number of concurrent allocations.
@@ -757,9 +773,6 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
if (!READ_ONCE(kfence_enabled))
return NULL;
- if (size > PAGE_SIZE)
- return NULL;
-
return kfence_guarded_alloc(s, size, flags);
}
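
Both kfence checks are hoisted above the allocation gate so that a request that can never be serviced (too large, or needing DMA/zone placement the pool cannot guarantee) does not consume the once-per-sample-interval opportunity. A small sketch of that ordering, assuming a single-threaded toy gate in place of the kernel's atomic kfence_allocation_gate:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_SIZE 4096

static int gate;	/* non-zero once this sample slot is consumed */

/* Reject ineligible requests before consuming the gate, so a request
 * that cannot be serviced does not burn the sampling opportunity. */
static bool sampled_alloc(size_t size, bool needs_dma)
{
	if (size > MAX_SIZE || needs_dma)
		return false;		/* cheap checks first */
	if (gate++)
		return false;		/* slot already taken */
	printf("sampled allocation of %zu bytes\n", size);
	return true;
}

int main(void)
{
	sampled_alloc(8192, false);	/* rejected, gate untouched */
	sampled_alloc(128, false);	/* consumes the gate */
	sampled_alloc(128, false);	/* gate already consumed */
	return 0;
}
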
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 7f24b9bcb2ec..942cbc16ad26 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -852,7 +852,7 @@ static void kfence_test_exit(void)
tracepoint_synchronize_unregister();
}
-late_initcall(kfence_test_init);
+late_initcall_sync(kfence_test_init);
module_exit(kfence_test_exit);
MODULE_LICENSE("GPL v2");
diff --git a/mm/memblock.c b/mm/memblock.c
index 0041ff62c584..de7b553baa50 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -947,7 +947,8 @@ static bool should_skip_region(struct memblock_type *type,
return true;
/* skip hotpluggable memory regions if needed */
- if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+ if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
+ !(flags & MEMBLOCK_HOTPLUG))
return true;
/* if we want mirror memory skip non-mirror memory regions */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1bbf239b06f2..8ef06f9e0db1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3574,7 +3574,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
unsigned long val;
if (mem_cgroup_is_root(memcg)) {
- cgroup_rstat_flush(memcg->css.cgroup);
+ /* mem_cgroup_threshold() calls here from irqsafe context */
+ cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
val = memcg_page_state(memcg, NR_FILE_PAGES) +
memcg_page_state(memcg, NR_ANON_MAPPED);
if (swap)
diff --git a/mm/memory.c b/mm/memory.c
index 747a01d495f2..25fc46e87214 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4026,8 +4026,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
return ret;
}
- if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
+ if (vmf->prealloc_pte) {
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (likely(pmd_none(*vmf->pmd))) {
+ mm_inc_nr_ptes(vma->vm_mm);
+ pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
+ vmf->prealloc_pte = NULL;
+ }
+ spin_unlock(vmf->ptl);
+ } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
return VM_FAULT_OOM;
+ }
}
/* See comment in handle_pte_fault() */
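
finish_fault() now prefers a page table that was preallocated earlier in the fault path: take the lock, install the preallocation only if the slot is still empty, and otherwise keep it for the caller to release. A hedged userspace analogue of that double-checked install, with a pthread mutex standing in for the pmd lock and all names invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *slot;	/* analogue of the pmd entry */

/* Install the preallocation only if the slot is still empty; on the
 * contended path someone else populated it and we keep ours. */
static void install_prealloc(int **prealloc)
{
	pthread_mutex_lock(&lock);
	if (!slot) {
		slot = *prealloc;
		*prealloc = NULL;	/* ownership transferred */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int *prealloc = calloc(1, sizeof(*prealloc));

	install_prealloc(&prealloc);
	printf("installed: %s\n", slot ? "yes" : "no");
	free(prealloc);			/* no-op if it was installed */
	return 0;
}
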
diff --git a/mm/migrate.c b/mm/migrate.c
index 34a9ad3e0a4f..7e240437e7d9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2068,7 +2068,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
LIST_HEAD(migratepages);
new_page_t *new;
bool compound;
- unsigned int nr_pages = thp_nr_pages(page);
+ int nr_pages = thp_nr_pages(page);
/*
* PTE mapped THP or HugeTLB page can't reach here so the page could
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index f5852a058ce0..1854850b4b89 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -156,14 +156,14 @@ static inline void put_memcg_path_buf(void)
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
do { \
const char *memcg_path; \
- preempt_disable(); \
+ local_lock(&memcg_paths.lock); \
memcg_path = get_mm_memcg_path(mm); \
trace_mmap_lock_##type(mm, \
memcg_path != NULL ? memcg_path : "", \
##__VA_ARGS__); \
if (likely(memcg_path != NULL)) \
put_memcg_path_buf(); \
- preempt_enable(); \
+ local_unlock(&memcg_paths.lock); \
} while (0)
#else /* !CONFIG_MEMCG */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3e97e68aef7a..856b175c15a4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -840,21 +840,24 @@ void init_mem_debugging_and_hardening(void)
}
#endif
- if (_init_on_alloc_enabled_early) {
- if (page_poisoning_requested)
- pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_alloc\n");
- else
- static_branch_enable(&init_on_alloc);
- }
- if (_init_on_free_enabled_early) {
- if (page_poisoning_requested)
- pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_free\n");
- else
- static_branch_enable(&init_on_free);
+ if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
+ page_poisoning_requested) {
+ pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ "will take precedence over init_on_alloc and init_on_free\n");
+ _init_on_alloc_enabled_early = false;
+ _init_on_free_enabled_early = false;
}
+ if (_init_on_alloc_enabled_early)
+ static_branch_enable(&init_on_alloc);
+ else
+ static_branch_disable(&init_on_alloc);
+
+ if (_init_on_free_enabled_early)
+ static_branch_enable(&init_on_free);
+ else
+ static_branch_disable(&init_on_free);
+
#ifdef CONFIG_DEBUG_PAGEALLOC
if (!debug_pagealloc_enabled())
return;
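
The restructured logic resolves the conflict with page poisoning once, then applies both static branches unconditionally, which also allows a boot parameter to switch a default-on branch off. Roughly, as plain flags (a sketch of the control flow only, not the static-key machinery):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool init_on_alloc = true, init_on_free = true;
	bool page_poisoning = true;

	/* Resolve the conflict once, up front... */
	if ((init_on_alloc || init_on_free) && page_poisoning) {
		puts("page poisoning takes precedence");
		init_on_alloc = false;
		init_on_free = false;
	}

	/* ...then apply both switches unconditionally, so a boot-time
	 * "off" can also disable a default-on setting. */
	printf("init_on_alloc=%d init_on_free=%d\n",
	       init_on_alloc, init_on_free);
	return 0;
}
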
diff --git a/mm/secretmem.c b/mm/secretmem.c
index f77d25467a14..030f02ddc7c1 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -152,6 +152,7 @@ static void secretmem_freepage(struct page *page)
}
const struct address_space_operations secretmem_aops = {
+ .set_page_dirty = __set_page_dirty_no_writeback,
.freepage = secretmem_freepage,
.migratepage = secretmem_migratepage,
.isolate_page = secretmem_isolate_page,
diff --git a/mm/slab.h b/mm/slab.h
index f997fd5e42c8..58c01a34e5b8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -346,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
continue;
page = virt_to_head_page(p[i]);
- objcgs = page_objcgs(page);
+ objcgs = page_objcgs_check(page);
if (!objcgs)
continue;
diff --git a/mm/slub.c b/mm/slub.c
index 090fa14628f9..af984e4990e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3236,6 +3236,16 @@ struct detached_freelist {
struct kmem_cache *s;
};
+static inline void free_nonslab_page(struct page *page)
+{
+ unsigned int order = compound_order(page);
+
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
+ kfree_hook(page_address(page));
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+ __free_pages(page, order);
+}
+
/*
* This function progressively scans the array with free objects (with
* a limited look ahead) and extract objects belonging to the same
@@ -3272,9 +3282,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
if (!s) {
/* Handle kalloc'ed objects */
if (unlikely(!PageSlab(page))) {
- BUG_ON(!PageCompound(page));
- kfree_hook(object);
- __free_pages(page, compound_order(page));
+ free_nonslab_page(page);
p[size] = NULL; /* mark object processed */
return size;
}
@@ -4250,13 +4258,7 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
- unsigned int order = compound_order(page);
-
- BUG_ON(!PageCompound(page));
- kfree_hook(object);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
- -(PAGE_SIZE << order));
- __free_pages(page, order);
+ free_nonslab_page(page);
return;
}
slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index efdf79de3db6..dac70cdd3f41 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1076,11 +1076,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session)
static bool j1939_session_deactivate(struct j1939_session *session)
{
+ struct j1939_priv *priv = session->priv;
bool active;
- j1939_session_list_lock(session->priv);
+ j1939_session_list_lock(priv);
+ /* This function should be called with a session ref-count of at
+ * least 2.
+ */
+ WARN_ON_ONCE(kref_read(&session->kref) < 2);
active = j1939_session_deactivate_locked(session);
- j1939_session_list_unlock(session->priv);
+ j1939_session_list_unlock(priv);
return active;
}
@@ -1874,7 +1879,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
if (!session->transmission)
j1939_tp_schedule_txtimer(session, 0);
} else {
- j1939_tp_set_rxtimeout(session, 250);
+ j1939_tp_set_rxtimeout(session, 750);
}
session->last_cmd = 0xff;
consume_skb(se_skb);
diff --git a/net/can/raw.c b/net/can/raw.c
index ed4fcb7ab0c3..cd5a49380116 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -546,10 +546,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
}
+ rtnl_lock();
lock_sock(sk);
- if (ro->bound && ro->ifindex)
+ if (ro->bound && ro->ifindex) {
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+ if (!dev) {
+ if (count > 1)
+ kfree(filter);
+ err = -ENODEV;
+ goto out_fil;
+ }
+ }
if (ro->bound) {
/* (try to) register the new filters */
@@ -588,6 +596,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
dev_put(dev);
release_sock(sk);
+ rtnl_unlock();
break;
@@ -600,10 +609,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
err_mask &= CAN_ERR_MASK;
+ rtnl_lock();
lock_sock(sk);
- if (ro->bound && ro->ifindex)
+ if (ro->bound && ro->ifindex) {
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_err;
+ }
+ }
/* remove current error mask */
if (ro->bound) {
@@ -627,6 +642,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
dev_put(dev);
release_sock(sk);
+ rtnl_unlock();
break;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index fd2fc2befba9..8fa015319af6 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -9327,18 +9327,10 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
switch (attrs->flavour) {
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
n = snprintf(name, len, "p%u", attrs->phys.port_number);
if (n < len && attrs->split)
n += snprintf(name + n, len - n, "s%u",
attrs->phys.split_subport_number);
- if (!attrs->split)
- n = snprintf(name, len, "p%u", attrs->phys.port_number);
- else
- n = snprintf(name, len, "p%us%u",
- attrs->phys.port_number,
- attrs->phys.split_subport_number);
-
break;
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
@@ -9380,6 +9372,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
attrs->pci_sf.sf);
break;
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ return -EOPNOTSUPP;
}
if (n >= len)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 39d7be03e568..bac0184cf3de 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1508,7 +1508,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
}
EXPORT_SYMBOL(flow_get_u32_dst);
-/* Sort the source and destination IP (and the ports if the IP are the same),
+/* Sort the source and destination IP addresses and the ports,
* to have consistent hash within the two directions
*/
static inline void __flow_hash_consistentify(struct flow_keys *keys)
@@ -1519,11 +1519,11 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
addr_diff = (__force u32)keys->addrs.v4addrs.dst -
(__force u32)keys->addrs.v4addrs.src;
- if ((addr_diff < 0) ||
- (addr_diff == 0 &&
- ((__force u16)keys->ports.dst <
- (__force u16)keys->ports.src))) {
+ if (addr_diff < 0)
swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+
+ if ((__force u16)keys->ports.dst <
+ (__force u16)keys->ports.src) {
swap(keys->ports.src, keys->ports.dst);
}
break;
@@ -1531,13 +1531,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
addr_diff = memcmp(&keys->addrs.v6addrs.dst,
&keys->addrs.v6addrs.src,
sizeof(keys->addrs.v6addrs.dst));
- if ((addr_diff < 0) ||
- (addr_diff == 0 &&
- ((__force u16)keys->ports.dst <
- (__force u16)keys->ports.src))) {
+ if (addr_diff < 0) {
for (i = 0; i < 4; i++)
swap(keys->addrs.v6addrs.src.s6_addr32[i],
keys->addrs.v6addrs.dst.s6_addr32[i]);
+ }
+ if ((__force u16)keys->ports.dst <
+ (__force u16)keys->ports.src) {
swap(keys->ports.src, keys->ports.dst);
}
break;
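
With this change the two directions of a flow are canonicalized field by field: addresses are ordered, and ports are ordered independently of how the address comparison went (previously ports were only swapped together with, or on equality of, the addresses). A minimal sketch of the resulting canonicalization for an IPv4 4-tuple, with invented helper names:

#include <stdint.h>
#include <stdio.h>

/* Canonicalize so (saddr, daddr, sport, dport) and its reverse map to
 * the same tuple: order the addresses, then order the ports
 * independently of the address comparison. */
static void consistentify(uint32_t *src, uint32_t *dst,
			  uint16_t *sport, uint16_t *dport)
{
	if ((int32_t)(*dst - *src) < 0) {
		uint32_t tmp = *src;
		*src = *dst;
		*dst = tmp;
	}
	if (*dport < *sport) {
		uint16_t tmp = *sport;
		*sport = *dport;
		*dport = tmp;
	}
}

int main(void)
{
	uint32_t s = 0x0a000002, d = 0x0a000001;
	uint16_t sp = 80, dp = 40000;

	consistentify(&s, &d, &sp, &dp);
	printf("%08x %08x %u %u\n",
	       (unsigned)s, (unsigned)d, (unsigned)sp, (unsigned)dp);
	return 0;
}
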
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 15d71288e741..2d6249b28928 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -584,29 +584,42 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
return sk_psock_skb_ingress(psock, skb);
}
-static void sock_drop(struct sock *sk, struct sk_buff *skb)
+static void sk_psock_skb_state(struct sk_psock *psock,
+ struct sk_psock_work_state *state,
+ struct sk_buff *skb,
+ int len, int off)
{
- sk_drops_add(sk, skb);
- kfree_skb(skb);
+ spin_lock_bh(&psock->ingress_lock);
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ state->skb = skb;
+ state->len = len;
+ state->off = off;
+ } else {
+ sock_drop(psock->sk, skb);
+ }
+ spin_unlock_bh(&psock->ingress_lock);
}
static void sk_psock_backlog(struct work_struct *work)
{
struct sk_psock *psock = container_of(work, struct sk_psock, work);
struct sk_psock_work_state *state = &psock->work_state;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
bool ingress;
u32 len, off;
int ret;
mutex_lock(&psock->work_mutex);
- if (state->skb) {
+ if (unlikely(state->skb)) {
+ spin_lock_bh(&psock->ingress_lock);
skb = state->skb;
len = state->len;
off = state->off;
state->skb = NULL;
- goto start;
+ spin_unlock_bh(&psock->ingress_lock);
}
+ if (skb)
+ goto start;
while ((skb = skb_dequeue(&psock->ingress_skb))) {
len = skb->len;
@@ -621,9 +634,8 @@ start:
len, ingress);
if (ret <= 0) {
if (ret == -EAGAIN) {
- state->skb = skb;
- state->len = len;
- state->off = off;
+ sk_psock_skb_state(psock, state, skb,
+ len, off);
goto end;
}
/* Hard errors break pipe and stop xmit. */
@@ -722,6 +734,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
skb_bpf_redirect_clear(skb);
sock_drop(psock->sk, skb);
}
+ kfree_skb(psock->work_state.skb);
+ /* We null the skb here to ensure that calls to sk_psock_backlog
+	 * do not pick up the freed skb.
+ */
+ psock->work_state.skb = NULL;
__sk_psock_purge_ingress_msg(psock);
}
@@ -773,8 +790,6 @@ static void sk_psock_destroy(struct work_struct *work)
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
- sk_psock_stop(psock, false);
-
write_lock_bh(&sk->sk_callback_lock);
sk_psock_restore_proto(sk, psock);
rcu_assign_sk_user_data(sk, NULL);
@@ -784,6 +799,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
sk_psock_stop_verdict(sk, psock);
write_unlock_bh(&sk->sk_callback_lock);
+ sk_psock_stop(psock, false);
+
INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
queue_rcu_work(system_wq, &psock->rwork);
}
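
The new sk_psock_skb_state() saves the in-progress skb under ingress_lock and only while the psock is still enabled, so teardown, which takes the same lock, either sees the saved skb and frees it, or guarantees it was never saved. A toy sketch of that save-or-drop handoff using a pthread mutex (names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ingress_lock = PTHREAD_MUTEX_INITIALIZER;
static bool tx_enabled = true;
static void *saved;	/* analogue of psock->work_state.skb */

/* Save work-in-progress only while still enabled, else drop it.
 * Teardown takes the same lock, so it either sees the saved item
 * and frees it, or we never saved it in the first place. */
static void save_or_drop(void *item)
{
	pthread_mutex_lock(&ingress_lock);
	if (tx_enabled)
		saved = item;
	else
		free(item);
	pthread_mutex_unlock(&ingress_lock);
}

int main(void)
{
	save_or_drop(malloc(16));
	printf("saved: %s\n", saved ? "yes" : "no");
	free(saved);
	return 0;
}
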
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 7f0e810c06f4..fe9101d3d69e 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -390,7 +390,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
- skb_reset_network_header(skb);
+ skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b7b27d94ef61..d72347c75f8b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -549,9 +549,10 @@ int ip6_forward(struct sk_buff *skb)
if (net->ipv6.devconf_all->proxy_ndp &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
- if (proxied > 0)
+ if (proxied > 0) {
+ hdr->hop_limit--;
return ip6_input(skb);
- else if (proxied < 0) {
+ } else if (proxied < 0) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7180979114e4..ac5cadd02cfa 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
{
u8 rc = LLC_PDU_LEN_U;
- if (addr->sllc_test || addr->sllc_xid)
+ if (addr->sllc_test)
rc = LLC_PDU_LEN_U;
+ else if (addr->sllc_xid)
+		/* We need to expand the header by sizeof(struct llc_xid_info),
+		 * since llc_pdu_init_as_xid_cmd() writes bytes 4, 5 and 6 of the
+		 * LLC header as the XID PDU. In llc_ui_sendmsg() we reserve the
+		 * header size and then fill all remaining space with user data;
+		 * if we don't reserve these bytes, it will overwrite user data.
+		 */
+ rc = LLC_PDU_LEN_U_XID;
else if (sk->sk_type == SOCK_STREAM)
rc = LLC_PDU_LEN_I;
return rc;
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index b554f26c68ee..79d1cef8f15a 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
int rc;
- llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+ llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 84cc7733ea66..4e6f11e63df3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -152,6 +152,8 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
struct vif_params *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
int ret;
ret = ieee80211_if_change_type(sdata, type);
@@ -162,7 +164,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
ieee80211_check_fast_rx_iface(sdata);
} else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ if (params->use_4addr == ifmgd->use_4addr)
+ return 0;
+
sdata->u.mgd.use_4addr = params->use_4addr;
+ if (!ifmgd->associated)
+ return 0;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, ifmgd->bssid);
+ if (sta)
+ drv_sta_set_4addr(local, sdata, &sta->sta,
+ params->use_4addr);
+ mutex_unlock(&local->sta_mtx);
+
+ if (params->use_4addr)
+ ieee80211_send_4addr_nullfunc(local, sdata);
}
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 22549b95d1aa..30ce6d2ec7ce 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2201,6 +2201,8 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
bool powersave);
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a00f11a33699..c0ea3b1aa9e1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1095,8 +1095,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
ieee80211_tx_skb(sdata, skb);
}
-static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
struct sk_buff *skb;
struct ieee80211_hdr *nullfunc;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 771921c057e8..2563473b5cf1 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -730,7 +730,8 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local,
* Need to make a copy and possibly remove radiotap header
* and FCS from the original.
*/
- skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
+ skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
+ 0, GFP_ATOMIC);
if (!skb)
return NULL;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e96981144358..8509778ff31f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1147,6 +1147,29 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
return queued;
}
+static void
+ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ struct rate_control_ref *ref = sdata->local->rate_ctrl;
+ u16 tid;
+
+ if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
+ return;
+
+ if (!sta || !sta->sta.ht_cap.ht_supported ||
+ !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
+ skb->protocol == sdata->control_port_protocol)
+ return;
+
+ tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ if (likely(sta->ampdu_mlme.tid_tx[tid]))
+ return;
+
+ ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
+}
+
/*
* initialises @tx
* pass %NULL for the station if unknown, a valid pointer if known
@@ -1160,6 +1183,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool aggr_check = false;
int tid;
memset(tx, 0, sizeof(*tx));
@@ -1188,8 +1212,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
} else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}
- if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
+ if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
tx->sta = sta_info_get(sdata, hdr->addr1);
+ aggr_check = true;
+ }
}
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
@@ -1199,8 +1225,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_tx *tid_tx;
tid = ieee80211_get_tid(hdr);
-
tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx && aggr_check) {
+ ieee80211_aggr_check(sdata, tx->sta, skb);
+ tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+ }
+
if (tid_tx) {
bool queued;
@@ -4120,29 +4150,6 @@ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-static void
-ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta,
- struct sk_buff *skb)
-{
- struct rate_control_ref *ref = sdata->local->rate_ctrl;
- u16 tid;
-
- if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
- return;
-
- if (!sta || !sta->sta.ht_cap.ht_supported ||
- !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
- skb->protocol == sdata->control_port_protocol)
- return;
-
- tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- if (likely(sta->ampdu_mlme.tid_tx[tid]))
- return;
-
- ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
-}
-
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 83c52df85870..5c03e5106751 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -670,8 +670,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
return false;
tstamp = nf_conn_tstamp_find(ct);
- if (tstamp && tstamp->stop == 0)
+ if (tstamp) {
+ s32 timeout = ct->timeout - nfct_time_stamp;
+
tstamp->stop = ktime_get_real_ns();
+ if (timeout < 0)
+ tstamp->stop -= jiffies_to_nsecs(-timeout);
+ }
if (nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0) {
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 8fe024a0ae46..8ecad71b3613 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -331,7 +331,11 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
void flow_offload_refresh(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
- flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+ u32 timeout;
+
+ timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+ if (READ_ONCE(flow->timeout) != timeout)
+ WRITE_ONCE(flow->timeout, timeout);
if (likely(!nf_flowtable_hw_offload(flow_table)))
return;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index de182d1f7c4e..081437dd75b7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8445,6 +8445,16 @@ static int nf_tables_commit_audit_alloc(struct list_head *adl,
return 0;
}
+static void nf_tables_commit_audit_free(struct list_head *adl)
+{
+ struct nft_audit_data *adp, *adn;
+
+ list_for_each_entry_safe(adp, adn, adl, list) {
+ list_del(&adp->list);
+ kfree(adp);
+ }
+}
+
static void nf_tables_commit_audit_collect(struct list_head *adl,
struct nft_table *table, u32 op)
{
@@ -8509,6 +8519,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
if (ret) {
nf_tables_commit_chain_prepare_cancel(net);
+ nf_tables_commit_audit_free(&adl);
return ret;
}
if (trans->msg_type == NFT_MSG_NEWRULE ||
@@ -8518,6 +8529,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
ret = nf_tables_commit_chain_prepare(net, chain);
if (ret < 0) {
nf_tables_commit_chain_prepare_cancel(net);
+ nf_tables_commit_audit_free(&adl);
return ret;
}
}
diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c
index 50b4e3c9347a..202f57d17bab 100644
--- a/net/netfilter/nfnetlink_hook.c
+++ b/net/netfilter/nfnetlink_hook.c
@@ -174,7 +174,9 @@ static const struct nf_hook_entries *
nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
{
const struct nf_hook_entries *hook_head = NULL;
+#ifdef CONFIG_NETFILTER_INGRESS
struct net_device *netdev;
+#endif
switch (pf) {
case NFPROTO_IPV4:
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
index 8088b99f2ee3..304e33cbed9b 100644
--- a/net/netfilter/nft_last.c
+++ b/net/netfilter/nft_last.c
@@ -48,24 +48,30 @@ static void nft_last_eval(const struct nft_expr *expr,
{
struct nft_last_priv *priv = nft_expr_priv(expr);
- priv->last_jiffies = jiffies;
- priv->last_set = 1;
+ if (READ_ONCE(priv->last_jiffies) != jiffies)
+ WRITE_ONCE(priv->last_jiffies, jiffies);
+ if (READ_ONCE(priv->last_set) == 0)
+ WRITE_ONCE(priv->last_set, 1);
}
static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_last_priv *priv = nft_expr_priv(expr);
+ unsigned long last_jiffies = READ_ONCE(priv->last_jiffies);
+ u32 last_set = READ_ONCE(priv->last_set);
__be64 msecs;
- if (time_before(jiffies, priv->last_jiffies))
- priv->last_set = 0;
+ if (time_before(jiffies, last_jiffies)) {
+ WRITE_ONCE(priv->last_set, 0);
+ last_set = 0;
+ }
- if (priv->last_set)
- msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+ if (last_set)
+ msecs = nf_jiffies64_to_msecs(jiffies - last_jiffies);
else
msecs = 0;
- if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+ if (nla_put_be32(skb, NFTA_LAST_SET, htonl(last_set)) ||
nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
goto nla_put_failure;
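
The nft_last hot path is lockless, so it uses READ_ONCE()/WRITE_ONCE() for single-copy-atomic accesses and skips stores whose value would not change, avoiding dirtying a shared cacheline on every packet. A userspace approximation with relaxed C11 atomics, which is roughly what READ_ONCE/WRITE_ONCE provide on most architectures (a sketch, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long last_jiffies;
static _Atomic unsigned int last_set;

/* Hot path: single-copy-atomic accesses tolerate racy readers, and
 * skipping stores of an unchanged value avoids dirtying the shared
 * cacheline on every packet. */
static void last_eval(unsigned long now)
{
	if (atomic_load_explicit(&last_jiffies, memory_order_relaxed) != now)
		atomic_store_explicit(&last_jiffies, now, memory_order_relaxed);
	if (atomic_load_explicit(&last_set, memory_order_relaxed) == 0)
		atomic_store_explicit(&last_set, 1, memory_order_relaxed);
}

int main(void)
{
	last_eval(12345);
	printf("set=%u at %lu\n",
	       atomic_load(&last_set), atomic_load(&last_jiffies));
	return 0;
}
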
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 0840c635b752..be1595d6979d 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -201,7 +201,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
break;
default:
- return -EAFNOSUPPORT;
+ if (tb[NFTA_NAT_REG_ADDR_MIN])
+ return -EAFNOSUPPORT;
+ break;
}
priv->family = family;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index e71847877248..6c61b7b1838f 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -518,8 +518,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
if (!ipc)
goto err;
- if (sock_queue_rcv_skb(&ipc->sk, skb))
+ if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+ qrtr_port_put(ipc);
goto err;
+ }
qrtr_port_put(ipc);
}
@@ -839,6 +841,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
ipc = qrtr_port_lookup(to->sq_port);
if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+ if (ipc)
+ qrtr_port_put(ipc);
kfree_skb(skb);
return -ENODEV;
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index eb3c2a34a31c..5ef86fdb1176 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -1203,7 +1203,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
if (unlikely(!af))
return NULL;
- if (af->from_addr_param(&paddr, param, peer_port, 0))
+ if (!af->from_addr_param(&paddr, param, peer_port, 0))
return NULL;
return __sctp_lookup_association(net, laddr, &paddr, transportp);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e48dd909dee5..470dbdc27d58 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -100,8 +100,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
list_for_each_entry_safe(addr, temp,
&net->sctp.local_addr_list, list) {
if (addr->a.sa.sa_family == AF_INET6 &&
- ipv6_addr_equal(&addr->a.v6.sin6_addr,
- &ifa->addr)) {
+ ipv6_addr_equal(&addr->a.v6.sin6_addr,
+ &ifa->addr) &&
+ addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 09a8f23ec709..32df65f68c12 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1109,12 +1109,12 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net,
if (!sctp_transport_pl_enabled(transport))
return SCTP_DISPOSITION_CONSUME;
- sctp_transport_pl_send(transport);
-
- reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
- if (!reply)
- return SCTP_DISPOSITION_NOMEM;
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ if (sctp_transport_pl_send(transport)) {
+ reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
@@ -1274,8 +1274,7 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
!sctp_transport_pl_enabled(link))
return SCTP_DISPOSITION_DISCARD;
- sctp_transport_pl_recv(link);
- if (link->pl.state == SCTP_PL_COMPLETE)
+ if (sctp_transport_pl_recv(link))
return SCTP_DISPOSITION_CONSUME;
return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 397a6244dd97..a3d3ca6dd63d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -258,16 +258,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
sctp_transport_pl_update(transport);
}
-void sctp_transport_pl_send(struct sctp_transport *t)
+bool sctp_transport_pl_send(struct sctp_transport *t)
{
- pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
- __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
-
- if (t->pl.probe_count < SCTP_MAX_PROBES) {
- t->pl.probe_count++;
- return;
- }
+ if (t->pl.probe_count < SCTP_MAX_PROBES)
+ goto out;
+ t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
+ t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
@@ -299,14 +296,27 @@ void sctp_transport_pl_send(struct sctp_transport *t)
sctp_assoc_sync_pmtu(t->asoc);
}
}
- t->pl.probe_count = 1;
+
+out:
+ if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
+ !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
+ t->pl.raise_count++;
+ return false;
+ }
+
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+ t->pl.probe_count++;
+ return true;
}
-void sctp_transport_pl_recv(struct sctp_transport *t)
+bool sctp_transport_pl_recv(struct sctp_transport *t)
{
pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
__func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+ t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
t->pl.pmtu = t->pl.probe_size;
t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
@@ -323,7 +333,7 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
if (!t->pl.probe_high) {
t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
SCTP_MAX_PLPMTU);
- return;
+ return false;
}
t->pl.probe_size += SCTP_PL_MIN_STEP;
if (t->pl.probe_size >= t->pl.probe_high) {
@@ -335,14 +345,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
sctp_assoc_sync_pmtu(t->asoc);
}
- } else if (t->pl.state == SCTP_PL_COMPLETE) {
- t->pl.raise_count++;
- if (t->pl.raise_count == 30) {
- /* Raise probe_size again after 30 * interval in Search Complete */
- t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
- t->pl.probe_size += SCTP_PL_MIN_STEP;
- }
+ } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
+ /* Raise probe_size again after 30 * interval in Search Complete */
+ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+ t->pl.probe_size += SCTP_PL_MIN_STEP;
}
+
+ return t->pl.state == SCTP_PL_COMPLETE;
}
static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index e5c43d4d5a75..c9391d38de85 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -898,16 +898,10 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
if (unlikely(!aead))
return -ENOKEY;
- /* Cow skb data if needed */
- if (likely(!skb_cloned(skb) &&
- (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
- nsg = 1 + skb_shinfo(skb)->nr_frags;
- } else {
- nsg = skb_cow_data(skb, 0, &unused);
- if (unlikely(nsg < 0)) {
- pr_err("RX: skb_cow_data() returned %d\n", nsg);
- return nsg;
- }
+ nsg = skb_cow_data(skb, 0, &unused);
+ if (unlikely(nsg < 0)) {
+ pr_err("RX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
}
/* Allocate memory for the AEAD operation */
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b0dd183a4dbc..b15b2b1b2f38 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -158,6 +158,7 @@ static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -1515,8 +1516,13 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
rc = 0;
}
- if (unlikely(syn && !rc))
+ if (unlikely(syn && !rc)) {
tipc_set_sk_state(sk, TIPC_CONNECTING);
+ if (timeout) {
+ timeout = msecs_to_jiffies(timeout);
+ tipc_wait_for_connect(sock, &timeout);
+ }
+ }
return rc ? rc : dlen;
}
@@ -1564,7 +1570,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
return -EMSGSIZE;
/* Handle implicit connection setup */
- if (unlikely(dest)) {
+ if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
rc = __tipc_sendmsg(sock, m, dlen);
if (dlen && dlen == rc) {
tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
@@ -2666,7 +2672,7 @@ static int tipc_listen(struct socket *sock, int len)
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
struct sock *sk = sock->sk;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
/* True wake-one mechanism for incoming connections: only
@@ -2675,12 +2681,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
* anymore, the common case will execute the loop only once.
*/
for (;;) {
- prepare_to_wait_exclusive(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2692,7 +2698,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
if (signal_pending(current))
break;
}
- finish_wait(sk_sleep(sk), &wait);
return err;
}
@@ -2709,9 +2714,10 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
bool kern)
{
struct sock *new_sk, *sk = sock->sk;
- struct sk_buff *buf;
struct tipc_sock *new_tsock;
+ struct msghdr m = {NULL,};
struct tipc_msg *msg;
+ struct sk_buff *buf;
long timeo;
int res;
@@ -2757,19 +2763,17 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
}
/*
- * Respond to 'SYN-' by discarding it & returning 'ACK'-.
- * Respond to 'SYN+' by queuing it on new socket.
+ * Respond to 'SYN-' by discarding it & returning 'ACK'.
+ * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
*/
if (!msg_data_sz(msg)) {
- struct msghdr m = {NULL,};
-
tsk_advance_rx_queue(sk);
- __tipc_sendstream(new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
skb_set_owner_r(buf, new_sk);
}
+ __tipc_sendstream(new_sock, &m, 0);
release_sock(new_sk);
exit:
release_sock(sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 89927678c0dc..256c4e31132e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1543,6 +1543,53 @@ out:
return err;
}
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+ /*
+ * Garbage collection of unix sockets starts by selecting a set of
+ * candidate sockets which have reference only from being in flight
+ * (total_refs == inflight_refs). This condition is checked once during
+ * the candidate collection phase, and candidates are marked as such, so
+ * that non-candidates can later be ignored. While inflight_refs is
+	 * candidate sockets which are referenced only by being in flight
+ * is an instantaneous decision.
+ *
+ * Once a candidate, however, the socket must not be reinstalled into a
+ * file descriptor while the garbage collection is in progress.
+ *
+ * If the above conditions are met, then the directed graph of
+ * candidates (*) does not change while unix_gc_lock is held.
+ *
+	 * Any operation that changes the file count through file descriptors
+ * (dup, close, sendmsg) does not change the graph since candidates are
+ * not installed in fds.
+ *
+	 * Dequeuing a candidate via recvmsg would install it into an fd, but
+ * that takes unix_gc_lock to decrement the inflight count, so it's
+ * serialized with garbage collection.
+ *
+ * MSG_PEEK is special in that it does not change the inflight count,
+ * yet does install the socket into an fd. The following lock/unlock
+ * pair is to ensure serialization with garbage collection. It must be
+ * done between incrementing the file count and installing the file into
+ * an fd.
+ *
+ * If garbage collection starts after the barrier provided by the
+ * lock/unlock, then it will see the elevated refcount and not mark this
+ * as a candidate. If a garbage collection is already in progress
+ * before the file count was incremented, then the lock/unlock pair will
+ * ensure that garbage collection is finished before progressing to
+ * installing the fd.
+ *
+ * (*) A -> B where B is on the queue of A or B is on the queue of C
+ * which is on the queue of listening socket A.
+ */
+ spin_lock(&unix_gc_lock);
+ spin_unlock(&unix_gc_lock);
+}
+
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
@@ -2192,7 +2239,7 @@ int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
sk_peek_offset_fwd(sk, size);
if (UNIXCB(skb).fp)
- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+ unix_peek_fds(&scm, skb);
}
err = (flags & MSG_TRUNC) ? skb->len - skip : size;
@@ -2482,7 +2529,7 @@ unlock:
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+ unix_peek_fds(&scm, skb);
sk_peek_offset_fwd(sk, chunk);
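
The empty spin_lock()/spin_unlock() pair in unix_peek_fds() is used purely as a barrier against a concurrent garbage-collection cycle: placed between bumping the file count and installing the fd, it lets an in-progress GC drain first and makes any later GC observe the elevated count. A small pthread demonstration of the lock/unlock-pair idiom; the sleep merely widens the race window in this toy:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

static void *gc_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&gc_lock);
	puts("gc: scanning candidates");
	usleep(1000);	/* pretend to collect */
	pthread_mutex_unlock(&gc_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gc_thread, NULL);
	/* ...elevate the "file count" here, then the empty critical
	 * section: any GC already inside finishes before we proceed,
	 * and any GC starting later sees the elevated count. */
	pthread_mutex_lock(&gc_lock);
	pthread_mutex_unlock(&gc_lock);
	puts("peek: safe to install the fd now");
	pthread_join(t, NULL);
	return 0;
}
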
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 50eb405b0690..16c88beea48b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2351,7 +2351,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
for (band = state->band_start;
- band < NUM_NL80211_BANDS; band++) {
+ band < (state->split ?
+ NUM_NL80211_BANDS :
+ NL80211_BAND_60GHZ + 1);
+ band++) {
struct ieee80211_supported_band *sband;
/* omit higher bands for ancient software */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index f03c7ac8e184..7897b1478c3c 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1754,16 +1754,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
* be grouped with this beacon for updates ...
*/
if (!cfg80211_combine_bsses(rdev, new)) {
- kfree(new);
+ bss_ref_put(rdev, new);
goto drop;
}
}
if (rdev->bss_entries >= bss_entries_limit &&
!cfg80211_bss_expire_oldest(rdev)) {
- if (!list_empty(&new->hidden_list))
- list_del(&new->hidden_list);
- kfree(new);
+ bss_ref_put(rdev, new);
goto drop;
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 14e32825c339..6a2971a7e6a1 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -246,12 +246,18 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
return false;
- if (substream->ops->mmap ||
- (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
- substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
+ if (substream->ops->mmap)
return true;
- return dma_can_mmap(substream->dma_buffer.dev.dev);
+ switch (substream->dma_buffer.dev.type) {
+ case SNDRV_DMA_TYPE_UNKNOWN:
+ return false;
+ case SNDRV_DMA_TYPE_CONTINUOUS:
+ case SNDRV_DMA_TYPE_VMALLOC:
+ return true;
+ default:
+ return dma_can_mmap(substream->dma_buffer.dev.dev);
+ }
}
static int constrain_mask_params(struct snd_pcm_substream *substream,
@@ -3063,9 +3069,14 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
boundary = 0x7fffffff;
snd_pcm_stream_lock_irq(substream);
/* FIXME: we should consider the boundary for the sync from app */
- if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
- control->appl_ptr = scontrol.appl_ptr;
- else
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
+ err = pcm_lib_apply_appl_ptr(substream,
+ scontrol.appl_ptr);
+ if (err < 0) {
+ snd_pcm_stream_unlock_irq(substream);
+ return err;
+ }
+ } else
scontrol.appl_ptr = control->appl_ptr % boundary;
if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
control->avail_min = scontrol.avail_min;
@@ -3664,6 +3675,8 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
if (substream->ops->page)
page = substream->ops->page(substream, offset);
+ else if (!snd_pcm_get_dma_buf(substream))
+ page = virt_to_page(runtime->dma_area + offset);
else
page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
if (!page)
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index d8be146793ee..c9d0ba353463 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -319,6 +319,10 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
.device = 0x4b55,
},
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x4b58,
+ },
#endif
/* Alder Lake */
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 5bbe6695689d..7ad8c5f7b664 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -816,6 +816,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
set_mode_register(p->chip, 0xc0); /* c0 = STOP */
@@ -855,6 +856,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
+ spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
@@ -880,6 +882,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
@@ -894,6 +897,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
+ spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
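
The sb16_csp fix splits the work into three locking phases: mixer_lock is released before reg_lock is taken and re-acquired afterwards, rather than nesting the two locks. A skeletal sketch of the phase structure with pthread mutexes in place of the driver's spinlocks (invented strings, illustration only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mixer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	/* Phase 1: save and mute the volume under mixer_lock... */
	pthread_mutex_lock(&mixer_lock);
	puts("save + mute PCM volume");
	pthread_mutex_unlock(&mixer_lock);

	/* Phase 2: program the chip under reg_lock alone... */
	pthread_mutex_lock(&reg_lock);
	puts("program CSP registers");
	pthread_mutex_unlock(&reg_lock);

	/* Phase 3: re-take mixer_lock to restore the volume. */
	pthread_mutex_lock(&mixer_lock);
	puts("restore PCM volume");
	pthread_mutex_unlock(&mixer_lock);
	return 0;
}
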
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4b2cc8cb55c4..e143e69d8184 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1940,6 +1940,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
static const struct snd_pci_quirk force_connect_list[] = {
SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1389cfd5e0db..caaf0e8aac11 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -8626,6 +8626,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index 84e3906abd4f..9449fb40a956 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -576,6 +576,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
| SND_SOC_DAIFMT_CBM_CFM,
.init = cz_rt5682_init,
.dpcm_playback = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_play_ops,
SND_SOC_DAILINK_REG(designware1, rt5682, platform),
},
@@ -585,6 +586,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_cap_ops,
SND_SOC_DAILINK_REG(designware2, rt5682, platform),
},
@@ -594,6 +596,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_playback = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_max_play_ops,
SND_SOC_DAILINK_REG(designware3, mx, platform),
},
@@ -604,6 +607,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_dmic0_cap_ops,
SND_SOC_DAILINK_REG(designware3, adau, platform),
},
@@ -614,6 +618,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_dmic1_cap_ops,
SND_SOC_DAILINK_REG(designware2, adau, platform),
},
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 7ebae3f09435..a3b784ed4f70 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1325,7 +1325,7 @@ config SND_SOC_SSM2305
high-efficiency mono Class-D audio power amplifiers.
config SND_SOC_SSM2518
- tristate
+ tristate "Analog Devices SSM2518 Class-D Amplifier"
depends on I2C
config SND_SOC_SSM2602
@@ -1557,6 +1557,7 @@ config SND_SOC_WCD934X
Qualcomm SoCs like SDM845.
config SND_SOC_WCD938X
+ depends on SND_SOC_WCD938X_SDW
tristate
config SND_SOC_WCD938X_SDW
@@ -1813,11 +1814,6 @@ config SND_SOC_ZL38060
which consists of a Digital Signal Processor (DSP), several Digital
Audio Interfaces (DAIs), analog outputs, and a block of 14 GPIOs.
-config SND_SOC_ZX_AUD96P22
- tristate "ZTE ZX AUD96P22 CODEC"
- depends on I2C
- select REGMAP_I2C
-
# Amp
config SND_SOC_LM4857
tristate
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 3000bc128b5b..38356ea2bd6e 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -1695,6 +1695,8 @@ static const struct regmap_config rt5631_regmap_config = {
.reg_defaults = rt5631_reg,
.num_reg_defaults = ARRAY_SIZE(rt5631_reg),
.cache_type = REGCACHE_RBTREE,
+ .use_single_read = true,
+ .use_single_write = true,
};
static int rt5631_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index e4c91571abae..abcd6f483788 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -973,10 +973,14 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
- if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS"))
+ if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0);
- if (!snd_soc_dapm_get_pin_status(dapm, "Vref2"))
+ if (!snd_soc_dapm_get_pin_status(dapm, "Vref2") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0);
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 51870d50f419..b504d63385b3 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1604,6 +1604,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
ret);
return ret;
}
+ regcache_cache_only(aic31xx->regmap, true);
+
aic31xx->dev = &i2c->dev;
aic31xx->irq = i2c->irq;
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 81952984613d..2513922a0292 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -151,8 +151,8 @@ struct aic31xx_pdata {
#define AIC31XX_WORD_LEN_24BITS 0x02
#define AIC31XX_WORD_LEN_32BITS 0x03
#define AIC31XX_IFACE1_MASTER_MASK GENMASK(3, 2)
-#define AIC31XX_BCLK_MASTER BIT(2)
-#define AIC31XX_WCLK_MASTER BIT(3)
+#define AIC31XX_BCLK_MASTER BIT(3)
+#define AIC31XX_WCLK_MASTER BIT(2)
/* AIC31XX_DATA_OFFSET */
#define AIC31XX_DATA_OFFSET_MASK GENMASK(7, 0)
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index c63b717040ed..dcd8aeb45cb3 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -250,8 +250,8 @@ static DECLARE_TLV_DB_SCALE(tlv_pcm, -6350, 50, 0);
static DECLARE_TLV_DB_SCALE(tlv_driver_gain, -600, 100, 0);
/* -12dB min, 0.5dB steps */
static DECLARE_TLV_DB_SCALE(tlv_adc_vol, -1200, 50, 0);
-
-static DECLARE_TLV_DB_LINEAR(tlv_spk_vol, TLV_DB_GAIN_MUTE, 0);
+/* -6dB min, 1dB steps */
+static DECLARE_TLV_DB_SCALE(tlv_tas_driver_gain, -5850, 50, 0);
static DECLARE_TLV_DB_SCALE(tlv_amp_vol, 0, 600, 1);
static const char * const lo_cm_text[] = {
@@ -1063,21 +1063,20 @@ static const struct snd_soc_component_driver soc_component_dev_aic32x4 = {
};
static const struct snd_kcontrol_new aic32x4_tas2505_snd_controls[] = {
- SOC_DOUBLE_R_S_TLV("PCM Playback Volume", AIC32X4_LDACVOL,
- AIC32X4_LDACVOL, 0, -0x7f, 0x30, 7, 0, tlv_pcm),
+ SOC_SINGLE_S8_TLV("PCM Playback Volume",
+ AIC32X4_LDACVOL, -0x7f, 0x30, tlv_pcm),
SOC_ENUM("DAC Playback PowerTune Switch", l_ptm_enum),
- SOC_DOUBLE_R_S_TLV("HP Driver Playback Volume", AIC32X4_HPLGAIN,
- AIC32X4_HPLGAIN, 0, -0x6, 0x1d, 5, 0,
- tlv_driver_gain),
- SOC_DOUBLE_R("HP DAC Playback Switch", AIC32X4_HPLGAIN,
- AIC32X4_HPLGAIN, 6, 0x01, 1),
- SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
+ SOC_SINGLE_TLV("HP Driver Gain Volume",
+ AIC32X4_HPLGAIN, 0, 0x74, 1, tlv_tas_driver_gain),
+ SOC_SINGLE("HP DAC Playback Switch", AIC32X4_HPLGAIN, 6, 1, 1),
- SOC_SINGLE_RANGE_TLV("Speaker Driver Playback Volume", TAS2505_SPKVOL1,
- 0, 0, 117, 1, tlv_spk_vol),
- SOC_SINGLE_TLV("Speaker Amplifier Playback Volume", TAS2505_SPKVOL2,
- 4, 5, 0, tlv_amp_vol),
+ SOC_SINGLE_TLV("Speaker Driver Playback Volume",
+ TAS2505_SPKVOL1, 0, 0x74, 1, tlv_tas_driver_gain),
+ SOC_SINGLE_TLV("Speaker Amplifier Playback Volume",
+ TAS2505_SPKVOL2, 4, 5, 0, tlv_amp_vol),
+
+ SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
};
static const struct snd_kcontrol_new hp_output_mixer_controls[] = {
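
The TAS2505 is a mono device, so the hunk above replaces the stereo SOC_DOUBLE_R_* controls with single-register variants and a TLV scale matching the driver-gain range. As a reference for the macro arguments, a hedged sketch of one such control (register name and range are illustrative):

#include <sound/soc.h>
#include <sound/tlv.h>

#define EXAMPLE_VOL_REG 0x2a    /* hypothetical volume register */

/* DECLARE_TLV_DB_SCALE(name, min, step, mute) takes min and step in
 * 0.01 dB units: -5850/50 means a -58.5 dB floor in 0.5 dB steps. */
static DECLARE_TLV_DB_SCALE(example_gain_tlv, -5850, 50, 0);

static const struct snd_kcontrol_new example_controls[] = {
        /* xname, reg, shift, max, invert, tlv_array */
        SOC_SINGLE_TLV("Example Playback Volume",
                       EXAMPLE_VOL_REG, 0, 0x74, 1, example_gain_tlv),
};
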
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index 78b76eceff8f..2fcc97370be2 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -3317,13 +3317,6 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
(WCD938X_DIGITAL_INTR_LEVEL_0 + i), 0);
}
- ret = wcd938x_irq_init(wcd938x, component->dev);
- if (ret) {
- dev_err(component->dev, "%s: IRQ init failed: %d\n",
- __func__, ret);
- return ret;
- }
-
wcd938x->hphr_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
WCD938X_IRQ_HPHR_PDM_WD_INT);
wcd938x->hphl_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
@@ -3553,7 +3546,6 @@ static int wcd938x_bind(struct device *dev)
}
wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
- wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
if (!wcd938x->txdev) {
@@ -3562,7 +3554,6 @@ static int wcd938x_bind(struct device *dev)
}
wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
- wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq;
wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
if (!wcd938x->tx_sdw_dev) {
dev_err(dev, "could not get txslave with matching of dev\n");
@@ -3595,6 +3586,15 @@ static int wcd938x_bind(struct device *dev)
return PTR_ERR(wcd938x->regmap);
}
+ ret = wcd938x_irq_init(wcd938x, dev);
+ if (ret) {
+ dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
+ wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq;
+
ret = wcd938x_set_micbias_data(wcd938x);
if (ret < 0) {
dev_err(dev, "%s: bad micbias pdata\n", __func__);
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 37aa020f23f6..549d98241dae 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -282,6 +282,7 @@
/*
* HALO_CCM_CORE_CONTROL
*/
+#define HALO_CORE_RESET 0x00000200
#define HALO_CORE_EN 0x00000001
/*
@@ -1213,7 +1214,7 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
mutex_lock(&ctl->dsp->pwr_lock);
- ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, size);
+ ret = wm_coeff_read_ctrl(ctl, ctl->cache, size);
if (!ret && copy_to_user(bytes, ctl->cache, size))
ret = -EFAULT;
@@ -3333,7 +3334,8 @@ static int wm_halo_start_core(struct wm_adsp *dsp)
{
return regmap_update_bits(dsp->regmap,
dsp->base + HALO_CCM_CORE_CONTROL,
- HALO_CORE_EN, HALO_CORE_EN);
+ HALO_CORE_RESET | HALO_CORE_EN,
+ HALO_CORE_RESET | HALO_CORE_EN);
}
static void wm_halo_stop_core(struct wm_adsp *dsp)
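
Starting the HALO core now asserts the RESET bit alongside EN, so the DSP always boots from a clean state. regmap_update_bits() makes that a single read-modify-write on the control register; sketched with hypothetical names:

#include <linux/regmap.h>

#define EXAMPLE_CTRL_REG        0x40            /* hypothetical control register */
#define EXAMPLE_CORE_RESET      0x00000200
#define EXAMPLE_CORE_EN         0x00000001

/* Read EXAMPLE_CTRL_REG, clear the masked bits, OR in the new value,
 * and write back only if anything changed. */
static int example_start_core(struct regmap *map)
{
        return regmap_update_bits(map, EXAMPLE_CTRL_REG,
                                  EXAMPLE_CORE_RESET | EXAMPLE_CORE_EN,
                                  EXAMPLE_CORE_RESET | EXAMPLE_CORE_EN);
}
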
diff --git a/sound/soc/intel/boards/sof_sdw_max98373.c b/sound/soc/intel/boards/sof_sdw_max98373.c
index 0e7ed906b341..25daef910aee 100644
--- a/sound/soc/intel/boards/sof_sdw_max98373.c
+++ b/sound/soc/intel/boards/sof_sdw_max98373.c
@@ -55,43 +55,68 @@ static int spk_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
-static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd)
+static int mx8373_enable_spk_pin(struct snd_pcm_substream *substream, bool enable)
{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *cpu_dai;
int ret;
+ int j;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- /* enable max98373 first */
- ret = max_98373_trigger(substream, cmd);
- if (ret < 0)
- break;
-
- ret = sdw_trigger(substream, cmd);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ret = sdw_trigger(substream, cmd);
- if (ret < 0)
- break;
-
- ret = max_98373_trigger(substream, cmd);
- break;
- default:
- ret = -EINVAL;
- break;
+	/* the speaker pin is set for playback streams only */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ return 0;
+
+ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(cpu_dai->component);
+ char pin_name[16];
+
+ snprintf(pin_name, ARRAY_SIZE(pin_name), "%s Spk",
+ codec_dai->component->name_prefix);
+
+ if (enable)
+ ret = snd_soc_dapm_enable_pin(dapm, pin_name);
+ else
+ ret = snd_soc_dapm_disable_pin(dapm, pin_name);
+
+ if (!ret)
+ snd_soc_dapm_sync(dapm);
}
- return ret;
+ return 0;
+}
+
+static int mx8373_sdw_prepare(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+	/* according to soc_pcm_prepare(), dai link prepare is called first */
+ ret = sdw_prepare(substream);
+ if (ret < 0)
+ return ret;
+
+ return mx8373_enable_spk_pin(substream, true);
+}
+
+static int mx8373_sdw_hw_free(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+	/* according to soc_pcm_hw_free(), dai link free is called first */
+ ret = sdw_hw_free(substream);
+ if (ret < 0)
+ return ret;
+
+ return mx8373_enable_spk_pin(substream, false);
}
static const struct snd_soc_ops max_98373_sdw_ops = {
.startup = sdw_startup,
- .prepare = sdw_prepare,
- .trigger = max98373_sdw_trigger,
- .hw_free = sdw_hw_free,
+ .prepare = mx8373_sdw_prepare,
+ .trigger = sdw_trigger,
+ .hw_free = mx8373_sdw_hw_free,
.shutdown = sdw_shutdown,
};
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 46513bb97904..d1c570ca21ea 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1015,6 +1015,7 @@ out:
static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int ret = -EINVAL, _ret = 0;
int rollback = 0;
@@ -1055,14 +1056,23 @@ start_err:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
- if (ret < 0)
- break;
+ if (rtd->dai_link->stop_dma_first) {
+ ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
- ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
- if (ret < 0)
- break;
+ ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
+ } else {
+ ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
+ ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
+ }
ret = snd_soc_link_trigger(substream, cmd, rollback);
break;
}
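
The soc-pcm change keys the stop ordering off a per-link flag: when stop_dma_first is set, the component (DMA) trigger runs before the DAI trigger on the stop path instead of after it. A machine driver opts in on its dai_link; a hedged sketch with illustrative names (the cpus/codecs wiring a real link needs is omitted):

#include <sound/soc.h>

/* Sketch: a link whose DMA must be quiesced before the DAI is stopped,
 * e.g. to keep the DAI from underflowing while the DMA drains. */
static struct snd_soc_dai_link example_link = {
        .name           = "example-link",
        .stream_name    = "Example Playback",
        .stop_dma_first = 1,
};
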
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index a00262184efa..d04ce84fe7cc 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -89,6 +89,7 @@ static const struct sof_dev_desc adls_desc = {
static const struct sof_dev_desc adl_desc = {
.machines = snd_soc_acpi_intel_adl_machines,
.alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index 573374b89b10..d3276b4595af 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -213,19 +213,19 @@ snd_pcm_uframes_t tegra_pcm_pointer(struct snd_soc_component *component,
}
EXPORT_SYMBOL_GPL(tegra_pcm_pointer);
-static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
+static int tegra_pcm_preallocate_dma_buffer(struct device *dev, struct snd_pcm *pcm, int stream,
size_t size)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
- buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
+ buf->area = dma_alloc_wc(dev, size, &buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
buf->private_data = NULL;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
- buf->dev.dev = pcm->card->dev;
+ buf->dev.dev = dev;
buf->bytes = size;
return 0;
@@ -244,31 +244,28 @@ static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
if (!buf->area)
return;
- dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
+ dma_free_wc(buf->dev.dev, buf->bytes, buf->area, buf->addr);
buf->area = NULL;
}
-static int tegra_pcm_dma_allocate(struct snd_soc_pcm_runtime *rtd,
+static int tegra_pcm_dma_allocate(struct device *dev, struct snd_soc_pcm_runtime *rtd,
size_t size)
{
- struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret;
- ret = dma_set_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret < 0)
return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
- ret = tegra_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_PLAYBACK, size);
+ ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_PLAYBACK, size);
if (ret)
goto err;
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- ret = tegra_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_CAPTURE, size);
+ ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_CAPTURE, size);
if (ret)
goto err_free_play;
}
@@ -284,7 +281,16 @@ err:
int tegra_pcm_construct(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
- return tegra_pcm_dma_allocate(rtd, tegra_pcm_hardware.buffer_bytes_max);
+ struct device *dev = component->dev;
+
+ /*
+ * Fallback for backwards-compatibility with older device trees that
+ * have the iommus property in the virtual, top-level "sound" node.
+ */
+ if (!of_get_property(dev->of_node, "iommus", NULL))
+ dev = rtd->card->snd_card->dev;
+
+ return tegra_pcm_dma_allocate(dev, rtd, tegra_pcm_hardware.buffer_bytes_max);
}
EXPORT_SYMBOL_GPL(tegra_pcm_construct);
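
With the tegra_pcm change, buffers are allocated against whichever device actually carries the IOMMU mapping, and freeing uses the device recorded in the buffer rather than assuming the card device. A hedged sketch of the allocation half (names illustrative):

#include <linux/dma-mapping.h>

/* Sketch: allocate a write-combined DMA buffer for 'dev'; 'dev' must be
 * the device whose DMA mask and IOMMU configuration apply. */
static int example_alloc_wc(struct device *dev, size_t size,
                            void **area, dma_addr_t *addr)
{
        int ret;

        /* the DMA engine addressed here is 32-bit, as in the driver above */
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret < 0)
                return ret;

        *area = dma_alloc_wc(dev, size, addr, GFP_KERNEL);
        return *area ? 0 : -ENOMEM;
}
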
diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c
index a7c0484d44ec..265bbc5a2f96 100644
--- a/sound/soc/ti/j721e-evm.c
+++ b/sound/soc/ti/j721e-evm.c
@@ -197,7 +197,7 @@ static int j721e_configure_refclk(struct j721e_priv *priv,
return ret;
}
- if (priv->hsdiv_rates[domain->parent_clk_id] != scki) {
+ if (domain->parent_clk_id == -1 || priv->hsdiv_rates[domain->parent_clk_id] != scki) {
dev_dbg(priv->dev,
"%s configuration for %u Hz: %s, %dxFS (SCKI: %u Hz)\n",
audio_domain == J721E_AUDIO_DOMAIN_CPB ? "CPB" : "IVI",
@@ -278,23 +278,29 @@ static int j721e_audio_startup(struct snd_pcm_substream *substream)
j721e_rule_rate, &priv->rate_range,
SNDRV_PCM_HW_PARAM_RATE, -1);
- mutex_unlock(&priv->mutex);
if (ret)
- return ret;
+ goto out;
/* Reset TDM slots to 32 */
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
- return ret;
+ goto out;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
- return ret;
+ goto out;
}
- return 0;
+ if (ret == -ENOTSUPP)
+ ret = 0;
+out:
+ if (ret)
+ domain->active--;
+ mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int j721e_audio_hw_params(struct snd_pcm_substream *substream,
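
The j721e startup fix converts every early return into a jump to a single label so the mutex is always released and the domain's active count is rolled back on failure. The idiom, reduced to a hedged sketch (the struct and helpers are illustrative, not the driver's own):

#include <linux/mutex.h>

struct example_priv {
        struct mutex mutex;
        int active;
};

static int example_configure(struct example_priv *priv);        /* hypothetical */
static int example_set_slots(struct example_priv *priv);        /* hypothetical */

static int example_startup(struct example_priv *priv)
{
        int ret;

        mutex_lock(&priv->mutex);
        priv->active++;

        ret = example_configure(priv);
        if (ret)
                goto out;

        ret = example_set_slots(priv); /* further steps share the unwind path */
out:
        if (ret)
                priv->active--;         /* undo the refcount on any failure */
        mutex_unlock(&priv->mutex);
        return ret;
}
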
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 30b3e128e28d..f4cdaf1ba44a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -3295,7 +3295,15 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
{
struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
static const char * const val_types[] = {
- "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
+ [USB_MIXER_BOOLEAN] = "BOOLEAN",
+ [USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN",
+ [USB_MIXER_S8] = "S8",
+ [USB_MIXER_U8] = "U8",
+ [USB_MIXER_S16] = "S16",
+ [USB_MIXER_U16] = "U16",
+ [USB_MIXER_S32] = "S32",
+ [USB_MIXER_U32] = "U32",
+ [USB_MIXER_BESPOKEN] = "BESPOKEN",
};
snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
"channels=%i, type=\"%s\"\n", cval->head.id,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 8b8bee3c3dd6..e7accd87e063 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1897,6 +1897,9 @@ static const struct registration_quirk registration_quirks[] = {
REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
+ REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */
+ REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */
+ REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */
{ 0 } /* terminator */
};
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index a3e593ddfafc..2debba4e8a3a 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -1,4 +1,233 @@
{
+ "map access: known scalar += value_ptr unknown vs const",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr const vs unknown",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr const vs const (ne)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr const vs const (eq)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr unknown vs unknown (eq)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr unknown vs unknown (lt)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr unknown vs unknown (gt)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
"map access: known scalar += value_ptr from different maps",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
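
The new verifier cases above all share one skeleton: two map lookups are steered into the same register along different branches, a scalar whose bounds differ per path (a constant on one path, a masked unknown on the other) is added to the value pointer, and the result is dereferenced. Privileged loads accept this; unprivileged pointer-arithmetic sanitation must reject it with "R1 tried to add from different maps, paths or scalars". A hedged restricted-C illustration of the shape, not a literal translation of the eBPF:

/* Illustration only: 'val' points into a map value. Note that in C the
 * first branch constant-folds; in the eBPF above, the BPF_NEG/BPF_AND
 * pair instead leaves the verifier with only a 0..7 bound. */
static long example_shape(volatile unsigned char *val)
{
        unsigned long off;

        if (val[0] == 1)
                off = (0UL - 6) & 0x7;  /* masked unknown on this path */
        else
                off = 3;                /* constant on the other path */

        return val[off];        /* value_ptr += per-path scalar, then load */
}
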
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 06a351b4f93b..0709af0144c8 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -38,6 +38,7 @@
/x86_64/xen_vmcall_test
/x86_64/xss_msr_test
/x86_64/vmx_pmu_msrs_test
+/access_tracking_perf_test
/demand_paging_test
/dirty_log_test
/dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index b853be2ae3c6..5832f510a16c 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index a16c8f05366c..cc898181faab 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -1019,7 +1019,8 @@ static __u64 sve_rejects_set[] = {
#define VREGS_SUBLIST \
{ "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
- { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
+ { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
+ .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
.regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
new file mode 100644
index 000000000000..e2baa187a21e
--- /dev/null
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * access_tracking_perf_test
+ *
+ * Copyright (C) 2021, Google, Inc.
+ *
+ * This test measures the performance effects of KVM's access tracking.
+ * Access tracking is driven by the MMU notifiers test_young, clear_young, and
+ * clear_flush_young. These notifiers do not have a direct userspace API;
+ * however, the clear_young notifier can be triggered by marking pages as idle
+ * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
+ * enable access tracking on guest memory.
+ *
+ * To measure performance this test runs a VM with a configurable number of
+ * vCPUs that each touch every page in disjoint regions of memory. Performance
+ * is measured in the time it takes all vCPUs to finish touching their
+ * predefined region.
+ *
+ * Note that a deterministic correctness test of access tracking is not possible
+ * using page_idle as it exists today, for a few reasons:
+ *
+ * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
+ * means subsequent guest accesses are not guaranteed to see page table
+ * updates made by KVM until some time in the future.
+ *
+ * 2. page_idle only operates on LRU pages. Newly allocated pages are not
+ *    immediately added to LRU lists. Instead, they are held in a "pagevec",
+ * which is drained to LRU lists some time in the future. There is no
+ * userspace API to force this drain to occur.
+ *
+ * These limitations are worked around in this test by using a large enough
+ * region of memory for each vCPU such that the number of translations cached in
+ * the TLB and the number of pages held in pagevecs are a small fraction of the
+ * overall workload. If either of those conditions does not hold, this test
+ * will fail rather than silently passing.
+ */
+#include <inttypes.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
+
+/* Global variable used to synchronize all of the vCPU threads. */
+static int iteration = -1;
+
+/* Defines what vCPU threads should do during a given iteration. */
+static enum {
+ /* Run the vCPU to access all its memory. */
+ ITERATION_ACCESS_MEMORY,
+ /* Mark the vCPU's memory idle in page_idle. */
+ ITERATION_MARK_IDLE,
+} iteration_work;
+
+/* Set to true when vCPU threads should exit. */
+static bool done;
+
+/* The iteration that was last completed by each vCPU. */
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+
+/* Whether to overlap the regions of memory vCPUs access. */
+static bool overlap_memory_access;
+
+struct test_params {
+ /* The backing source for the region of memory. */
+ enum vm_mem_backing_src_type backing_src;
+
+ /* The amount of memory to allocate for each vCPU. */
+ uint64_t vcpu_memory_bytes;
+
+ /* The number of vCPUs to create in the VM. */
+ int vcpus;
+};
+
+static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
+{
+ uint64_t value;
+ off_t offset = index * sizeof(value);
+
+ TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
+ "pread from %s offset 0x%" PRIx64 " failed!",
+ filename, offset);
+
+ return value;
+}
+
+#define PAGEMAP_PRESENT (1ULL << 63)
+#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
+
+static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
+{
+ uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
+ uint64_t entry;
+ uint64_t pfn;
+
+ entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
+ if (!(entry & PAGEMAP_PRESENT))
+ return 0;
+
+ pfn = entry & PAGEMAP_PFN_MASK;
+ if (!pfn) {
+ print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
+ exit(KSFT_SKIP);
+ }
+
+ return pfn;
+}
+
+static bool is_page_idle(int page_idle_fd, uint64_t pfn)
+{
+ uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
+
+ return !!((bits >> (pfn % 64)) & 1);
+}
+
+static void mark_page_idle(int page_idle_fd, uint64_t pfn)
+{
+ uint64_t bits = 1ULL << (pfn % 64);
+
+ TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
+ "Set page_idle bits for PFN 0x%" PRIx64, pfn);
+}
+
+static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
+{
+ uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
+ uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
+ uint64_t page;
+ uint64_t still_idle = 0;
+ uint64_t no_pfn = 0;
+ int page_idle_fd;
+ int pagemap_fd;
+
+ /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
+ if (overlap_memory_access && vcpu_id)
+ return;
+
+ page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+ TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");
+
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
+
+ for (page = 0; page < pages; page++) {
+ uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+ uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
+
+ if (!pfn) {
+ no_pfn++;
+ continue;
+ }
+
+ if (is_page_idle(page_idle_fd, pfn)) {
+ still_idle++;
+ continue;
+ }
+
+ mark_page_idle(page_idle_fd, pfn);
+ }
+
+ /*
+ * Assumption: Less than 1% of pages are going to be swapped out from
+ * under us during this test.
+ */
+ TEST_ASSERT(no_pfn < pages / 100,
+ "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
+ vcpu_id, no_pfn, pages);
+
+ /*
+	 * Test that at least 90% of memory is no longer marked idle, i.e.
+	 * that the guest's accesses were observed by access tracking. Some
+	 * pages may remain idle, e.g. when their translations were still
+	 * cached in the TLB and the accesses skipped the page tables. 90% is
+	 * arbitrary: high enough to ensure most memory accesses went through
+	 * access tracking, but low enough not to make the test brittle over
+	 * time and across architectures.
+ */
+ TEST_ASSERT(still_idle < pages / 10,
+ "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
+ PRIu64 ").\n",
+ vcpu_id, still_idle, pages);
+
+ close(page_idle_fd);
+ close(pagemap_fd);
+}
+
+static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
+ uint64_t expected_ucall)
+{
+ struct ucall uc;
+ uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);
+
+ TEST_ASSERT(expected_ucall == actual_ucall,
+ "Guest exited unexpectedly (expected ucall %" PRIu64
+ ", got %" PRIu64 ")",
+ expected_ucall, actual_ucall);
+}
+
+static bool spin_wait_for_next_iteration(int *current_iteration)
+{
+ int last_iteration = *current_iteration;
+
+ do {
+ if (READ_ONCE(done))
+ return false;
+
+ *current_iteration = READ_ONCE(iteration);
+ } while (last_iteration == *current_iteration);
+
+ return true;
+}
+
+static void *vcpu_thread_main(void *arg)
+{
+ struct perf_test_vcpu_args *vcpu_args = arg;
+ struct kvm_vm *vm = perf_test_args.vm;
+ int vcpu_id = vcpu_args->vcpu_id;
+ int current_iteration = -1;
+
+ vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+
+ while (spin_wait_for_next_iteration(&current_iteration)) {
+ switch (READ_ONCE(iteration_work)) {
+ case ITERATION_ACCESS_MEMORY:
+ vcpu_run(vm, vcpu_id);
+ assert_ucall(vm, vcpu_id, UCALL_SYNC);
+ break;
+ case ITERATION_MARK_IDLE:
+ mark_vcpu_memory_idle(vm, vcpu_id);
+ break;
+		}
+
+ vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+ }
+
+ return NULL;
+}
+
+static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
+{
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+ target_iteration) {
+ continue;
+ }
+}
+
+/* The type of memory accesses to perform in the VM. */
+enum access_type {
+ ACCESS_READ,
+ ACCESS_WRITE,
+};
+
+static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
+{
+ struct timespec ts_start;
+ struct timespec ts_elapsed;
+ int next_iteration;
+ int vcpu_id;
+
+ /* Kick off the vCPUs by incrementing iteration. */
+ next_iteration = ++iteration;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts_start);
+
+ /* Wait for all vCPUs to finish the iteration. */
+ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
+ spin_wait_for_vcpu(vcpu_id, next_iteration);
+
+ ts_elapsed = timespec_elapsed(ts_start);
+ pr_info("%-30s: %ld.%09lds\n",
+ description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
+}
+
+static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
+ const char *description)
+{
+ perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
+ sync_global_to_guest(vm, perf_test_args);
+ iteration_work = ITERATION_ACCESS_MEMORY;
+ run_iteration(vm, vcpus, description);
+}
+
+static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
+{
+ /*
+ * Even though this parallelizes the work across vCPUs, this is still a
+ * very slow operation because page_idle forces the test to mark one pfn
+ * at a time and the clear_young notifier serializes on the KVM MMU
+ * lock.
+ */
+ pr_debug("Marking VM memory idle (slow)...\n");
+ iteration_work = ITERATION_MARK_IDLE;
+ run_iteration(vm, vcpus, "Mark memory idle");
+}
+
+static pthread_t *create_vcpu_threads(int vcpus)
+{
+ pthread_t *vcpu_threads;
+ int i;
+
+ vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
+ TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
+
+ for (i = 0; i < vcpus; i++) {
+ vcpu_last_completed_iteration[i] = iteration;
+ pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
+ &perf_test_args.vcpu_args[i]);
+ }
+
+ return vcpu_threads;
+}
+
+static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
+{
+ int i;
+
+ /* Set done to signal the vCPU threads to exit */
+ done = true;
+
+ for (i = 0; i < vcpus; i++)
+ pthread_join(vcpu_threads[i], NULL);
+}
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+ struct test_params *params = arg;
+ struct kvm_vm *vm;
+ pthread_t *vcpu_threads;
+ int vcpus = params->vcpus;
+
+ vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes,
+ params->backing_src);
+
+ perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
+ !overlap_memory_access);
+
+ vcpu_threads = create_vcpu_threads(vcpus);
+
+ pr_info("\n");
+ access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
+
+ /* As a control, read and write to the populated memory first. */
+ access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
+ access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");
+
+ /* Repeat on memory that has been marked as idle. */
+ mark_memory_idle(vm, vcpus);
+ access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
+ mark_memory_idle(vm, vcpus);
+ access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
+
+ terminate_vcpu_threads(vcpu_threads, vcpus);
+ free(vcpu_threads);
+ perf_test_destroy_vm(vm);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
+ name);
+ puts("");
+ printf(" -h: Display this help message.");
+ guest_modes_help();
+ printf(" -b: specify the size of the memory region which should be\n"
+ " dirtied by each vCPU. e.g. 10M or 3G.\n"
+ " (default: 1G)\n");
+ printf(" -v: specify the number of vCPUs to run.\n");
+ printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+ " them into a separate region of memory for each vCPU.\n");
+ printf(" -s: specify the type of memory that should be used to\n"
+ " back the guest data region.\n\n");
+ backing_src_help();
+ puts("");
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ struct test_params params = {
+ .backing_src = VM_MEM_SRC_ANONYMOUS,
+ .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
+ .vcpus = 1,
+ };
+ int page_idle_fd;
+ int opt;
+
+ guest_modes_append_default();
+
+ while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
+ switch (opt) {
+ case 'm':
+ guest_modes_cmdline(optarg);
+ break;
+ case 'b':
+ params.vcpu_memory_bytes = parse_size(optarg);
+ break;
+ case 'v':
+ params.vcpus = atoi(optarg);
+ break;
+ case 'o':
+ overlap_memory_access = true;
+ break;
+ case 's':
+ params.backing_src = parse_backing_src_type(optarg);
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ break;
+ }
+ }
+
+ page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+ if (page_idle_fd < 0) {
+ print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
+ exit(KSFT_SKIP);
+ }
+ close(page_idle_fd);
+
+ for_each_guest_mode(run_test, &params);
+
+ return 0;
+}
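
For reference, the page_idle protocol the new test drives is small enough to sketch standalone: the bitmap file exposes one bit per PFN, accessed in aligned 8-byte chunks; writing a set bit marks the page idle, and a page referenced afterwards reads back as not idle. A hedged userspace sketch, with error handling reduced to return codes:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int mark_pfn_idle(int page_idle_fd, uint64_t pfn)
{
        uint64_t bits = 1ULL << (pfn % 64);

        /* each 8-byte word covers PFNs [n * 64, n * 64 + 63] */
        return pwrite(page_idle_fd, &bits, sizeof(bits),
                      (pfn / 64) * sizeof(bits)) == sizeof(bits) ? 0 : -1;
}

static int pfn_is_idle(int page_idle_fd, uint64_t pfn)
{
        uint64_t bits;

        if (pread(page_idle_fd, &bits, sizeof(bits),
                  (pfn / 64) * sizeof(bits)) != sizeof(bits))
                return -1;
        return (bits >> (pfn % 64)) & 1;
}
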
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 04a2641261be..80cbd3a748c0 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -312,6 +312,7 @@ int main(int argc, char *argv[])
break;
case 'o':
p.partition_vcpu_memory_access = false;
+ break;
case 's':
p.backing_src = parse_backing_src_type(optarg);
break;
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index b0031f2d38fd..ecec30865a74 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -320,7 +320,7 @@ int main(int ac, char **av)
run_delay = get_run_delay();
pthread_create(&thread, &attr, do_steal_time, NULL);
do
- pthread_yield();
+ sched_yield();
while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
pthread_join(thread, NULL);
run_delay = get_run_delay() - run_delay;
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index e363bdaff59d..2ea438e6b8b1 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -210,8 +210,10 @@ static void anon_release_pages(char *rel_area)
static void anon_allocate_area(void **alloc_area)
{
- if (posix_memalign(alloc_area, page_size, nr_pages * page_size))
- err("posix_memalign() failed");
+ *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (*alloc_area == MAP_FAILED)
+ err("mmap of anonymous memory failed");
}
static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
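
Switching the anonymous area from posix_memalign() to mmap() gives the test pages it fully owns: the region is page-aligned by construction, can be released with madvise(MADV_DONTNEED) or unmapped outright, and never aliases malloc heap bookkeeping. A hedged sketch of the allocate/release pair:

#include <stddef.h>
#include <sys/mman.h>

static void *example_alloc_area(size_t len)
{
        void *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        return area == MAP_FAILED ? NULL : area;
}

static int example_release_area(void *area, size_t len)
{
        /* zap the pages but keep the mapping, as anon_release_pages() does */
        return madvise(area, len, MADV_DONTNEED);
}
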
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 986959833d70..d20fba0fc290 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3110,6 +3110,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
++vcpu->stat.generic.halt_poll_invalid;
goto out;
}
+ cpu_relax();
poll_end = cur = ktime_get();
} while (kvm_vcpu_can_poll(cur, stop));
}
@@ -4390,6 +4391,16 @@ struct compat_kvm_dirty_log {
};
};
+struct compat_kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ compat_uptr_t dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
static long kvm_vm_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -4399,6 +4410,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
if (kvm->mm != current->mm)
return -EIO;
switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CLEAR_DIRTY_LOG: {
+ struct compat_kvm_clear_dirty_log compat_log;
+ struct kvm_clear_dirty_log log;
+
+ if (copy_from_user(&compat_log, (void __user *)arg,
+ sizeof(compat_log)))
+ return -EFAULT;
+ log.slot = compat_log.slot;
+ log.num_pages = compat_log.num_pages;
+ log.first_page = compat_log.first_page;
+ log.padding2 = compat_log.padding2;
+ log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+ r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+ break;
+ }
+#endif
case KVM_GET_DIRTY_LOG: {
struct compat_kvm_dirty_log compat_log;
struct kvm_dirty_log log;