-rw-r--r--  Documentation/devicetree/bindings/arm/tegra.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml | 89
-rw-r--r--  Documentation/networking/device_drivers/ethernet/intel/ice.rst | 2
-rw-r--r--  Documentation/networking/dsa/sja1105.rst | 2
-rw-r--r--  MAINTAINERS | 22
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/Kconfig | 3
-rw-r--r--  arch/alpha/include/asm/asm-prototypes.h | 1
-rw-r--r--  arch/alpha/include/asm/jensen.h | 8
-rw-r--r--  arch/alpha/kernel/sys_jensen.c | 10
-rw-r--r--  arch/alpha/lib/Makefile | 1
-rw-r--r--  arch/alpha/lib/udiv-qrnnd.S (renamed from arch/alpha/math-emu/qrnnd.S) | 2
-rw-r--r--  arch/alpha/math-emu/Makefile | 2
-rw-r--r--  arch/alpha/math-emu/math.c | 2
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/kernel/process.c | 3
-rw-r--r--  arch/parisc/lib/iomap.c | 4
-rw-r--r--  arch/powerpc/kernel/interrupt.c | 43
-rw-r--r--  arch/powerpc/kernel/interrupt_64.S | 41
-rw-r--r--  arch/powerpc/kernel/mce.c | 17
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 36
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 4
-rw-r--r--  arch/s390/Kconfig | 10
-rw-r--r--  arch/s390/Makefile | 7
-rw-r--r--  arch/s390/configs/debug_defconfig | 8
-rw-r--r--  arch/s390/configs/defconfig | 5
-rw-r--r--  arch/s390/include/asm/ccwgroup.h | 2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 70
-rw-r--r--  arch/s390/pci/pci_mmio.c | 4
-rw-r--r--  arch/sh/boot/Makefile | 16
-rw-r--r--  arch/sparc/kernel/ioport.c | 4
-rw-r--r--  arch/sparc/lib/iomap.c | 2
-rw-r--r--  arch/x86/Kconfig | 5
-rw-r--r--  arch/x86/Makefile_32.cpu | 12
-rw-r--r--  arch/x86/kernel/cpu/mce/core.c | 43
-rw-r--r--  arch/x86/mm/init_64.c | 6
-rw-r--r--  arch/x86/mm/pat/memtype.c | 7
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 7
-rw-r--r--  arch/x86/xen/mmu_pv.c | 7
-rw-r--r--  block/blk-cgroup.c | 18
-rw-r--r--  block/blk-integrity.c | 9
-rw-r--r--  block/blk-mq-tag.c | 2
-rw-r--r--  drivers/base/power/trace.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq_governor_attr_set.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 109
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h | 86
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 481
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 15
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 43
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 4
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 84
-rw-r--r--  drivers/net/dsa/b53/b53_mdio.c | 21
-rw-r--r--  drivers/net/dsa/b53/b53_mmap.c | 13
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 5
-rw-r--r--  drivers/net/dsa/b53/b53_spi.c | 13
-rw-r--r--  drivers/net/dsa/b53/b53_srab.c | 21
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 14
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 22
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 16
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 6
-rw-r--r--  drivers/net/dsa/lan9303.h | 1
-rw-r--r--  drivers/net/dsa/lan9303_i2c.c | 24
-rw-r--r--  drivers/net/dsa/lan9303_mdio.c | 15
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 18
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_spi.c | 11
-rw-r--r--  drivers/net/dsa/microchip/ksz8863_smi.c | 13
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 14
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 8
-rw-r--r--  drivers/net/dsa/mt7530.c | 18
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 18
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 38
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c | 73
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.h | 6
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 22
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 20
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 18
-rw-r--r--  drivers/net/dsa/qca8k.c | 18
-rw-r--r--  drivers/net/dsa/realtek-smi-core.c | 22
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_clocking.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 21
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.h | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.h | 2
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 6
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-platform.c | 22
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-spi.c | 22
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx.h | 1
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 6
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.h | 1
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_i2c.c | 18
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 80
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 52
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 21
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_mrp.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 8
-rw-r--r--  drivers/net/hamradio/dmascc.c | 6
-rw-r--r--  drivers/net/pcs/pcs-xpcs-nxp.c | 2
-rw-r--r--  drivers/net/phy/mdio_device.c | 11
-rw-r--r--  drivers/net/usb/hso.c | 12
-rw-r--r--  drivers/net/virtio_net.c | 4
-rw-r--r--  drivers/net/vxlan.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 2
-rw-r--r--  drivers/nfc/st-nci/spi.c | 1
-rw-r--r--  drivers/nvme/host/core.c | 17
-rw-r--r--  drivers/nvme/host/multipath.c | 7
-rw-r--r--  drivers/nvme/host/rdma.c | 16
-rw-r--r--  drivers/nvme/host/tcp.c | 20
-rw-r--r--  drivers/nvme/target/configfs.c | 2
-rw-r--r--  drivers/of/device.c | 6
-rw-r--r--  drivers/of/property.c | 2
-rw-r--r--  drivers/pci/pci-acpi.c | 2
-rw-r--r--  drivers/pci/quirks.c | 9
-rw-r--r--  drivers/pci/vpd.c | 36
-rw-r--r--  drivers/platform/x86/amd-pmc.c | 2
-rw-r--r--  drivers/platform/x86/dell/Kconfig | 3
-rw-r--r--  drivers/platform/x86/gigabyte-wmi.c | 1
-rw-r--r--  drivers/platform/x86/intel/hid.c | 27
-rw-r--r--  drivers/platform/x86/intel/punit_ipc.c | 3
-rw-r--r--  drivers/platform/x86/lg-laptop.c | 2
-rw-r--r--  drivers/platform/x86/touchscreen_dmi.c | 54
-rw-r--r--  drivers/ptp/Kconfig | 1
-rw-r--r--  drivers/regulator/max14577-regulator.c | 2
-rw-r--r--  drivers/regulator/qcom-rpmh-regulator.c | 2
-rw-r--r--  drivers/s390/char/sclp_early.c | 3
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 10
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 3
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 4
-rw-r--r--  drivers/s390/net/qeth_core.h | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 22
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 1
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 1
-rw-r--r--  drivers/spi/spi-rockchip.c | 6
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 5
-rw-r--r--  drivers/spi/spi.c | 8
-rw-r--r--  drivers/video/fbdev/Kconfig | 4
-rw-r--r--  drivers/xen/Kconfig | 2
-rw-r--r--  drivers/xen/balloon.c | 62
-rw-r--r--  drivers/xen/swiotlb-xen.c | 37
-rw-r--r--  fs/afs/callback.c | 44
-rw-r--r--  fs/afs/cell.c | 2
-rw-r--r--  fs/afs/dir.c | 57
-rw-r--r--  fs/afs/dir_edit.c | 4
-rw-r--r--  fs/afs/file.c | 86
-rw-r--r--  fs/afs/fs_probe.c | 8
-rw-r--r--  fs/afs/fsclient.c | 31
-rw-r--r--  fs/afs/inode.c | 98
-rw-r--r--  fs/afs/internal.h | 21
-rw-r--r--  fs/afs/protocol_afs.h | 15
-rw-r--r--  fs/afs/protocol_yfs.h | 6
-rw-r--r--  fs/afs/rotate.c | 1
-rw-r--r--  fs/afs/server.c | 2
-rw-r--r--  fs/afs/super.c | 1
-rw-r--r--  fs/afs/write.c | 29
-rw-r--r--  fs/cifs/cache.c | 2
-rw-r--r--  fs/cifs/cifs_debug.c | 1
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 1
-rw-r--r--  fs/cifs/cifs_ioctl.h | 1
-rw-r--r--  fs/cifs/cifs_spnego.c | 2
-rw-r--r--  fs/cifs/cifs_spnego.h | 2
-rw-r--r--  fs/cifs/cifs_unicode.c | 1
-rw-r--r--  fs/cifs/cifsacl.c | 1
-rw-r--r--  fs/cifs/cifsacl.h | 1
-rw-r--r--  fs/cifs/cifsencrypt.c | 1
-rw-r--r--  fs/cifs/cifsfs.c | 1
-rw-r--r--  fs/cifs/cifsfs.h | 1
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifspdu.h | 1
-rw-r--r--  fs/cifs/cifsproto.h | 4
-rw-r--r--  fs/cifs/cifssmb.c | 1
-rw-r--r--  fs/cifs/connect.c | 13
-rw-r--r--  fs/cifs/dir.c | 1
-rw-r--r--  fs/cifs/dns_resolve.c | 1
-rw-r--r--  fs/cifs/dns_resolve.h | 4
-rw-r--r--  fs/cifs/export.c | 1
-rw-r--r--  fs/cifs/file.c | 3
-rw-r--r--  fs/cifs/fscache.c | 2
-rw-r--r--  fs/cifs/fscache.h | 2
-rw-r--r--  fs/cifs/inode.c | 7
-rw-r--r--  fs/cifs/ioctl.c | 3
-rw-r--r--  fs/cifs/link.c | 1
-rw-r--r--  fs/cifs/misc.c | 42
-rw-r--r--  fs/cifs/netmisc.c | 1
-rw-r--r--  fs/cifs/ntlmssp.h | 1
-rw-r--r--  fs/cifs/readdir.c | 1
-rw-r--r--  fs/cifs/rfc1002pdu.h | 1
-rw-r--r--  fs/cifs/sess.c | 1
-rw-r--r--  fs/cifs/smb2file.c | 1
-rw-r--r--  fs/cifs/smb2glob.h | 1
-rw-r--r--  fs/cifs/smb2inode.c | 1
-rw-r--r--  fs/cifs/smb2misc.c | 1
-rw-r--r--  fs/cifs/smb2pdu.c | 1
-rw-r--r--  fs/cifs/smb2pdu.h | 1
-rw-r--r--  fs/cifs/smb2proto.h | 1
-rw-r--r--  fs/cifs/smb2status.h | 1
-rw-r--r--  fs/cifs/smb2transport.c | 1
-rw-r--r--  fs/cifs/smberr.h | 1
-rw-r--r--  fs/cifs/transport.c | 1
-rw-r--r--  fs/cifs/winucase.c | 1
-rw-r--r--  fs/cifs/xattr.c | 1
-rw-r--r--  fs/io-wq.c | 27
-rw-r--r--  fs/io_uring.c | 221
-rw-r--r--  fs/ksmbd/misc.c | 76
-rw-r--r--  fs/ksmbd/misc.h | 3
-rw-r--r--  fs/ksmbd/smb2pdu.c | 18
-rw-r--r--  fs/ksmbd/transport_rdma.c | 1
-rw-r--r--  fs/lockd/svcxdr.h | 13
-rw-r--r--  fs/nfsd/nfs4state.c | 16
-rw-r--r--  fs/qnx4/dir.c | 36
-rw-r--r--  fs/smbfs_common/smbfsctl.h | 2
-rw-r--r--  include/asm-generic/io.h | 26
-rw-r--r--  include/asm-generic/iomap.h | 10
-rw-r--r--  include/asm-generic/pci_iomap.h | 3
-rw-r--r--  include/linux/dsa/ocelot.h | 2
-rw-r--r--  include/linux/mdio.h | 3
-rw-r--r--  include/linux/packing.h | 2
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/uio.h | 21
-rw-r--r--  include/net/dsa.h | 9
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  include/trace/events/afs.h | 8
-rw-r--r--  include/uapi/linux/cifs/cifs_mount.h | 1
-rw-r--r--  include/uapi/linux/io_uring.h | 8
-rw-r--r--  init/main.c | 6
-rw-r--r--  kernel/dma/debug.c | 3
-rw-r--r--  kernel/dma/mapping.c | 3
-rw-r--r--  kernel/events/core.c | 2
-rw-r--r--  kernel/locking/rwbase_rt.c | 65
-rw-r--r--  lib/iov_iter.c | 36
-rw-r--r--  lib/packing.c | 2
-rw-r--r--  lib/pci_iomap.c | 43
-rw-r--r--  mm/memcontrol.c | 10
-rw-r--r--  mm/memory.c | 1
-rw-r--r--  mm/workingset.c | 1
-rw-r--r--  net/core/dev.c | 16
-rw-r--r--  net/core/sock.c | 37
-rw-r--r--  net/dsa/dsa2.c | 114
-rw-r--r--  net/dsa/tag_ocelot.c | 2
-rw-r--r--  net/dsa/tag_ocelot_8021q.c | 2
-rw-r--r--  net/ipv4/nexthop.c | 21
-rw-r--r--  net/smc/smc_clc.c | 3
-rw-r--r--  net/smc/smc_core.c | 2
-rw-r--r--  scripts/Makefile.clang | 5
-rw-r--r--  scripts/Makefile.modpost | 2
-rwxr-xr-x  scripts/checkkconfigsymbols.py | 11
-rwxr-xr-x  scripts/clang-tools/gen_compile_commands.py | 1
-rw-r--r--  tools/lib/perf/evsel.c | 64
-rw-r--r--  tools/perf/builtin-script.c | 24
-rw-r--r--  tools/perf/ui/browser.c | 33
-rw-r--r--  tools/perf/ui/browser.h | 2
-rw-r--r--  tools/perf/ui/browsers/annotate.c | 24
-rw-r--r--  tools/perf/util/bpf-event.c | 3
-rw-r--r--  tools/perf/util/machine.c | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh | 2
-rw-r--r--  tools/testing/selftests/net/af_unix/Makefile | 5
-rw-r--r--  tools/testing/selftests/net/af_unix/test_unix_oob.c | 5
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-syscall-asm.S | 37
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-syscall.c | 36
332 files changed, 3498 insertions, 1318 deletions
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index b962fa6d649c..d79d36ac0c44 100644
--- a/Documentation/devicetree/bindings/arm/tegra.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra.yaml
@@ -54,7 +54,7 @@ properties:
- const: toradex,apalis_t30
- const: nvidia,tegra30
- items:
- - const: toradex,apalis_t30-eval-v1.1
+ - const: toradex,apalis_t30-v1.1-eval
- const: toradex,apalis_t30-eval
- const: toradex,apalis_t30-v1.1
- const: toradex,apalis_t30
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index fbb59c9ddda6..78044c340e20 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -9,7 +9,7 @@ function block.
All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
DISP function blocks
====================
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
new file mode 100644
index 000000000000..b9ca8ef4f2be
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+ - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+ Each Samsung UFS host controller instance should have its own node.
+ This binding define Samsung specific binding other then what is used
+ in the common ufshcd bindings
+ [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+
+ compatible:
+ enum:
+ - samsung,exynos7-ufs
+
+ reg:
+ items:
+ - description: HCI register
+ - description: vendor specific register
+ - description: unipro register
+ - description: UFS protector register
+
+ reg-names:
+ items:
+ - const: hci
+ - const: vs_hci
+ - const: unipro
+ - const: ufsp
+
+ clocks:
+ items:
+ - description: ufs link core clock
+ - description: unipro main clock
+
+ clock-names:
+ items:
+ - const: core_clk
+ - const: sclk_unipro_main
+
+ interrupts:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: ufs-phy
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - phys
+ - phy-names
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/exynos7-clk.h>
+
+ ufs: ufs@15570000 {
+ compatible = "samsung,exynos7-ufs";
+ reg = <0x15570000 0x100>,
+ <0x15570100 0x100>,
+ <0x15571000 0x200>,
+ <0x15572000 0x300>;
+ reg-names = "hci", "vs_hci", "unipro", "ufsp";
+ interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+ <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+ clock-names = "core_clk", "sclk_unipro_main";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+ phys = <&ufs_phy>;
+ phy-names = "ufs-phy";
+ };
+...
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index e7d9cbff771b..67b7a701ce9e 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -851,7 +851,7 @@ NOTES:
- 0x88A8 traffic will not be received unless VLAN stripping is disabled with
the following command::
- # ethool -K <ethX> rxvlan off
+ # ethtool -K <ethX> rxvlan off
- 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
configured on the same port. 0x88a8/0x8100 traffic will not be received if
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
index 564caeebe2b2..29b1bae0cf00 100644
--- a/Documentation/networking/dsa/sja1105.rst
+++ b/Documentation/networking/dsa/sja1105.rst
@@ -296,7 +296,7 @@ not available.
Device Tree bindings and board design
=====================================
-This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
and aims to showcase some potential switch caveats.
RMII PHY role and out-of-band signaling
diff --git a/MAINTAINERS b/MAINTAINERS
index eeb4c70b3d5b..7f4615336fa5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2804,9 +2804,8 @@ F: arch/arm/mach-pxa/include/mach/vpac270.h
F: arch/arm/mach-pxa/vpac270.c
ARM/VT8500 ARM ARCHITECTURE
-M: Tony Prisk <linux@prisktech.co.nz>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
@@ -13255,9 +13254,9 @@ F: Documentation/scsi/NinjaSCSI.rst
F: drivers/scsi/nsp32*
NIOS2 ARCHITECTURE
-M: Ley Foon Tan <ley.foon.tan@intel.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
F: arch/nios2/
NITRO ENCLAVES (NE)
@@ -14342,7 +14341,8 @@ F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
F: drivers/pci/controller/pci-ixp4xx.c
PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M: Jonathan Derrick <jonathan.derrick@intel.com>
+M: Nirmal Patel <nirmal.patel@linux.intel.com>
+R: Jonathan Derrick <jonathan.derrick@linux.dev>
L: linux-pci@vger.kernel.org
S: Supported
F: drivers/pci/controller/vmd.c
@@ -16955,7 +16955,6 @@ F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <kgraul@linux.ibm.com>
-M: Guvenc Gulce <guvenc@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -17968,10 +17967,11 @@ F: Documentation/admin-guide/svga.rst
F: arch/x86/boot/video*
SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Christoph Hellwig <hch@infradead.org>
L: iommu@lists.linux-foundation.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W: http://git.infradead.org/users/hch/dma-mapping.git
+T: git git://git.infradead.org/users/hch/dma-mapping.git
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
F: kernel/dma/swiotlb.c
@@ -20474,7 +20474,6 @@ F: samples/bpf/xdpsock*
F: tools/lib/bpf/xsk*
XEN BLOCK SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Roger Pau Monné <roger.pau@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
@@ -20522,7 +20521,7 @@ S: Supported
F: drivers/net/xen-netback/*
XEN PCI SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/x86/pci/*xen*
@@ -20545,7 +20544,8 @@ S: Supported
F: sound/xen/*
XEN SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Juergen Gross <jgross@suse.com>
+M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: iommu@lists.linux-foundation.org
S: Supported
diff --git a/Makefile b/Makefile
index 34a0afc3a8eb..5e7c1d854441 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Opossums on Parade
# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 02e5b673bdad..4e87783c90ad 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -20,7 +20,7 @@ config ALPHA
select NEED_SG_DMA_LENGTH
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
- select GENERIC_PCI_IOMAP if PCI
+ select GENERIC_PCI_IOMAP
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
config ALPHA_JENSEN
bool "Jensen"
- depends on BROKEN
select HAVE_EISA
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h
index b34cc1f06720..c8ae46fc2e74 100644
--- a/arch/alpha/include/asm/asm-prototypes.h
+++ b/arch/alpha/include/asm/asm-prototypes.h
@@ -16,3 +16,4 @@ extern void __divlu(void);
extern void __remlu(void);
extern void __divqu(void);
extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h
index 916895155a88..1c4131453db2 100644
--- a/arch/alpha/include/asm/jensen.h
+++ b/arch/alpha/include/asm/jensen.h
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
* convinced that I need one of the newer machines.
*/
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
{
return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
}
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
{
*(vuip)((addr << 9) + EISA_VL82C106) = b;
mb();
}
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
{
long result;
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
return __kernel_extbl(result, addr & 3);
}
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
{
jensen_set_hae(0);
*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index e5d870ff225f..5c9c88428124 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -7,6 +7,11 @@
*
* Code supporting the Jensen.
*/
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef __EXTERN_INLINE
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -17,11 +22,6 @@
#include <asm/ptrace.h>
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef __EXTERN_INLINE
-
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 854d5e79979e..1cc74f7b50ef 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
ev67-$(CONFIG_ALPHA_EV67) := ev67-
lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
+ udiv-qrnnd.o \
udelay.o \
$(ev6-y)memset.o \
$(ev6-y)memcpy.o \
diff --git a/arch/alpha/math-emu/qrnnd.S b/arch/alpha/lib/udiv-qrnnd.S
index d6373ec1bff9..b887aa5428e5 100644
--- a/arch/alpha/math-emu/qrnnd.S
+++ b/arch/alpha/lib/udiv-qrnnd.S
@@ -25,6 +25,7 @@
# along with GCC; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
+#include <asm/export.h>
.set noreorder
.set noat
@@ -161,3 +162,4 @@ $Odd:
ret $31,($26),1
.end __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile
index 6eda0973e183..3206402a8792 100644
--- a/arch/alpha/math-emu/Makefile
+++ b/arch/alpha/math-emu/Makefile
@@ -7,4 +7,4 @@ ccflags-y := -w
obj-$(CONFIG_MATHEMU) += math-emu.o
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
index f7cef66af88d..4212258f3cfd 100644
--- a/arch/alpha/math-emu/math.c
+++ b/arch/alpha/math-emu/math.c
@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
egress:
return si_code;
}
-
-EXPORT_SYMBOL(__udiv_qrnnd);
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5a294f20e9de..ff4962750b3d 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
void sve_alloc(struct task_struct *task)
{
if (task->thread.sve_state) {
- memset(task->thread.sve_state, 0, sve_state_size(current));
+ memset(task->thread.sve_state, 0, sve_state_size(task));
return;
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 19100fe8f7e4..40adb8cdbf5a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
-#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
@@ -58,7 +57,7 @@
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index f03adb1999e7..367f6397bda7 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
}
}
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (!INDIRECT_ADDR(addr)) {
iounmap(addr);
}
}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index a73f3f70a657..de10a2697258 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -18,6 +18,7 @@
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
+#include <asm/tm.h>
#include <asm/unistd.h>
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
*/
irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
+ /*
+ * If system call is called with TM active, set _TIF_RESTOREALL to
+ * prevent RFSCV being used to return to userspace, because POWER9
+ * TM implementation has problems with this instruction returning to
+ * transactional state. Final register values are not relevant because
+ * the transaction will be aborted upon return anyway. Or in the case
+ * of unsupported_scv SIGILL fault, the return state does not much
+ * matter because it's an edge case.
+ */
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+ current_thread_info()->flags |= _TIF_RESTOREALL;
+
+ /*
+ * If the system call was made with a transaction active, doom it and
+ * return without performing the system call. Unless it was an
+ * unsupported scv vector, in which case it's treated like an illegal
+ * instruction.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+ !trap_is_unsupported_scv(regs)) {
+ /* Enable TM in the kernel, and disable EE (for scv) */
+ hard_irq_disable();
+ mtmsr(mfmsr() | MSR_TM);
+
+ /* tabort, this dooms the transaction, nothing else */
+ asm volatile(".long 0x7c00071d | ((%0) << 16)"
+ :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+ /*
+ * Userspace will never see the return value. Execution will
+ * resume after the tbegin. of the aborted transaction with the
+ * checkpointed register state. A context switch could occur
+ * or signal delivered to the process before resuming the
+ * doomed transaction context, but that should all be handled
+ * as expected.
+ */
+ return -ENOSYS;
+ }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
local_irq_enable();
if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index d4212d2ff0b5..ec950b08a8dc 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -12,7 +12,6 @@
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
-#include <asm/tm.h>
.section ".toc","aw"
SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
SCV_INTERRUPT_TO_KERNEL
mr r10,r1
ld r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
- /* Firstly we need to enable TM in the kernel */
- mfmsr r10
- li r9, 1
- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r10, 0
-
- /* tabort, this dooms the transaction, nothing else */
- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R9)
-
- /*
- * Return directly to userspace. We have corrupted user register state,
- * but userspace will never see that register state. Execution will
- * resume after the tbegin of the aborted transaction with the
- * checkpointed register state.
- */
- li r9, MSR_RI
- andc r10, r10, r9
- mtmsrd r10, 1
- mtspr SPRN_SRR0, r11
- mtspr SPRN_SRR1, r12
- RFI_TO_USER
- b . /* prevent speculative execution */
-#endif
-
/*
* If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
* touched, no exit work created, then this can be used.
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 47a683cd00d2..fd829f7f25a4 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
{
int index;
struct machine_check_event evt;
+ unsigned long msr;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
memcpy(&local_paca->mce_info->mce_event_queue[index],
&evt, sizeof(evt));
- /* Queue irq work to process this event later. */
- irq_work_queue(&mce_event_process_work);
+ /*
+ * Queue irq work to process this event later. Before
+ * queuing the work enable translation for non radix LPAR,
+ * as irq_work_queue may try to access memory outside RMO
+ * region.
+ */
+ if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+ msr = mfmsr();
+ mtmsr(msr | MSR_IR | MSR_DR);
+ irq_work_queue(&mce_event_process_work);
+ mtmsr(msr);
+ } else {
+ irq_work_queue(&mce_event_process_work);
+ }
}
void mce_common_process_ue(struct pt_regs *regs,
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 75079397c2a5..90484425a1e6 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
/* The following code handles the fake_suspend = 1 case */
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -PPC_MIN_STKFRM(r1)
+ stdu r1, -TM_FRAME_SIZE(r1)
/* Turn on TM. */
mfmsr r8
@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
+ /*
+ * It's possible that treclaim. may modify registers, if we have lost
+ * track of fake-suspend state in the guest due to it using rfscv.
+ * Save and restore registers in case this occurs.
+ */
+ mfspr r3, SPRN_DSCR
+ mfspr r4, SPRN_XER
+ mfspr r5, SPRN_AMR
+ /* SPRN_TAR would need to be saved here if the kernel ever used it */
+ mfcr r12
+ SAVE_NVGPRS(r1)
+ SAVE_GPR(2, r1)
+ SAVE_GPR(3, r1)
+ SAVE_GPR(4, r1)
+ SAVE_GPR(5, r1)
+ stw r12, 8(r1)
+ std r1, HSTATE_HOST_R1(r13)
+
/* We have to treclaim here because that's the only way to do S->N */
li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
+ GET_PACA(r13)
+ ld r1, HSTATE_HOST_R1(r13)
+ REST_GPR(2, r1)
+ REST_GPR(3, r1)
+ REST_GPR(4, r1)
+ REST_GPR(5, r1)
+ lwz r12, 8(r1)
+ REST_NVGPRS(r1)
+ mtspr SPRN_DSCR, r3
+ mtspr SPRN_XER, r4
+ mtspr SPRN_AMR, r5
+ mtcr r12
+ HMT_MEDIUM
+
/*
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
- addi r1, r1, PPC_MIN_STKFRM
+ addi r1, r1, TM_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 5c1a157a83b8..244a727c6ba4 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
if (xics_ics->check(xics_ics, hwirq))
return -EINVAL;
- /* No chip data for the XICS domain */
+ /* Let the ICS be the chip data for the XICS domain. For ICS native */
irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
- NULL, handle_fasteoi_irq, NULL, NULL);
+ xics_ics, handle_fasteoi_irq, NULL, NULL);
return 0;
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2bd90c51efd3..b86de61b8caa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -685,16 +685,6 @@ config STACK_GUARD
The minimum size for the stack guard should be 256 for 31 bit and
512 for 64 bit.
-config WARN_DYNAMIC_STACK
- def_bool n
- prompt "Emit compiler warnings for function with dynamic stack usage"
- help
- This option enables the compiler option -mwarn-dynamicstack. If the
- compiler supports this options generates warnings for functions
- that dynamically allocate stack space using alloca.
-
- Say N if you are unsure.
-
endmenu
menu "I/O subsystem"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index a3cf33ad009f..450b351dfa8e 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
endif
endif
-ifdef CONFIG_WARN_DYNAMIC_STACK
- ifneq ($(call cc-option,-mwarn-dynamicstack),)
- KBUILD_CFLAGS += -mwarn-dynamicstack
- KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
- endif
-endif
-
ifdef CONFIG_EXPOLINE
ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 37b6115ed80e..6aad18ee131d 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 56a1cc85c5d7..f08b161c9446 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 36dbf5043fc0..aa995d91cd1d 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -55,7 +55,7 @@ int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 88419263a89a..840d8594437d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
- /* Branch instruction needs 6 bytes */ \
- int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+ int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9080000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,imm */
- EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ if (imm != 0) {
+ /* alfi %dst,imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9090000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,-imm */
- EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ if (imm != 0) {
+ /* alfi %dst,-imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
if (!imm)
break;
- /* agfi %dst,-imm */
- EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ if (imm == -0x80000000) {
+ /* algfi %dst,0x80000000 */
+ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+ } else {
+ /* agfi %dst,-imm */
+ EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ }
break;
/*
* BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb90c0000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
- if (imm == 1)
- break;
- /* msfi %r5,imm */
- EMIT6_IMM(0xc2010000, dst_reg, imm);
+ if (imm != 1) {
+ /* msfi %r5,imm */
+ EMIT6_IMM(0xc2010000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
if (BPF_OP(insn->code) == BPF_MOD)
/* lhgi %dst,0 */
EMIT4_IMM(0xa7090000, dst_reg, 0);
+ else
+ EMIT_ZERO(dst_reg);
break;
}
/* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9820000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
- if (!imm)
- break;
- /* xilf %dst,imm */
- EMIT6_IMM(0xc0070000, dst_reg, imm);
+ if (imm != 0) {
+ /* xilf %dst,imm */
+ EMIT6_IMM(0xc0070000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
- if (imm == 0)
- break;
- /* sll %dst,imm(%r0) */
- EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sll %dst,imm(%r0) */
+ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
- if (imm == 0)
- break;
- /* srl %dst,imm(%r0) */
- EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* srl %dst,imm(%r0) */
+ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
- if (imm == 0)
- break;
- /* sra %dst,imm(%r0) */
- EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sra %dst,imm(%r0) */
+ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index ae683aa623ac..c5b35ea129cf 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
mmap_read_lock(current->mm);
ret = -EINVAL;
- vma = find_vma(current->mm, mmio_addr);
+ vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
mmap_read_lock(current->mm);
ret = -EINVAL;
- vma = find_vma(current->mm, mmio_addr);
+ vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 58592dfa5cb6..c081e7e2d6e7 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzo)
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
$(call if_changed,uimage,bzip2)
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
$(call if_changed,uimage,xz)
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
$(call if_changed,uimage,lzo)
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
$(call if_changed,objcopy)
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 8e1d72a16759..7ceae24b0ca9 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -356,7 +356,9 @@ err_nomem:
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
- if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+ size = PAGE_ALIGN(size);
+
+ if (!sparc_dma_free_resource(cpu_addr, size))
return;
dma_make_coherent(dma_addr, size);
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
index c9da9f139694..f3a8cd491ce0 100644
--- a/arch/sparc/lib/iomap.c
+++ b/arch/sparc/lib/iomap.c
@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
/* nothing to do */
}
EXPORT_SYMBOL(pci_iounmap);
+#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4e001bbbb425..dad7f85dcdea 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
config ARCH_HIBERNATION_POSSIBLE
def_bool y
+config ARCH_NR_GPIO
+ int
+ default 1024 if X86_64
+ default 512
+
config ARCH_SUSPEND_POSSIBLE
def_bool y
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index e7355f8b51c2..94834c4b5e5e 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -4,6 +4,12 @@
tune = $(call cc-option,-mtune=$(1),$(2))
+ifdef CONFIG_CC_IS_CLANG
+align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align := -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
cflags-$(CONFIG_M486SX) += -march=i486
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6) += -march=k6
# They make zero difference whatsosever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 8cb7816d03b4..193204aee880 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
static void kill_me_now(struct callback_head *ch)
{
+ struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+ p->mce_count = 0;
force_sig(SIGBUS);
}
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
int flags = MF_ACTION_REQUIRED;
int ret;
+ p->mce_count = 0;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
}
}
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
{
- current->mce_addr = m->addr;
- current->mce_kflags = m->kflags;
- current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
- current->mce_whole_page = whole_page(m);
+ int count = ++current->mce_count;
- if (kill_current_task)
- current->mce_kill_me.func = kill_me_now;
- else
- current->mce_kill_me.func = kill_me_maybe;
+ /* First call, save all the details */
+ if (count == 1) {
+ current->mce_addr = m->addr;
+ current->mce_kflags = m->kflags;
+ current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+ current->mce_whole_page = whole_page(m);
+
+ if (kill_current_task)
+ current->mce_kill_me.func = kill_me_now;
+ else
+ current->mce_kill_me.func = kill_me_maybe;
+ }
+
+ /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+ if (count > 10)
+ mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+ /* Second or later call, make sure page address matches the one from first call */
+ if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+ mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+ /* Do not call task_work_add() more than once */
+ if (count > 1)
+ return;
task_work_add(current, &current->mce_kill_me, TWA_RESUME);
}
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));
- queue_task_work(&m, kill_current_task);
+ queue_task_work(&m, msg, kill_current_task);
} else {
/*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
}
if (m.kflags & MCE_IN_KERNEL_COPYIN)
- queue_task_work(&m, kill_current_task);
+ queue_task_work(&m, msg, kill_current_task);
}
out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a6e11763763f..36098226a957 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
return 0;
p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d))
+ if (!p4d_present(*p4d))
return 0;
pud = pud_offset(p4d, addr);
- if (pud_none(*pud))
+ if (!pud_present(*pud))
return 0;
if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ if (!pmd_present(*pmd))
return 0;
if (pmd_large(*pmd))
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 3112ca7786ed..4ba2a3ee4bce 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
int err = 0;
start = sanitize_phys(start);
- end = sanitize_phys(end);
+
+ /*
+ * The end address passed into this function is exclusive, but
+ * sanitize_phys() expects an inclusive address.
+ */
+ end = sanitize_phys(end - 1) + 1;
if (start >= end) {
WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
start, end - 1, cattr_name(req_type));
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 753f63734c13..349f780a1567 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void)
x86_platform.legacy.rtc = 1;
}
+static void __init xen_domu_set_legacy_features(void)
+{
+ x86_platform.legacy.rtc = 0;
+}
+
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
@@ -1359,6 +1364,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
add_preferred_console("xenboot", 0, NULL);
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
+ x86_platform.set_legacy_features =
+ xen_domu_set_legacy_features;
} else {
const struct dom0_vga_console_info *info =
(void *)((char *)xen_start_info +
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1df5f01529e5..8d751939c6f3 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
if (pinned) {
struct page *page = pfn_to_page(pfn);
- if (static_branch_likely(&xen_struct_pages_ready))
+ pinned = false;
+ if (static_branch_likely(&xen_struct_pages_ready)) {
+ pinned = PagePinned(page);
SetPagePinned(page);
+ }
xen_mc_batch();
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(PARAVIRT_LAZY_MMU);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3c88a79a319b..38b9f7684952 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
- ret = blk_iolatency_init(q);
- if (ret)
- goto err_destroy_all;
-
ret = blk_ioprio_init(q);
if (ret)
goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
if (ret)
goto err_destroy_all;
+ ret = blk_iolatency_init(q);
+ if (ret) {
+ blk_throtl_exit(q);
+ goto err_destroy_all;
+ }
+
return 0;
err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
/* alloc failed, nothing's initialized yet, free everything */
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
+ spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
+ spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 69a12177dfb6..16d5d5338392 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return;
+
+ /* ensure all bios are off the integrity workqueue */
+ blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
- memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+ memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 86f87346232a..ff5caeb82542 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
spin_lock_irqsave(&tags->lock, flags);
rq = tags->rqs[bitnr];
- if (!rq || !refcount_inc_not_zero(&rq->ref))
+ if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
rq = NULL;
spin_unlock_irqrestore(&tags->lock, flags);
return rq;
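
The one-line change above tightens a lookup under the tags lock: before taking a reference, the request found in the slot must still identify itself with the tag that was looked up, otherwise a recycled entry could be returned. A hedged sketch of that validate-before-get pattern, with illustrative types rather than the block layer API:

#include <stddef.h>

struct req {
        int tag;
        int refcount;                     /* stand-in for the kernel's refcount_t */
};

/* Caller is assumed to hold the lock that protects the slot array. */
static struct req *find_and_get(struct req **slots, int tag)
{
        struct req *rq = slots[tag];

        if (!rq || rq->tag != tag || rq->refcount == 0)
                return NULL;              /* empty, recycled, or already dying */
        rq->refcount++;                   /* refcount_inc_not_zero() in the original */
        return rq;
}

int main(void)
{
        struct req r = { .tag = 3, .refcount = 1 };
        struct req *slots[8] = { [3] = &r };

        return find_and_get(slots, 3) ? 0 : 1;
}
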
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a97f33d0c59f..94665037f4a3 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
+#include <linux/init.h>
#include <linux/mc146818rtc.h>
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;
+ if (!x86_platform.legacy.rtc)
+ return;
+
user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
static int __init early_resume_init(void)
{
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
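
late_resume_init() above unpacks the value stored in the RTC by set_magic_time() with a modulo/divide chain, i.e. the hashes are stored as digits of a mixed-radix number. A standalone sketch of that encoding; the radices here are placeholders, not necessarily the driver's real USERHASH/FILEHASH/DEVHASH values:

#include <stdio.h>

#define USERHASH 16       /* placeholder radices */
#define FILEHASH 997
#define DEVHASH  1024

static unsigned int pack(unsigned int user, unsigned int file, unsigned int dev)
{
        /* user is the lowest "digit", then file, then dev */
        return user + USERHASH * (file + FILEHASH * dev);
}

int main(void)
{
        unsigned int val = pack(3, 120, 57);
        unsigned int user = val % USERHASH;  val /= USERHASH;
        unsigned int file = val % FILEHASH;  val /= FILEHASH;
        unsigned int dev  = val % DEVHASH;

        printf("user=%u file=%u dev=%u\n", user, file, dev);  /* 3 120 57 */
        return 0;
}
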
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
index 66b05a326910..a6f365b9cc1a 100644
--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
if (count)
return count;
- kobject_put(&attr_set->kobj);
mutex_destroy(&attr_set->update_lock);
+ kobject_put(&attr_set->kobj);
return 0;
}
EXPORT_SYMBOL_GPL(gov_attr_set_put);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1097f826ad70..8c176b7dae41 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
- if (no_load)
- return -ENODEV;
-
id = x86_match_cpu(hwp_support_ids);
if (id) {
+ bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+ if (hwp_forced)
+ pr_info("HWP enabled by BIOS\n");
+ else if (no_load)
+ return -ENODEV;
+
copy_cpu_funcs(&core_funcs);
/*
* Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
* If HWP is enabled already, though, there is no choice but to
* deal with it.
*/
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
- intel_pstate_hwp_is_enabled()) {
+ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)
goto hwp_cpu_matched;
}
+ pr_info("HWP not enabled\n");
} else {
+ if (no_load)
+ return -ENODEV;
+
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
else if (!strcmp(str, "passive"))
default_driver = &intel_cpufreq;
- if (!strcmp(str, "no_hwp")) {
- pr_info("HWP disabled\n");
+ if (!strcmp(str, "no_hwp"))
no_hwp = 1;
- }
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dc3c6b3a00e5..d356e329e6f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
MAX_HWIP
};
-#define HWIP_MAX_INSTANCE 8
+#define HWIP_MAX_INSTANCE 10
struct amd_powerplay {
void *pp_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3003ee1c9487..1d41c2c00623 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+ return r;
+}
+
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ec028cf963f5..3bc52b2c604f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 277128846dd1..463b9c0283f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
struct dentry *ent;
int r, i;
-
-
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
- if (!ent) {
+ if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
- return -EIO;
+ return PTR_ERR(ent);
}
ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
&fops_sclk_set);
- if (!ent) {
+ if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
- return -EIO;
+ return PTR_ERR(ent);
}
/* Register debugfs entries for amdgpu_ttm */
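
Both debugfs hunks above replace NULL checks with the kernel's error-pointer convention, since debugfs_create_file() reports failure as an ERR_PTR() value rather than NULL. A self-contained sketch of that pattern, using minimal stand-ins for the macros rather than the real <linux/err.h> definitions:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Pretend file creation that fails with -ENOMEM instead of returning NULL. */
static void *create_file(int fail)
{
        return fail ? ERR_PTR(-12) : (void *)0x1000;
}

int main(void)
{
        void *ent = create_file(1);

        if (IS_ERR(ent)) {                       /* a NULL test would miss this */
                printf("create failed: %ld\n", PTR_ERR(ent));
                return 1;
        }
        return 0;
}
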
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 41c6b3aacd37..ab3794c42d36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ goto init_failed;
+
r = amdgpu_device_ip_hw_init_phase1(adev);
if (r)
goto init_failed;
@@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ return r;
+
r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
@@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
dev_warn(tmp_adev->dev, "asic atom init failed!");
} else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+ if (r)
+ goto out;
+
r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index c7797eac83c3..9ff600a38559 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
break;
default:
adev->gmc.tmz_enabled = false;
- dev_warn(adev->dev,
+ dev_info(adev->dev,
"Trusted Memory Zone (TMZ) feature not supported\n");
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index dc44c946a244..98732518543e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -757,7 +757,7 @@ Out:
return res;
}
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
{
return RAS_MAX_RECORD_COUNT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index f95fc61b3021..6bb00578bfbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records, const u32 num);
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 7b634a1517f9..0554576d3695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (!ent)
- return -ENOMEM;
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 38dade421d46..94126dc39688 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
goto out;
}
+ if (bo->type == ttm_bo_type_device &&
+ new_mem->mem_type == TTM_PL_VRAM &&
+ old_mem->mem_type != TTM_PL_VRAM) {
+ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+ * accesses the BO after it's moved.
+ */
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
if (adev->mman.buffer_funcs_enabled) {
if (((old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) ||
@@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
- if (bo->type == ttm_bo_type_device &&
- new_mem->mem_type == TTM_PL_VRAM &&
- old_mem->mem_type != TTM_PL_VRAM) {
- /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
- * accesses the BO after it's moved.
- */
- abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- }
-
out:
/* update statistics */
atomic64_add(bo->base.size, &adev->num_bytes_moved);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 16a57b70cc1a..98d1b3ab3a46 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 4,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
@@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
@@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
if (!kfd)
return NULL;
- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
- * 32 and 64-bit requests are possible and must be
- * supported.
- */
- kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
- if (device_info->needs_pci_atomics &&
- !kfd->pci_atomic_requested) {
- dev_info(kfd_device,
- "skipped device %x:%x, PCI rejects atomics\n",
- pdev->vendor, pdev->device);
- kfree(kfd);
- return NULL;
- }
-
kfd->kgd = kgd;
kfd->device_info = device_info;
kfd->pdev = pdev;
@@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- kfd->vm_info.first_vmid_kfd + 1;
+ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+ * 32 and 64-bit requests are possible and must be
+ * supported.
+ */
+ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+ if (!kfd->pci_atomic_requested &&
+ kfd->device_info->needs_pci_atomics &&
+ (!kfd->device_info->no_atomic_fw_version ||
+ kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+ dev_info(kfd_device,
+ "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+ kfd->pdev->vendor, kfd->pdev->device,
+ kfd->mec_fw_version,
+ kfd->device_info->no_atomic_fw_version);
+ return false;
+ }
+
/* Verify module parameters regarding mapped process number*/
if ((hws_max_conc_proc < 0)
|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
@@ -1057,17 +1069,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
int err = 0;
err = kfd_iommu_resume(kfd);
- if (err) {
+ if (err)
dev_err(kfd_device,
"Failed to resume IOMMU for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
- return err;
- }
+ return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+ int err = 0;
err = kfd->dqm->ops.start(kfd->dqm);
if (err) {
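
The relocated atomics check above only rejects a device when PCIe atomics are required, were not granted by the root port, and the MEC firmware is too old to work without them. Restated as a standalone predicate for readability; the struct below is illustrative, with field names mirroring kfd_device_info/kfd_dev:

#include <stdbool.h>
#include <stdint.h>

struct kfd_caps {
        bool     pci_atomic_requested;      /* PCIe AtomicOps granted */
        bool     needs_pci_atomics;
        uint32_t no_atomic_fw_version;      /* 0: no firmware fallback exists */
        uint32_t mec_fw_version;
};

/* Mirror of the condition in kgd2kfd_device_init() above. */
static bool reject_device(const struct kfd_caps *c)
{
        return !c->pci_atomic_requested &&
               c->needs_pci_atomics &&
               (!c->no_atomic_fw_version ||
                c->mec_fw_version < c->no_atomic_fw_version);
}

int main(void)
{
        struct kfd_caps c = {
                .pci_atomic_requested = false,
                .needs_pci_atomics = true,
                .no_atomic_fw_version = 145,   /* e.g. the Navi1x entries above */
                .mec_fw_version = 140,
        };

        return reject_device(&c);              /* 1: firmware too old, atomics missing */
}
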
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index ab83b0de6b22..6d8f9bb2d905 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -207,6 +207,7 @@ struct kfd_device_info {
bool supports_cwsr;
bool needs_iommu_device;
bool needs_pci_atomics;
+ uint32_t no_atomic_fw_version;
unsigned int num_sdma_engines;
unsigned int num_xgmi_sdma_engines;
unsigned int num_sdma_queues_per_engine;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9b1fc54555ee..66c799f5c7cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
uint32_t agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+ memset(pa_config, 0, sizeof(*pa_config));
+
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
@@ -6024,21 +6026,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
+ if (dm->vblank_control_workqueue) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
- INIT_WORK(&work->work, vblank_control_worker);
- work->dm = dm;
- work->acrtc = acrtc;
- work->enable = enable;
+ INIT_WORK(&work->work, vblank_control_worker);
+ work->dm = dm;
+ work->acrtc = acrtc;
+ work->enable = enable;
- if (acrtc_state->stream) {
- dc_stream_retain(acrtc_state->stream);
- work->stream = acrtc_state->stream;
- }
+ if (acrtc_state->stream) {
+ dc_stream_retain(acrtc_state->stream);
+ work->stream = acrtc_state->stream;
+ }
- queue_work(dm->vblank_control_workqueue, &work->work);
+ queue_work(dm->vblank_control_workqueue, &work->work);
+ }
#endif
return 0;
@@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
- struct dc_state *dc_state)
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
struct dc_stream_state *stream = NULL;
struct drm_connector *connector;
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
- int i, j, clock, bpp;
+ int i, j, clock;
int vcpi, pbn_div, pbn = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
pbn_div = dm_mst_get_pbn_divider(stream->link);
- bpp = stream->timing.dsc_cfg.bits_per_pixel;
clock = stream->timing.pix_clk_100hz / 10;
- pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+ /* pbn is calculated by compute_mst_dsc_configs_for_state */
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (vars[j].aconnector == aconnector) {
+ pbn = vars[j].pbn;
+ break;
+ }
+ }
+
vcpi = drm_dp_mst_atomic_enable_dsc(state,
aconnector->port,
pbn, pbn_div,
@@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+ const struct drm_display_mode *native_mode;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ return;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (!encoder)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ native_mode = &amdgpu_encoder->native_mode;
+ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ native_mode->hdisplay,
+ native_mode->vdisplay);
+}
+
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
struct edid *edid)
{
@@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
+
+ amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
- * hot-plug, headless s3, dpms
+ /* Stream removed and re-enabled
+ *
+ * Can sometimes overlap with the HPD case,
+ * thus set update_hdcp to false to avoid
+ * setting HDCP multiple times.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (!(old_state->crtc && old_state->crtc->enabled) &&
+ state->crtc && state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
+ return true;
+ }
+
+ /* Hot-plug, headless s3, dpms
+ *
+ * Only start HDCP if the display is connected/enabled.
+ * update_hdcp flag will be set to false until the next
+ * HPD comes in.
*
* Handles: DESIRED -> DESIRED (Special case)
*/
@@ -8648,7 +8705,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* If PSR or idle optimizations are enabled then flush out
* any pending work before hardware programming.
*/
- flush_workqueue(dm->vblank_control_workqueue);
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
#endif
bundle->stream_update.stream = acrtc_state->stream;
@@ -8983,7 +9041,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* if there mode set or reset, disable eDP PSR */
if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
- flush_workqueue(dm->vblank_control_workqueue);
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
#endif
amdgpu_dm_psr_disable_all(dm);
}
@@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int ret, i;
bool lock_and_validation_needed = false;
struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
goto fail;
- ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
if (ret)
goto fail;
#endif
@@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
status = dc_validate_global_state(dc, dm_state->context, false);
if (status != DC_OK) {
- DC_LOG_WARNING("DC global validation failure: %s (%d)",
+ drm_dbg_atomic(dev,
+ "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 1bcba6943fd7..7af0d58c231b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
uint32_t num_slices_h;
uint32_t num_slices_v;
uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
- int pbn;
- bool dsc_enabled;
- int bpp_x16;
+ struct amdgpu_dm_connector *aconnector;
};
static int kbps_to_peak_pbn(int kbps)
@@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
- struct dc_link *dc_link)
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_vars *vars)
{
int i;
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
- struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
int count = 0;
bool debugfs_overwrite = false;
@@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].timing = &stream->timing;
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ params[count].aconnector = aconnector;
params[count].port = aconnector->port;
params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
}
/* Try no compression */
for (i = 0; i < count; i++) {
+ vars[i].aconnector = params[i].aconnector;
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i].dsc_enabled = false;
vars[i].bpp_x16 = 0;
@@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
}
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state)
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
@@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
return false;
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index b38bd68121ce..900d3f7a8498 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -39,8 +39,17 @@ void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_vars {
+ int pbn;
+ bool dsc_enabled;
+ int bpp_x16;
+ struct amdgpu_dm_connector *aconnector;
+};
+
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state);
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index c9f47d167472..b1bf80da3a55 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
depth = *pcpu;
put_cpu_ptr(&fpu_recursion_depth);
- ASSERT(depth > 1);
+ ASSERT(depth >= 1);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 8bd7f42a8053..1e44b13c1c7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
int dc_link_get_backlight_level(const struct dc_link *link)
{
-
struct abm *abm = get_abm_from_stream_res(link);
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
- if (abm == NULL || abm->funcs->get_current_backlight == NULL)
- return DC_ERROR_UNEXPECTED;
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
- return (int) abm->funcs->get_current_backlight(abm);
+ if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+ return panel_cntl->funcs->get_current_backlight(panel_cntl);
+ else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+ return (int) abm->funcs->get_current_backlight(abm);
+ else
+ return DC_ERROR_UNEXPECTED;
}
int dc_link_get_target_backlight_pwm(const struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 330edd666b7d..f6dbc5a74757 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
#include "dm_services.h"
#include "dc.h"
#include "dc_link_dp.h"
@@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries(
dp_disable_link_phy(link, signal);
/* Abort link training if failure due to sink being unplugged. */
- if (status == LINK_TRAINING_ABORT)
- break;
- else if (do_fallback) {
+ if (status == LINK_TRAINING_ABORT) {
+ enum dc_connection_type type = dc_connection_none;
+
+ dc_link_detect_sink(link, &type);
+ if (type == dc_connection_none)
+ break;
+ } else if (do_fallback) {
decide_fallback_link_setting(*link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e92339235863..e8570060d007 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -49,7 +49,6 @@
static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
{
uint64_t current_backlight;
- uint32_t round_result;
uint32_t bl_period, bl_int_count;
uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
- current_backlight = (uint64_t)(current_backlight) * bl_period;
-
- round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
- round_result = (round_result >> (bl_int_count-1)) & 1;
-
- current_backlight >>= bl_int_count;
- current_backlight += round_result;
-
return (uint32_t)(current_backlight);
}
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
index 8a08ecc34c69..4884a4e1f261 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
@@ -33,63 +33,47 @@
#define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging
#define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible
#define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible
-#define TABLE_COUNT 6
+#define TABLE_SMU_METRICS 6 // Called by Driver
+#define TABLE_COUNT 7
-#define NUM_DSPCLK_LEVELS 8
-#define NUM_SOCCLK_DPM_LEVELS 8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS 4
-#define NUM_MEMCLK_DPM_LEVELS 4
+typedef struct SmuMetricsTable_t {
+ //CPU status
+ uint16_t CoreFrequency[6]; //[MHz]
+ uint32_t CorePower[6]; //[mW]
+ uint16_t CoreTemperature[6]; //[centi-Celsius]
+ uint16_t L3Frequency[2]; //[MHz]
+ uint16_t L3Temperature[2]; //[centi-Celsius]
+ uint16_t C0Residency[6]; //Percentage
-#define NUMBER_OF_PSTATES 8
-#define NUMBER_OF_CORES 8
+ // GFX status
+ uint16_t GfxclkFrequency; //[MHz]
+ uint16_t GfxTemperature; //[centi-Celsius]
-typedef enum {
- S3_TYPE_ENTRY,
- S5_TYPE_ENTRY,
-} Sleep_Type_e;
+ // SOC IP info
+ uint16_t SocclkFrequency; //[MHz]
+ uint16_t VclkFrequency; //[MHz]
+ uint16_t DclkFrequency; //[MHz]
+ uint16_t MemclkFrequency; //[MHz]
-typedef enum {
- GFX_OFF = 0,
- GFX_ON = 1,
-} GFX_Mode_e;
+ // power, VF info for CPU/GFX telemetry rails, and then socket power total
+ uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t CurrentSocketPower; //[mW]
-typedef enum {
- CPU_P0 = 0,
- CPU_P1,
- CPU_P2,
- CPU_P3,
- CPU_P4,
- CPU_P5,
- CPU_P6,
- CPU_P7
-} CPU_PState_e;
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t EdgeTemperature;
+ uint16_t ThrottlerStatus;
+ uint16_t Spare;
-typedef enum {
- CPU_CORE0 = 0,
- CPU_CORE1,
- CPU_CORE2,
- CPU_CORE3,
- CPU_CORE4,
- CPU_CORE5,
- CPU_CORE6,
- CPU_CORE7
-} CORE_ID_e;
+} SmuMetricsTable_t;
-typedef enum {
- DF_DPM0 = 0,
- DF_DPM1,
- DF_DPM2,
- DF_DPM3,
- DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
- GFX_DPM0 = 0,
- GFX_DPM1,
- GFX_DPM2,
- GFX_DPM3,
- GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetrics_t {
+ SmuMetricsTable_t Current;
+ SmuMetricsTable_t Average;
+ uint32_t SampleStartTime;
+ uint32_t SampleStopTime;
+ uint32_t Accnt;
+} SmuMetrics_t;
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 6f1b1b50d527..18b862a90fbe 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -226,7 +226,10 @@
__SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \
__SMU_DUMMY_MAP(GfxDriverResetRecovery), \
- __SMU_DUMMY_MAP(BoardPowerCalibration),
+ __SMU_DUMMY_MAP(BoardPowerCalibration), \
+ __SMU_DUMMY_MAP(RequestGfxclk), \
+ __SMU_DUMMY_MAP(ForceGfxVid), \
+ __SMU_DUMMY_MAP(UnforceGfxVid),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
index 6e6088760b18..909a86aa60f3 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
@@ -65,6 +65,13 @@
#define PPSMC_MSG_SetDriverTableVMID 0x34
#define PPSMC_MSG_SetSoftMinCclk 0x35
#define PPSMC_MSG_SetSoftMaxCclk 0x36
-#define PPSMC_Message_Count 0x37
+#define PPSMC_MSG_GetGfxFrequency 0x37
+#define PPSMC_MSG_GetGfxVid 0x38
+#define PPSMC_MSG_ForceGfxFreq 0x39
+#define PPSMC_MSG_UnForceGfxFreq 0x3A
+#define PPSMC_MSG_ForceGfxVid 0x3B
+#define PPSMC_MSG_UnforceGfxVid 0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D
+#define PPSMC_Message_Count 0x3E
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 3ab1ce4d3419..04863a797115 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+ (adev->asic_type <= CHIP_BEIGE_GOBY))
return smu_disable_all_features_with_exception(smu,
true,
SMU_FEATURE_COUNT);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index e343cc218990..082f01893f3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = NULL;
uint32_t gen_speed, lane_width;
- if (amdgpu_ras_intr_triggered())
- return sysfs_emit(buf, "unavailable\n");
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
dpm_context = smu_dpm->dpm_context;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index b05f9541accc..3d4c65bc29dc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -44,6 +44,27 @@
#undef pr_info
#undef pr_debug
+/* unit: MHz */
+#define CYAN_SKILLFISH_SCLK_MIN 1000
+#define CYAN_SKILLFISH_SCLK_MAX 2000
+#define CYAN_SKILLFISH_SCLK_DEFAULT 1800
+
+/* unit: mV */
+#define CYAN_SKILLFISH_VDDC_MIN 700
+#define CYAN_SKILLFISH_VDDC_MAX 1129
+#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe
+
+static struct gfx_user_settings {
+ uint32_t sclk;
+ uint32_t vddc;
+} cyan_skillfish_user_settings;
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+ FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+
static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
@@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0),
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
+ MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
+ MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0),
+ MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0),
+ MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0),
+};
+
+static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP_VALID(SMU_METRICS),
};
+static int cyan_skillfish_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ sizeof(SmuMetrics_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ goto err0_out;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err1_out;
+
+ smu_table->metrics_time = 0;
+
+ return 0;
+
+err1_out:
+ smu_table->gpu_metrics_table_size = 0;
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
+}
+
+static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = cyan_skillfish_tables_init(smu);
+ if (ret)
+ return ret;
+
+ return smu_v11_0_init_smc_tables(smu);
+}
+
+static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+ smu_table->gpu_metrics_table_size = 0;
+
+ smu_table->metrics_time = 0;
+
+ return 0;
+}
+
+static int
+cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+
+ ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->Current.GfxclkFrequency;
+ break;
+ case METRICS_CURR_SOCCLK:
+ *value = metrics->Current.SocclkFrequency;
+ break;
+ case METRICS_CURR_VCLK:
+ *value = metrics->Current.VclkFrequency;
+ break;
+ case METRICS_CURR_DCLK:
+ *value = metrics->Current.DclkFrequency;
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->Current.MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->Current.CurrentSocketPower << 8) /
+ 1000;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->Current.GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->Current.SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = metrics->Current.Voltage[0];
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->Current.Voltage[1];
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = metrics->Current.ThrottlerStatus;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
+static int cyan_skillfish_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data,
+ uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_CURR_GFXCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_EDGE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDSOC,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDGFX,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&smu->sensor_lock);
+
+ return ret;
+}
+
+static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ member_type = METRICS_CURR_GFXCLK;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case SMU_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_CURR_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_CURR_DCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ char *buf)
+{
+ int ret = 0, size = 0;
+ uint32_t cur_value = 0;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC");
+ size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
+ break;
+ case SMU_OD_RANGE:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_FCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+ break;
+ default:
+ dev_warn(smu->adev->dev, "Unsupported clock type\n");
+ return ret;
+ }
+
+ return size;
+}
+
+static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t feature_mask[2];
+ uint64_t feature_enabled;
+
+ /* we need to re-init after suspend so return false */
+ if (adev->in_suspend)
+ return false;
+
+ ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[0] |
+ ((uint64_t)feature_mask[1] << 32);
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_2 *gpu_metrics =
+ (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int i, ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_soc_power = metrics.Current.Power[0];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[1];
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ for (i = 0; i < 6; i++) {
+ gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
+ gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
+ gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
+ }
+
+ for (i = 0; i < 2; i++) {
+ gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
+ gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
+ }
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_2);
+}
+
+static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int ret = 0;
+ uint32_t vid;
+
+ switch (type) {
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (size != 3 || input[0] != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+ input[1] > CYAN_SKILLFISH_SCLK_MAX) {
+ dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ return -EINVAL;
+ }
+
+ if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+ input[2] > CYAN_SKILLFISH_VDDC_MAX) {
+ dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ return -EINVAL;
+ }
+
+ cyan_skillfish_user_settings.sclk = input[1];
+ cyan_skillfish_user_settings.vddc = input[2];
+
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+ cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
+
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
+ cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
+ dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ return -EINVAL;
+ }
+
+ if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
+ (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
+ cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
+ dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
+ cyan_skillfish_user_settings.sclk, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set sclk failed!\n");
+ return ret;
+ }
+
+ if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Unforce vddc failed!\n");
+ return ret;
+ }
+ } else {
+ /*
+ * PMFW accepts SVI2 VID code, convert voltage to VID:
+ * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
+ */
+ vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Force vddc failed!\n");
+ return ret;
+ }
+ }
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
.check_fw_version = smu_v11_0_check_fw_version,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
+ .init_smc_tables = cyan_skillfish_init_smc_tables,
+ .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+ .read_sensor = cyan_skillfish_read_sensor,
+ .print_clk_levels = cyan_skillfish_print_clk_levels,
+ .is_dpm_running = cyan_skillfish_is_dpm_running,
+ .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
+ .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
.register_irq_handler = smu_v11_0_register_irq_handler,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
smu->message_map = cyan_skillfish_message_map;
+ smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true;
}
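
The commit encodes the requested voltage as an SVI2 VID before sending SMU_MSG_ForceGfxVid, using the integer form of the formula quoted in the od_edit_dpm_table() comment above. A small worked example of that conversion; it is standalone and integer-only, so it truncates rather than rounds like the floating-point variant in the comment:

#include <stdint.h>
#include <stdio.h>

/* VID steps are 1/160 V (6.25 mV) below a 1.55 V ceiling, per the formula above. */
static uint32_t mv_to_svi2_vid(uint32_t vddc_mv)
{
        return (1550 - vddc_mv) * 160 / 1000;
}

int main(void)
{
        /* e.g. 1100 mV -> (1550 - 1100) * 160 / 1000 = 72 */
        printf("%u\n", mv_to_svi2_vid(1100));
        return 0;
}
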
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index a5fc5d7cb6c7..b1ad451af06b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
uint32_t min_value, max_value;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_RANGE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm)
+ /*
+ * This addresses the case below:
+ * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
+ *
+ * For NAVI10 and later ASICs, we rely on PMFW to handle runpm. To make
+ * that possible, PMFW needs to acknowledge the dstate transition process
+ * for both the gfx (function 0) and audio (function 1) functions of the
+ * ASIC.
+ *
+ * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
+ * that of the device representing the audio function of the ASIC. That
+ * means a runpm suspend can be kicked on the ASIC even if the sound
+ * driver (snd_hda_intel) has not been loaded yet. Without the dstate
+ * transition notification from the audio function, however, PMFW cannot
+ * handle the BACO entry/exit correctly, and the driver will hang on
+ * runpm resume.
+ *
+ * To address this, we fall back to the legacy message path (the driver
+ * masters the timing of BACO entry/exit) when the sound driver is missing.
+ */
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else
return smu_v11_0_baco_enter(smu);
@@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm) {
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 5e292c3f5050..ca57221e3962 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
uint32_t smu_version;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm)
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else
return smu_v11_0_baco_enter(smu);
@@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm) {
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 3a3421452e57..f6ef0ce6e9e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 5aa175e12a78..145f13b8c977 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ab652028e003..5019903db492 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
uint32_t freq_values[3] = {0};
uint32_t min_clk, max_clk;
- if (amdgpu_ras_intr_triggered())
- return sysfs_emit(buf, "unavailable\n");
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
dpm_context = smu_dpm->dpm_context;
switch (type) {
case SMU_OD_SCLK:
- size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
fallthrough;
case SMU_SCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
- size = sysfs_emit(buf, "%s:\n", "MCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
fallthrough;
case SMU_MCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 627ba2eec7fd..a403657151ba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 66711ab24c15..843d2cbfc71d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
return ret;
}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+ bool snd_driver_loaded;
+
+ /*
+ * If the ASIC comes with no audio function, we always assume
+ * it is "enabled".
+ */
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (!p)
+ return true;
+
+ snd_driver_loaded = pci_is_enabled(p);
+
+ pci_dev_put(p);
+
+ return snd_driver_loaded;
+}
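
For reference, the third argument of pci_get_domain_bus_and_slot() is a devfn, so the literal 1 above is PCI_DEVFN(0, 1): device 0, function 1, i.e. the HDMI audio function that shares the GPU's bus (assuming, as the lookup above does, that the GPU sits at device 0). A minimal, illustrative sketch using the same PCI helpers; gpu_pdev is a hypothetical struct pci_dev pointer for the gfx function:

	struct pci_dev *audio;
	bool audio_enabled = true;	/* no audio function counts as "enabled" */

	audio = pci_get_domain_bus_and_slot(pci_domain_nr(gpu_pdev->bus),
					    gpu_pdev->bus->number,
					    PCI_DEVFN(0, 1));
	if (audio) {
		/* true once a driver (typically snd_hda_intel) enabled it */
		audio_enabled = pci_is_enabled(audio);
		pci_dev_put(audio);
	}
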
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 16993daa2ae0..beea03810bca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
+/*
+ * Helper to keep sysfs_emit_at() happy: rewind buf to the start of its
+ * page (sysfs_emit_at() requires a page-aligned buffer) and record the
+ * original offset.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+ if (!*buf || !offset)
+ return;
+
+ *offset = offset_in_page(*buf);
+ *buf -= *offset;
+}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
#endif
#endif
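
All of the print_clk_levels hunks above follow the same pattern this helper enables: sysfs_emit_at() warns and emits nothing unless buf is page-aligned, so a printer that may receive an offset pointer first rewinds to the page start and carries the original offset in size. A minimal sketch of the pattern (the printer and its values are made up; smu_cmn_get_sysfs_buf() is the helper defined above and sysfs_emit_at() is the stock sysfs helper):

static int example_print_clk_levels(char *buf)
{
	int size = 0;

	/* buf -> start of its page, size = the original offset */
	smu_cmn_get_sysfs_buf(&buf, &size);

	size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
	size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 800);
	size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 2200);

	return size;	/* end offset within the page, as in the hunks above */
}
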
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 76d38561c910..cf741c5c82d2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (switch_mmu_context) {
struct etnaviv_iommu_context *old_context = gpu->mmu_context;
- etnaviv_iommu_context_get(mmu_context);
- gpu->mmu_context = mmu_context;
+ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
etnaviv_iommu_context_put(old_context);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 8f1b5af47dd6..f0b2540e60e4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
list_del(&mapping->obj_node);
}
- etnaviv_iommu_context_get(mmu_context);
- mapping->context = mmu_context;
+ mapping->context = etnaviv_iommu_context_get(mmu_context);
mapping->use = 1;
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 4dd7d9d541c0..486259e154af 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
submit->ctx = file->driver_priv;
- etnaviv_iommu_context_get(submit->ctx->mmu);
- submit->mmu_context = submit->ctx->mmu;
+ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index c297fffe06eb..cc5b07f86346 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* We rely on the GPU running, so program the clock */
etnaviv_gpu_update_clock(gpu);
+ gpu->fe_running = false;
+ gpu->exec_state = -1;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
+
return 0;
}
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
}
+
+ gpu->fe_running = true;
}
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
- &gpu->mmu_context->cmdbuf_mapping);
u16 prefetch;
+ u32 address;
/* setup the MMU */
- etnaviv_iommu_restore(gpu, gpu->mmu_context);
+ etnaviv_iommu_restore(gpu, context);
/* Start command processor */
prefetch = etnaviv_buffer_init(gpu);
+ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
etnaviv_gpu_start_fe(gpu, address, prefetch);
}
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Now program the hardware */
mutex_lock(&gpu->lock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
spin_unlock(&gpu->event_spinlock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
- gpu->mmu_context = NULL;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
- if (!gpu->mmu_context) {
- etnaviv_iommu_context_get(submit->mmu_context);
- gpu->mmu_context = submit->mmu_context;
- etnaviv_gpu_start_fe_idleloop(gpu);
- } else {
- etnaviv_iommu_context_get(gpu->mmu_context);
- submit->prev_mmu_context = gpu->mmu_context;
- }
+ if (!gpu->fe_running)
+ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->initialized && gpu->mmu_context) {
+ if (gpu->initialized && gpu->fe_running) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
*/
etnaviv_gpu_wait_idle(gpu, 100);
- etnaviv_iommu_context_put(gpu->mmu_context);
- gpu->mmu_context = NULL;
+ gpu->fe_running = false;
}
gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+
if (gpu->initialized) {
etnaviv_cmdbuf_free(&gpu->buffer);
etnaviv_iommu_global_fini(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8ea48697d132..1c75c8ed5bce 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
bool initialized;
+ bool fe_running;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 1a7c89a67bea..afe5dd6a9925 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
/* set base addresses */
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f8bf488e9d71..d664ae29ae20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)v2_context->mtlb_dma,
(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
lower_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index dab1b58006d8..9fb1a2aadbcb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
etnaviv_iommu_remove_mapping(context, m);
+ etnaviv_iommu_context_put(m->context);
m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index d1d6902fd13b..e4a0b7d09c2e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
kref_get(&ctx->refcount);
+ return ctx;
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
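
Returning the context from the get helper is what lets the callers earlier in this series (etnaviv_buffer.c, etnaviv_gem.c, etnaviv_gem_submit.c and the iommu restore paths) collapse "take a reference, then assign" into a single statement. A generic sketch of the idiom, with hypothetical names around the real kref API:

#include <linux/kref.h>

struct example_ctx {
	struct kref refcount;
};

static inline struct example_ctx *example_ctx_get(struct example_ctx *ctx)
{
	kref_get(&ctx->refcount);
	return ctx;		/* enables: obj->ctx = example_ctx_get(ctx); */
}
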
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 642a5b5a1b81..335ba9f43d8f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 04175f359fd6..abe3d61b6243 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
- sizeof(intel_dp->edp_dpcd))
+ sizeof(intel_dp->edp_dpcd)) {
drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
(int)sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+ }
+
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 053a3c2f7267..508a514c5e37 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
}
if (ret)
- intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+ ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index cff72679ad7c..9ccf4b29b82e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ if (ctx->syncobj)
+ drm_syncobj_put(ctx->syncobj);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
if (vm)
i915_vm_close(vm);
- if (ctx->syncobj)
- drm_syncobj_put(ctx->syncobj);
-
ctx->file_priv = ERR_PTR(-EBADF);
/*
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index ffae7df5e4d7..4a6bb64c3a35 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import != &obj->base) {
pr_err("i915_gem_prime_import created a new object!\n");
err = -EINVAL;
goto out_import;
}
- import_obj = to_intel_bo(import);
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
PTR_ERR(import));
err = PTR_ERR(import);
+ } else {
+ err = 0;
}
dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import == &obj->base) {
pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_import;
}
- import_obj = to_intel_bo(import);
-
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
if (err) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index b20f5621f62b..a2c34e5a1c54 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
return I915_MMAP_TYPE_GTT;
}
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+ unsigned long size)
+{
+ if (HAS_LMEM(i915)) {
+ struct intel_memory_region *sys_region =
+ i915->mm.regions[INTEL_REGION_SMEM];
+
+ return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+ }
+
+ return i915_gem_object_create_internal(i915, size);
+}
+
static bool assert_mmap_offset(struct drm_i915_private *i915,
unsigned long size,
int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
u64 offset;
int ret;
- obj = i915_gem_object_create_internal(i915, size);
+ obj = create_sys_or_internal(i915, size);
if (IS_ERR(obj))
return expected && expected == PTR_ERR(obj);
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_mm_node *hole, *next;
int loop, err = 0;
u64 offset;
+ int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
/* Disable background reaper */
disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
/* Too large */
- if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
err = -EINVAL;
goto out;
}
/* Fill the hole, further allocation attempts should then fail */
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = create_sys_or_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto err_obj;
}
- if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
err = -EINVAL;
goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
bool no_map;
- if (HAS_LMEM(i915))
+ if (obj->ops->mmap_offset)
return type == I915_MMAP_TYPE_FIXED;
else if (type == I915_MMAP_TYPE_FIXED)
return false;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index d812b27835f8..591a5224287e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
- return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+ return freq;
}
static u32 intel_rps_get_req(u32 pureq)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index b104fb7607eb..86c318516e14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
__uc_free_load_err_log(uc);
}
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
- return intel_guc_ct_enabled(&guc->ct);
-}
-
/*
* Events triggered while CT buffers are disabled are logged in the SCRATCH_15
* register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
/* we need communication to be enabled to reply to GuC */
- GEM_BUG_ON(!guc_communication_enabled(guc));
+ GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
spin_lock_irq(&guc->irq_lock);
if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
struct drm_i915_private *i915 = gt->i915;
int ret;
- GEM_BUG_ON(guc_communication_enabled(guc));
+ GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
return 0;
/* Make sure we enable communication if and only if it's disabled */
- GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+ GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
if (enable_communication)
guc_enable_communication(guc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index b0ece71aefde..ce774579c89d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
args->v0.count = 0;
args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
- args->v0.pwrsrc = -ENOSYS;
+ args->v0.pwrsrc = -ENODEV;
args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0473583dcdac..482fb0ae6cb5 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
#endif
if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
- rdev->agp = radeon_agp_head_init(rdev->ddev);
+ rdev->agp = radeon_agp_head_init(dev);
if (rdev->agp) {
rdev->agp->agp_mtrr = arch_phys_wc_add(
rdev->agp->agp_info.aper_base,
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 4a1115043114..b4b4653fe301 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
if (vc4_hdmi->hpd_gpio &&
gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
union hdmi_infoframe frame;
int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
if (!vc4_hdmi_supports_scrambling(encoder, mode))
return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
+ struct drm_crtc *crtc = encoder->crtc;
/*
- * At boot, connector->state will be NULL. Since we don't know the
+ * At boot, encoder->crtc will be NULL. Since we don't know the
* state of the scrambler and in order to avoid any
* inconsistency, let's disable it all the time.
*/
- if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
return;
- if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
return;
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long bvb_rate, pixel_rate, hsm_rate;
int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
return;
}
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
{
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_crtc *crtc = connector->state->crtc;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 n, cts;
u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
/*
* If the HDMI encoder hasn't probed, or the encoder is
* currently in DVI mode, treat the codec dai as missing.
*/
- if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+ if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE))
return -ENODEV;
@@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
- return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret)
- return ret;
-
- return 0;
-}
-#endif
-
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
- SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
- vc4_hdmi_runtime_resume,
- NULL)
-};
-
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
- .pm = &vc4_hdmi_pm_ops,
},
};
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a533a90e3904..a7aeb3c132c9 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -351,9 +351,25 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
static void b53_mdio_remove(struct mdio_device *mdiodev)
{
struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
- struct dsa_switch *ds = dev->ds;
- dsa_unregister_switch(ds);
+ if (!dev)
+ return;
+
+ b53_switch_remove(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void b53_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id b53_of_match[] = {
@@ -373,6 +389,7 @@ MODULE_DEVICE_TABLE(of, b53_of_match);
static struct mdio_driver b53_mdio_driver = {
.probe = b53_mdio_probe,
.remove = b53_mdio_remove,
+ .shutdown = b53_mdio_shutdown,
.mdiodrv.driver = {
.name = "bcm53xx",
.of_match_table = b53_of_match,
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 82680e083cc2..ae4c79d39bc0 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -316,9 +316,21 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void b53_mmap_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm3384-switch" },
{ .compatible = "brcm,bcm6328-switch" },
@@ -331,6 +343,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
.remove = b53_mmap_remove,
+ .shutdown = b53_mmap_shutdown,
.driver = {
.name = "b53-switch",
.of_match_table = b53_mmap_of_table,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 5be8fe000037..544101e74bca 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -227,6 +227,11 @@ static inline void b53_switch_remove(struct b53_device *dev)
dsa_unregister_switch(dev->ds);
}
+static inline void b53_switch_shutdown(struct b53_device *dev)
+{
+ dsa_switch_shutdown(dev->ds);
+}
+
#define b53_build_op(type_op_size, val_type) \
static inline int b53_##type_op_size(struct b53_device *dev, u8 page, \
u8 reg, val_type val) \
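
The same shutdown/remove pairing recurs throughout the DSA drivers in this series (the b53 variants above and below, and the other switch drivers that follow): .shutdown calls dsa_switch_shutdown() and clears drvdata so that a later .remove (or a second shutdown) becomes a no-op. A generic sketch of the pattern for a platform device, using the real DSA and driver-core APIs around a hypothetical priv type:

static void example_switch_shutdown(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	if (!priv)	/* already torn down by remove or an earlier shutdown */
		return;

	dsa_switch_shutdown(priv->ds);

	/* make any later ->remove() a no-op */
	platform_set_drvdata(pdev, NULL);
}
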
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index ecb9f7f6b335..01e37b75471e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -321,9 +321,21 @@ static int b53_spi_remove(struct spi_device *spi)
if (dev)
b53_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
+static void b53_spi_shutdown(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ spi_set_drvdata(spi, NULL);
+}
+
static const struct of_device_id b53_spi_of_match[] = {
{ .compatible = "brcm,bcm5325" },
{ .compatible = "brcm,bcm5365" },
@@ -344,6 +356,7 @@ static struct spi_driver b53_spi_driver = {
},
.probe = b53_spi_probe,
.remove = b53_spi_remove,
+ .shutdown = b53_spi_shutdown,
};
module_spi_driver(b53_spi_driver);
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 3f4249de70c5..4591bb1c05d2 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -629,17 +629,34 @@ static int b53_srab_probe(struct platform_device *pdev)
static int b53_srab_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
- struct b53_srab_priv *priv = dev->priv;
- b53_srab_intr_set(priv, false);
+ if (!dev)
+ return 0;
+
+ b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void b53_srab_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static struct platform_driver b53_srab_driver = {
.probe = b53_srab_probe,
.remove = b53_srab_remove,
+ .shutdown = b53_srab_shutdown,
.driver = {
.name = "b53-srab-switch",
.of_match_table = b53_srab_of_match,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index aa713936d77c..a86ddc4bb897 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -68,7 +68,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port, count = 0;
- for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
continue;
if (priv->port_sts[port].enabled)
@@ -1514,6 +1514,9 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return 0;
+
priv->wol_ports_mask = 0;
/* Disable interrupts */
bcm_sf2_intr_disable(priv);
@@ -1525,6 +1528,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -1532,6 +1537,9 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return;
+
/* For a kernel about to be kexec'd we want to keep the GPHY on for a
* successful MDIO bus scan to occur. If we did turn off the GPHY
* before (e.g: port_disable), this will also power it back on.
@@ -1540,6 +1548,10 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
*/
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
+ dsa_switch_shutdown(priv->dev->ds);
+
+ platform_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index bfdf3324aac3..e638e3eea911 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -340,10 +340,29 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct dsa_loop_priv *ps = ds->priv;
+ struct dsa_loop_priv *ps;
+
+ if (!ds)
+ return;
+
+ ps = ds->priv;
dsa_unregister_switch(ds);
dev_put(ps->netdev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver dsa_loop_drv = {
@@ -352,6 +371,7 @@ static struct mdio_driver dsa_loop_drv = {
},
.probe = dsa_loop_drv_probe,
.remove = dsa_loop_drv_remove,
+ .shutdown = dsa_loop_drv_shutdown,
};
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 542cfc4ccb08..354655f9ed00 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1916,6 +1916,9 @@ static int hellcreek_remove(struct platform_device *pdev)
{
struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+ if (!hellcreek)
+ return 0;
+
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
@@ -1924,6 +1927,18 @@ static int hellcreek_remove(struct platform_device *pdev)
return 0;
}
+static void hellcreek_shutdown(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ if (!hellcreek)
+ return;
+
+ dsa_switch_shutdown(hellcreek->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct hellcreek_platform_data de1soc_r1_pdata = {
.name = "r4c30",
.num_ports = 4,
@@ -1946,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match);
static struct platform_driver hellcreek_driver = {
.probe = hellcreek_probe,
.remove = hellcreek_remove,
+ .shutdown = hellcreek_shutdown,
.driver = {
.name = "hellcreek",
.of_match_table = hellcreek_of_match,
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d7ce281570b5..89f920289ae2 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1379,6 +1379,12 @@ int lan9303_remove(struct lan9303 *chip)
}
EXPORT_SYMBOL(lan9303_remove);
+void lan9303_shutdown(struct lan9303 *chip)
+{
+ dsa_switch_shutdown(chip->ds);
+}
+EXPORT_SYMBOL(lan9303_shutdown);
+
MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h
index 11f590b64701..c7f73efa50f0 100644
--- a/drivers/net/dsa/lan9303.h
+++ b/drivers/net/dsa/lan9303.h
@@ -10,3 +10,4 @@ extern const struct lan9303_phy_ops lan9303_indirect_phy_ops;
int lan9303_probe(struct lan9303 *chip, struct device_node *np);
int lan9303_remove(struct lan9303 *chip);
+void lan9303_shutdown(struct lan9303 *chip);
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 9bffaef65a04..8ca4713310fa 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -67,13 +67,28 @@ static int lan9303_i2c_probe(struct i2c_client *client,
static int lan9303_i2c_remove(struct i2c_client *client)
{
- struct lan9303_i2c *sw_dev;
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
- sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return -ENODEV;
+ return 0;
+
+ lan9303_remove(&sw_dev->chip);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static void lan9303_i2c_shutdown(struct i2c_client *client)
+{
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
- return lan9303_remove(&sw_dev->chip);
+ i2c_set_clientdata(client, NULL);
}
/*-------------------------------------------------------------------------*/
@@ -97,6 +112,7 @@ static struct i2c_driver lan9303_i2c_driver = {
},
.probe = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
+ .shutdown = lan9303_i2c_shutdown,
.id_table = lan9303_i2c_id,
};
module_i2c_driver(lan9303_i2c_driver);
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index 9cbe80460b53..bbb7032409ba 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,6 +138,20 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
/*-------------------------------------------------------------------------*/
@@ -155,6 +169,7 @@ static struct mdio_driver lan9303_mdio_driver = {
},
.probe = lan9303_mdio_probe,
.remove = lan9303_mdio_remove,
+ .shutdown = lan9303_mdio_shutdown,
};
mdio_module_driver(lan9303_mdio_driver);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 267324889dd6..3ff4b7e177f3 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -2184,6 +2184,9 @@ static int gswip_remove(struct platform_device *pdev)
struct gswip_priv *priv = platform_get_drvdata(pdev);
int i;
+ if (!priv)
+ return 0;
+
/* disable the switch */
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
@@ -2197,9 +2200,23 @@ static int gswip_remove(struct platform_device *pdev)
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void gswip_shutdown(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct gswip_hw_info gswip_xrx200 = {
.max_ports = 7,
.cpu_port = 6,
@@ -2223,6 +2240,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match);
static struct platform_driver gswip_driver = {
.probe = gswip_probe,
.remove = gswip_remove,
+ .shutdown = gswip_shutdown,
.driver = {
.name = "gswip",
.of_match_table = gswip_of_match,
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index ea7550d1b634..866767b70d65 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -94,6 +94,8 @@ static int ksz8795_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
@@ -101,8 +103,15 @@ static void ksz8795_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
- if (dev && dev->dev_ops->shutdown)
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->shutdown)
dev->dev_ops->shutdown(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct of_device_id ksz8795_dt_ids[] = {
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 11293485138c..5883fa7edda2 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -191,6 +191,18 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
+{
+ struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (dev)
+ dsa_switch_shutdown(dev->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ksz8863_dt_ids[] = {
@@ -203,6 +215,7 @@ MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
static struct mdio_driver ksz8863_driver = {
.probe = ksz8863_smi_probe,
.remove = ksz8863_smi_remove,
+ .shutdown = ksz8863_smi_shutdown,
.mdiodrv.driver = {
.name = "ksz8863-switch",
.of_match_table = ksz8863_dt_ids,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 4e053a25d077..f3afb8b8c4cc 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -56,7 +56,10 @@ static int ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
- ksz_switch_remove(dev);
+ if (dev)
+ ksz_switch_remove(dev);
+
+ i2c_set_clientdata(i2c, NULL);
return 0;
}
@@ -65,8 +68,15 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
- if (dev && dev->dev_ops->shutdown)
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->shutdown)
dev->dev_ops->shutdown(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ i2c_set_clientdata(i2c, NULL);
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 15bc11b3cda4..e3cb0e6c9f6f 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -72,6 +72,8 @@ static int ksz9477_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
@@ -79,8 +81,10 @@ static void ksz9477_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
- if (dev && dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev)
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct of_device_id ksz9477_dt_ids[] = {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index d0cba2d1cd68..094737e5084a 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -3286,6 +3286,9 @@ mt7530_remove(struct mdio_device *mdiodev)
struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
int ret = 0;
+ if (!priv)
+ return;
+
ret = regulator_disable(priv->core_pwr);
if (ret < 0)
dev_err(priv->dev,
@@ -3301,11 +3304,26 @@ mt7530_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver mt7530_mdio_driver = {
.probe = mt7530_probe,
.remove = mt7530_remove,
+ .shutdown = mt7530_shutdown,
.mdiodrv.driver = {
.name = "mt7530",
.of_match_table = mt7530_of_match,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 24b8219fd607..a4c6eb9a52d0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -290,7 +290,24 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ if (!ds)
+ return;
+
dsa_unregister_switch(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6060_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id mv88e6060_of_match[] = {
@@ -303,6 +320,7 @@ static const struct of_device_id mv88e6060_of_match[] = {
static struct mdio_driver mv88e6060_driver = {
.probe = mv88e6060_probe,
.remove = mv88e6060_remove,
+ .shutdown = mv88e6060_shutdown,
.mdiodrv.driver = {
.name = "mv88e6060",
.of_match_table = mv88e6060_of_match,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c45ca2473743..8ab0be793811 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3071,7 +3071,7 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
- mv88e6xxx_teardown_devlink_regions(ds);
+ mv88e6xxx_teardown_devlink_regions_global(ds);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3215,7 +3215,7 @@ unlock:
if (err)
goto out_resources;
- err = mv88e6xxx_setup_devlink_regions(ds);
+ err = mv88e6xxx_setup_devlink_regions_global(ds);
if (err)
goto out_params;
@@ -3229,6 +3229,16 @@ out_resources:
return err;
}
+static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+{
+ return mv88e6xxx_setup_devlink_regions_port(ds, port);
+}
+
+static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+{
+ mv88e6xxx_teardown_devlink_regions_port(ds, port);
+}
+
/* prod_id for switch families which do not have a PHY model number */
static const u16 family_prod_id_table[] = {
[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
@@ -6116,6 +6126,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
.setup = mv88e6xxx_setup,
.teardown = mv88e6xxx_teardown,
+ .port_setup = mv88e6xxx_port_setup,
+ .port_teardown = mv88e6xxx_port_teardown,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
@@ -6389,7 +6401,12 @@ out:
static void mv88e6xxx_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_chip *chip;
+
+ if (!ds)
+ return;
+
+ chip = ds->priv;
if (chip->info->ptp_support) {
mv88e6xxx_hwtstamp_free(chip);
@@ -6410,6 +6427,20 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
@@ -6433,6 +6464,7 @@ MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
static struct mdio_driver mv88e6xxx_driver = {
.probe = mv88e6xxx_probe,
.remove = mv88e6xxx_remove,
+ .shutdown = mv88e6xxx_shutdown,
.mdiodrv.driver = {
.name = "mv88e6085",
.of_match_table = mv88e6xxx_of_match,
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 0c0f5ea6680c..381068395c63 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -647,26 +647,25 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
},
};
-static void
-mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
dsa_devlink_region_destroy(chip->regions[i]);
}
-static void
-mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
- int port)
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
dsa_devlink_region_destroy(chip->ports[port].region);
}
-static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip,
- int port)
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region *region;
region = dsa_devlink_port_region_create(ds,
@@ -681,40 +680,10 @@ static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
return 0;
}
-static void
-mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
-{
- int port;
-
- for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
- mv88e6xxx_teardown_devlink_regions_port(chip, port);
-}
-
-static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip)
-{
- int port;
- int err;
-
- for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
- err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
- if (err)
- goto out;
- }
-
- return 0;
-
-out:
- while (port-- > 0)
- mv88e6xxx_teardown_devlink_regions_port(chip, port);
-
- return err;
-}
-
-static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip)
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
+ struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
@@ -753,30 +722,6 @@ out:
return PTR_ERR(region);
}
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
- if (err)
- return err;
-
- err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
- if (err)
- mv88e6xxx_teardown_devlink_regions_global(chip);
-
- return err;
-}
-
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- mv88e6xxx_teardown_devlink_regions_ports(chip);
- mv88e6xxx_teardown_devlink_regions_global(chip);
-}
-
int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
index 3d72db3dcf95..65ce6a6858b9 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.h
+++ b/drivers/net/dsa/mv88e6xxx/devlink.h
@@ -12,8 +12,10 @@ int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3656e67af789..a3a9636430d6 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019-2021 NXP Semiconductors
+/* Copyright 2019-2021 NXP
*
* This is an umbrella module for all network switches that are
* register-compatible with Ocelot and that perform I/O to their host CPU
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 5854bab43327..54024b6f9498 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
*/
#ifndef _MSCC_FELIX_H
#define _MSCC_FELIX_H
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index f966a253d1c7..11b42fd812e4 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2017 Microsemi Corporation
- * Copyright 2018-2019 NXP Semiconductors
+ * Copyright 2018-2019 NXP
*/
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
@@ -1472,9 +1472,10 @@ err_pci_enable:
static void felix_pci_remove(struct pci_dev *pdev)
{
- struct felix *felix;
+ struct felix *felix = pci_get_drvdata(pdev);
- felix = pci_get_drvdata(pdev);
+ if (!felix)
+ return;
dsa_unregister_switch(felix->ds);
@@ -1482,6 +1483,20 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void felix_pci_shutdown(struct pci_dev *pdev)
+{
+ struct felix *felix = pci_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ pci_set_drvdata(pdev, NULL);
}
static struct pci_device_id felix_ids[] = {
@@ -1498,6 +1513,7 @@ static struct pci_driver felix_vsc9959_pci_driver = {
.id_table = felix_ids,
.probe = felix_pci_probe,
.remove = felix_pci_remove,
+ .shutdown = felix_pci_shutdown,
};
module_pci_driver(felix_vsc9959_pci_driver);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index deae923c8b7a..de1d34a1f1e4 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1245,18 +1245,33 @@ err_alloc_felix:
static int seville_remove(struct platform_device *pdev)
{
- struct felix *felix;
+ struct felix *felix = platform_get_drvdata(pdev);
- felix = platform_get_drvdata(pdev);
+ if (!felix)
+ return 0;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void seville_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = platform_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id seville_of_match[] = {
{ .compatible = "mscc,vsc9953-switch" },
{ },
@@ -1266,6 +1281,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match);
static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
.remove = seville_remove,
+ .shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
.of_match_table = of_match_ptr(seville_of_match),
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 563d8a279030..a6bfb6abc51a 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1083,6 +1083,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
unsigned int i;
+ if (!priv)
+ return;
+
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
struct ar9331_sw_port *port = &priv->port[i];
@@ -1094,6 +1097,20 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(&priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ar9331_sw_of_match[] = {
@@ -1104,6 +1121,7 @@ static const struct of_device_id ar9331_sw_of_match[] = {
static struct mdio_driver ar9331_sw_mdio_driver = {
.probe = ar9331_sw_probe,
.remove = ar9331_sw_remove,
+ .shutdown = ar9331_sw_shutdown,
.mdiodrv.driver = {
.name = AR9331_SW_NAME,
.of_match_table = ar9331_sw_of_match,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index bda5a9bf4f52..a984f06f6f04 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1880,10 +1880,27 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
int i;
+ if (!priv)
+ return;
+
for (i = 0; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
@@ -1940,6 +1957,7 @@ static const struct of_device_id qca8k_of_match[] = {
static struct mdio_driver qca8kmdio_driver = {
.probe = qca8k_sw_probe,
.remove = qca8k_sw_remove,
+ .shutdown = qca8k_sw_shutdown,
.mdiodrv.driver = {
.name = "qca8k",
.of_match_table = qca8k_of_match,
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index 8e49d4f85d48..2fcfd917b876 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -368,7 +368,7 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
smi->slave_mii_bus->parent = smi->dev;
smi->ds->slave_mii_bus = smi->slave_mii_bus;
- ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+ ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
if (ret) {
dev_err(smi->dev, "unable to register MDIO bus %s\n",
smi->slave_mii_bus->id);
@@ -464,16 +464,33 @@ static int realtek_smi_probe(struct platform_device *pdev)
static int realtek_smi_remove(struct platform_device *pdev)
{
- struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+ struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+ if (!smi)
+ return 0;
dsa_unregister_switch(smi->ds);
if (smi->slave_mii_bus)
of_node_put(smi->slave_mii_bus->dev.of_node);
gpiod_set_value(smi->reset, 1);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+ if (!smi)
+ return;
+
+ dsa_switch_shutdown(smi->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id realtek_smi_of_match[] = {
{
.compatible = "realtek,rtl8366rb",
@@ -495,6 +512,7 @@ static struct platform_driver realtek_smi_driver = {
},
.probe = realtek_smi_probe,
.remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
};
module_platform_driver(realtek_smi_driver);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 387a1f2f161c..5bbf1707f2af 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include <linux/packing.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 05c7f4ca3b1a..0569ff066634 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
- * Copyright 2020 NXP Semiconductors
+ * Copyright 2020 NXP
*/
#include "sja1105.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 6c10ffa968ce..72b9b39b0989 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#include "sja1105.h"
#include "sja1105_vl.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 741e965f6068..0f1bba0076a8 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3340,13 +3340,29 @@ static int sja1105_probe(struct spi_device *spi)
static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
- struct dsa_switch *ds = priv->ds;
- dsa_unregister_switch(ds);
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
return 0;
}
+static void sja1105_shutdown(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
static const struct of_device_id sja1105_dt_ids[] = {
{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
@@ -3370,6 +3386,7 @@ static struct spi_driver sja1105_driver = {
},
.probe = sja1105_probe,
.remove = sja1105_remove,
+ .shutdown = sja1105_shutdown,
};
module_spi_driver(sja1105_driver);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 705d3900e43a..215dd17ca790 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021, NXP Semiconductors
+/* Copyright 2021 NXP
*/
#include <linux/pcs/pcs-xpcs.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d60a530d0272..d3c9ad6d39d4 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 7a422ef4deb6..baba204ad62f 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include "sja1105_static_config.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index bce0f5c03d0b..6a372d5f22ae 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_STATIC_CONFIG_H
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index 061a1e0235ad..d55572994e1f 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
index 173d78963fed..51fba0dce91a 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.h
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#ifndef _SJA1105_VL_H
#define _SJA1105_VL_H
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 19ce4aa0973b..a4b1447ff055 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1225,6 +1225,12 @@ int vsc73xx_remove(struct vsc73xx *vsc)
}
EXPORT_SYMBOL(vsc73xx_remove);
+void vsc73xx_shutdown(struct vsc73xx *vsc)
+{
+ dsa_switch_shutdown(vsc->ds);
+}
+EXPORT_SYMBOL(vsc73xx_shutdown);
+
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index 2a57f337b2a2..fe4b154a0a57 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -116,7 +116,26 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
{
struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
- return vsc73xx_remove(&vsc_platform->vsc);
+ if (!vsc_platform)
+ return 0;
+
+ vsc73xx_remove(&vsc_platform->vsc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void vsc73xx_platform_shutdown(struct platform_device *pdev)
+{
+ struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+ if (!vsc_platform)
+ return;
+
+ vsc73xx_shutdown(&vsc_platform->vsc);
+
+ platform_set_drvdata(pdev, NULL);
}
static const struct vsc73xx_ops vsc73xx_platform_ops = {
@@ -144,6 +163,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct platform_driver vsc73xx_platform_driver = {
.probe = vsc73xx_platform_probe,
.remove = vsc73xx_platform_remove,
+ .shutdown = vsc73xx_platform_shutdown,
.driver = {
.name = "vsc73xx-platform",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 81eca4a5781d..645398901e05 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -163,7 +163,26 @@ static int vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
- return vsc73xx_remove(&vsc_spi->vsc);
+ if (!vsc_spi)
+ return 0;
+
+ vsc73xx_remove(&vsc_spi->vsc);
+
+ spi_set_drvdata(spi, NULL);
+
+ return 0;
+}
+
+static void vsc73xx_spi_shutdown(struct spi_device *spi)
+{
+ struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+ if (!vsc_spi)
+ return;
+
+ vsc73xx_shutdown(&vsc_spi->vsc);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct vsc73xx_ops vsc73xx_spi_ops = {
@@ -191,6 +210,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
+ .shutdown = vsc73xx_spi_shutdown,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 7478f8d4e0a9..30b951504e65 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -27,3 +27,4 @@ struct vsc73xx_ops {
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_shutdown(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 130abb0f1438..469420941054 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -822,6 +822,12 @@ void xrs700x_switch_remove(struct xrs700x *priv)
}
EXPORT_SYMBOL(xrs700x_switch_remove);
+void xrs700x_switch_shutdown(struct xrs700x *priv)
+{
+ dsa_switch_shutdown(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_shutdown);
+
MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x.h b/drivers/net/dsa/xrs700x/xrs700x.h
index ff62cf61b091..4d58257471d2 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.h
+++ b/drivers/net/dsa/xrs700x/xrs700x.h
@@ -40,3 +40,4 @@ struct xrs700x {
struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
int xrs700x_switch_register(struct xrs700x *priv);
void xrs700x_switch_remove(struct xrs700x *priv);
+void xrs700x_switch_shutdown(struct xrs700x *priv);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 489d9385b4f0..6deae388a0d6 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -109,11 +109,28 @@ static int xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
+ if (!priv)
+ return 0;
+
xrs700x_switch_remove(priv);
+ i2c_set_clientdata(i2c, NULL);
+
return 0;
}
+static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ i2c_set_clientdata(i2c, NULL);
+}
+
static const struct i2c_device_id xrs700x_i2c_id[] = {
{ "xrs700x-switch", 0 },
{},
@@ -137,6 +154,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
},
.probe = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
+ .shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
};
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 44f58bee04a4..d01cf1073d49 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -136,7 +136,24 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
{
struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+ if (!priv)
+ return;
+
xrs700x_switch_remove(priv);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
@@ -155,6 +172,7 @@ static struct mdio_driver xrs700x_mdio_driver = {
},
.probe = xrs700x_mdio_probe,
.remove = xrs700x_mdio_remove,
+ .shutdown = xrs700x_mdio_shutdown,
};
mdio_module_driver(xrs700x_mdio_driver);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index dee9ff74d6d6..d4b1976ee69b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -413,13 +413,13 @@ static int atl_resume_common(struct device *dev, bool deep)
if (deep) {
/* Reinitialize Nic/Vecs objects */
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ }
+ if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
goto err_exit;
- }
- if (netif_running(nic->ndev)) {
ret = aq_nic_start(nic);
if (ret)
goto err_exit;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 85fa0ab7201c..9513cfb5ba58 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
/* If no MAC address assigned via device tree, check SPROM */
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 037767b370d5..62f84cc91e4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -391,7 +391,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
* netif_tx_queue_stopped().
*/
smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
netif_tx_wake_queue(txq);
return false;
}
@@ -764,7 +764,7 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
}
@@ -2416,7 +2416,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
@@ -3640,7 +3640,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
u16 i;
bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
- MAX_SKB_FRAGS + 1);
+ BNXT_MIN_TX_DESC_CNT);
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec046e7a2484..19fe6478e9b4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -629,6 +629,11 @@ struct nqe_cn {
#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b056e3c29bbd..7260910e75fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -798,7 +798,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
- (ering->tx_pending <= MAX_SKB_FRAGS))
+ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
if (netif_running(dev))
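
The bnxt hunks above introduce BNXT_MIN_TX_DESC_CNT and flip the queue wake/stop comparisons from '>' to '>='. Per the new comment, a worst-case packet needs one descriptor for the linear data plus MAX_SKB_FRAGS descriptors for the page fragments, and one more because the first descriptor of every packet is a long BD, i.e. MAX_SKB_FRAGS + 2 in total. A toy check of that arithmetic (standalone C; the values are illustrative, in the kernel MAX_SKB_FRAGS comes from skbuff.h and is configuration dependent):

	#include <assert.h>

	#define MAX_SKB_FRAGS		17	/* illustrative value */
	#define BNXT_MIN_TX_DESC_CNT	(MAX_SKB_FRAGS + 2)

	/* Descriptors used by one packet: the long first BD costs an extra
	 * slot on top of one slot for the linear data and one per fragment.
	 */
	static int bds_for_packet(int nr_frags)
	{
		return 1 /* extra slot of the long BD */ + 1 /* linear */ + nr_frags;
	}

	int main(void)
	{
		assert(bds_for_packet(MAX_SKB_FRAGS) == BNXT_MIN_TX_DESC_CNT);
		return 0;
	}

With tx_wake_thresh now set to at least this value, comparing with '>=' rather than '>' presumably keeps the wake condition reachable even when the TX ring is configured at the new minimum size accepted by bnxt_set_ringparam().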
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 0c69409fe0d0..3cbfa8b4e265 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -419,7 +419,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
v->comp_cnt++;
@@ -1879,7 +1879,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
- cpumask_t cpu_mask;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
@@ -1908,9 +1907,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(irq, &cpu_mask);
+ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
}
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index ee1468e3eaa3..91f02c505028 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
*
* The Integrated Endpoint Register Block (IERB) is configured by pre-boot
* software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
index b3b774e0998a..c2ce47c4be9f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2021 NXP Semiconductors */
+/* Copyright 2021 NXP */
#include <linux/pci.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 80bd5c629fa0..ec87b370bba1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -4176,5 +4176,4 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
-MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 718c16d686fa..bb9b026ae88e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -2445,12 +2445,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%u)\n", vf_id);
+ dev_err(dev, "invalid vport(%u)\n", vf_id);
return;
}
@@ -2463,8 +2463,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_err(dev, "inform reset to vf(%u) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+ vf_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a0d0fa4f6a24..5160b0481656 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3667,7 +3667,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret) {
dev_err(&hdev->pdev->dev,
"set vf(%u) rst failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
return ret;
}
@@ -3682,7 +3683,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret)
dev_warn(&hdev->pdev->dev,
"inform reset to vf(%u) failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
}
return 0;
@@ -4747,6 +4749,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
return 0;
}
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = vport->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
@@ -4756,30 +4776,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
u8 hash_algo;
int ret, i;
+ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+ return ret;
+ }
+
	/* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- hash_algo = vport->rss_algo;
- break;
- default:
- return -EINVAL;
- }
-
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
/* Update the shadow RSS key with user specified qids */
memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- vport->rss_algo = hash_algo;
+ } else {
+ ret = hclge_set_rss_algo_key(hdev, hash_algo,
+ vport->rss_hash_key);
+ if (ret)
+ return ret;
}
+ vport->rss_algo = hash_algo;
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
@@ -6633,10 +6650,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
u16 tqps;
+ /* To keep consistent with user's configuration, minus 1 when
+ * printing 'vf', because vf id from ethtool is added 1 for vf.
+ */
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%u) > max vf num (%u)\n",
- vf, hdev->num_req_vfs);
+ "Error: vf id (%u) should be less than %u\n",
+ vf - 1, hdev->num_req_vfs);
return -EINVAL;
}
@@ -9825,6 +9845,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill && !vlan_id)
return 0;
+ if (vlan_id >= VLAN_N_VID)
+ return -EINVAL;
+
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -10731,7 +10754,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
return 0;
}
-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+ u8 *reset_status)
{
struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
@@ -10749,7 +10773,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+ return 0;
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
@@ -10768,7 +10794,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u16 reset_try_times = 0;
- int reset_status;
+ u8 reset_status;
u16 queue_gid;
int ret;
u16 i;
@@ -10784,7 +10810,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
+ ret = hclge_get_reset_status(hdev, queue_gid,
+ &reset_status);
+ if (ret)
+ return ret;
+
if (reset_status)
break;
@@ -11477,11 +11507,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
struct hclge_vport *vport = &hdev->vport[i];
int ret;
- /* Send cmd to clear VF's FUNC_RST_ING */
+ /* Send cmd to clear vport's FUNC_RST_ING */
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%u) rst failed %d!\n",
+ "clear vport(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 2ce5302c5956..65d78ee4d65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -566,7 +566,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
- vport->vport_id);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
return hclge_func_reset_cmd(hdev, vport->vport_id);
}
@@ -590,9 +590,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg)
{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
u16 queue_id, qid_in_pf;
memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+ queue_id, mbx_req->mbx_src_vfid);
+ return;
+ }
+
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 78d5bf1ea561..44618cc4cca1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
vport->vport_id, shap_cfg_cmd->qs_id,
max_tx_rate, ret);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a69e892277b3..5fdac8685f95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -816,40 +816,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
return 0;
}
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = hdev->rss_cfg.hash_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 hash_algo;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
	/* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- break;
- default:
- return -EINVAL;
- }
-
- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
- key);
- if (ret)
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "invalid hfunc type %u\n", hfunc);
return ret;
+ }
/* Update the shadow RSS key with user specified qids */
memcpy(rss_cfg->rss_hash_key, key,
HCLGEVF_RSS_KEY_SIZE);
+ } else {
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
}
+ rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index b0b6f90deb7d..ed8ea63bb172 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -335,6 +335,7 @@ config IGC
tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
default n
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index b5f68f66d42a..7bb1f20002b5 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
int hash;
int i;
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+ return -EEXIST;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
struct flow_match_meta match;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a2f61a87cef8..8af7f2827322 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
int nhoff = skb_network_offset(skb);
int ret = 0;
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
if (skb->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
@@ -1269,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
if (!netif_carrier_ok(dev)) {
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
netif_carrier_on(dev);
en_dbg(LINK, priv, "Link Up\n");
}
@@ -1557,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
-static void mlx4_en_linkstate(struct work_struct *work)
+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_port_state *port_state = &priv->port_state;
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ bool up;
+
+ if (mlx4_en_QUERY_PORT(mdev, priv->port))
+ port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
+
+ up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
+ if (up == netif_carrier_ok(dev))
+ netif_carrier_event(dev);
+ if (!up) {
+ en_info(priv, "Link Down\n");
+ netif_carrier_off(dev);
+ } else {
+ en_info(priv, "Link Up\n");
+ netif_carrier_on(dev);
+ }
+}
+
+static void mlx4_en_linkstate_work(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
linkstate_task);
struct mlx4_en_dev *mdev = priv->mdev;
- int linkstate = priv->link_state;
mutex_lock(&mdev->state_lock);
- /* If observable port state changed set carrier state and
- * report to system log */
- if (priv->last_link_state != linkstate) {
- if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
- en_info(priv, "Link Down\n");
- netif_carrier_off(priv->dev);
- } else {
- en_info(priv, "Link Up\n");
- netif_carrier_on(priv->dev);
- }
- }
- priv->last_link_state = linkstate;
+ mlx4_en_linkstate(priv);
mutex_unlock(&mdev->state_lock);
}
@@ -2079,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
- if (err)
+ if (err) {
en_err(priv, "Failed starting port:%d\n", priv->port);
-
+ goto out;
+ }
+ mlx4_en_linkstate(priv);
out:
mutex_unlock(&mdev->state_lock);
return err;
@@ -3168,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->restart_task, mlx4_en_restart);
- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3d1a20201ef..6bf558c5ec10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -552,7 +552,6 @@ struct mlx4_en_priv {
struct mlx4_hwq_resources res;
int link_state;
- int last_link_state;
bool port_up;
int port;
int registered;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index c581b955efb3..559177e6ded4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -563,16 +563,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
- /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
- * reset
- */
- ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
- DEV_CLOCK_CFG);
-
- /* No PFC */
- ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
- ANA_PFC_PFC_CFG, port);
-
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
@@ -1303,14 +1293,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
return mask;
}
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
+static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
struct net_device *bridge)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[src_port];
u32 mask = 0;
int port;
+ if (!ocelot_port || ocelot_port->bridge != bridge ||
+ ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port *ocelot_port = ocelot->ports[port];
+ ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
@@ -1376,7 +1371,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
index edafbd37d12c..b8737efd2a85 100644
--- a/drivers/net/ethernet/mscc/ocelot_devlink.c
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*/
#include <net/devlink.h>
#include "ocelot.h"
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 08b481a93460..4b0941f09f71 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -2,7 +2,7 @@
/* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index c0c465a4a981..e54b9fb2a97a 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -5,7 +5,7 @@
* mscc_ocelot_switch_lib.
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index fc8b3e64f153..186d0048a9d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1297,6 +1297,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
prev_weight = weight;
while (weight) {
+ /* If the HW device is during recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid_map to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return 0;
+
msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
weight = bitmap_weight(bmap->bitmap, bmap->max_count);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f16a157bb95a..cf5baa5e59bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
* Beyond the added delay we clear the bitmap anyway.
*/
while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ /* If the HW device is during recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid bitmap to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return;
+
msleep(100);
if (wait_count++ > 20) {
DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index b50b7fafd8d6..f4c3efc3e074 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv)
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
set_dma_addr(priv->param.dma,
- (int) priv->tx_buf[priv->tx_tail] + n);
+ virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
set_dma_count(priv->param.dma,
priv->tx_len[priv->tx_tail] - n);
release_dma_lock(flags);
@@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv)
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_READ);
set_dma_addr(priv->param.dma,
- (int) priv->rx_buf[priv->rx_head]);
+ virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
enable_dma(priv->param.dma);
@@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc)
if (priv->param.dma >= 0) {
flags = claim_dma_lock();
set_dma_addr(priv->param.dma,
- (int) priv->rx_buf[priv->rx_head]);
+ virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
} else {
diff --git a/drivers/net/pcs/pcs-xpcs-nxp.c b/drivers/net/pcs/pcs-xpcs-nxp.c
index 984c9f7f16a8..d16fc58cd48d 100644
--- a/drivers/net/pcs/pcs-xpcs-nxp.c
+++ b/drivers/net/pcs/pcs-xpcs-nxp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
*/
#include <linux/pcs/pcs-xpcs.h>
#include "pcs-xpcs.h"
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index c94cb5382dc9..250742ffdfd9 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -179,6 +179,16 @@ static int mdio_remove(struct device *dev)
return 0;
}
+static void mdio_shutdown(struct device *dev)
+{
+ struct mdio_device *mdiodev = to_mdio_device(dev);
+ struct device_driver *drv = mdiodev->dev.driver;
+ struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+ if (mdiodrv->shutdown)
+ mdiodrv->shutdown(mdiodev);
+}
+
/**
* mdio_driver_register - register an mdio_driver with the MDIO layer
* @drv: new mdio_driver to register
@@ -193,6 +203,7 @@ int mdio_driver_register(struct mdio_driver *drv)
mdiodrv->driver.bus = &mdio_bus_type;
mdiodrv->driver.probe = mdio_probe;
mdiodrv->driver.remove = mdio_remove;
+ mdiodrv->driver.shutdown = mdio_shutdown;
retval = driver_register(&mdiodrv->driver);
if (retval) {
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a57251ba5991..f97813a4e8d1 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2719,14 +2719,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
serial = kzalloc(sizeof(*serial), GFP_KERNEL);
if (!serial)
- goto exit;
+ goto err_free_dev;
hso_dev->port_data.dev_serial = serial;
serial->parent = hso_dev;
if (hso_serial_common_create
(serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
- goto exit;
+ goto err_free_serial;
serial->tx_data_length--;
serial->write_data = hso_mux_serial_write_data;
@@ -2742,11 +2742,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
/* done, return it */
return hso_dev;
-exit:
- if (serial) {
- tty_unregister_device(tty_drv, serial->minor);
- kfree(serial);
- }
+err_free_serial:
+ kfree(serial);
+err_free_dev:
kfree(hso_dev);
return NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e46a715dad2b..2ed49884565f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -425,6 +425,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
skb_reserve(skb, p - buf);
skb_put(skb, len);
+
+ page = (struct page *)page->private;
+ if (page)
+ give_pages(rq, page);
goto ok;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5a8df5a195cb..141635a35c28 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -4756,12 +4756,12 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
LIST_HEAD(list);
unsigned int h;
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
}
+ rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 39a01c2a3058..32d5bc4919d8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -499,7 +499,7 @@ check_frags:
* the header's copy failed, and they are
* sharing a slot, send an error
*/
- if (i == 0 && sharedslot)
+ if (i == 0 && !first_shinfo && sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
else
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index a620c34790e6..0875b773fb41 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -278,6 +278,7 @@ static int st_nci_spi_remove(struct spi_device *dev)
static struct spi_device_id st_nci_spi_id_table[] = {
{ST_NCI_SPI_DRIVER_NAME, 0},
+ {"st21nfcb-spi", 0},
{}
};
MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7efb31b87f37..6600e138945e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3524,7 +3524,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
- if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+ if (h->ns_id != nsid)
+ continue;
+ if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
return h;
}
@@ -3843,6 +3845,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list)) {
+ list_del_init(&ns->head->entry);
+ last_path = true;
+ }
mutex_unlock(&ns->ctrl->subsys->lock);
/* guarantee not available in head->list */
@@ -3856,20 +3862,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
- if (blk_get_integrity(ns->disk))
- blk_integrity_unregister(ns->disk);
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);
- /* Synchronize with nvme_init_ns_head() */
- mutex_lock(&ns->head->subsys->lock);
- if (list_empty(&ns->head->list)) {
- list_del_init(&ns->head->entry);
- last_path = true;
- }
- mutex_unlock(&ns->head->subsys->lock);
if (last_path)
nvme_mpath_shutdown_disk(ns->head);
nvme_put_ns(ns);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5d7bc58a27bd..e8ccdd398f78 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+ unsigned nsid;
+again:
+ nsid = le32_to_cpu(desc->nsids[n]);
if (ns->head->ns_id < nsid)
continue;
if (ns->head->ns_id == nsid)
nvme_update_ns_ana_state(desc, ns);
if (++n == nr_nsids)
break;
+ if (ns->head->ns_id > nsid)
+ goto again;
}
up_read(&ctrl->namespaces_rwsem);
return 0;
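
The nvme_update_ana_state() fix above is a merge of two sorted sequences: the controller's namespace list and the NSIDs of one ANA descriptor. Previously, a namespace whose ID was greater than the current descriptor entry advanced 'n' but then moved on to the next namespace, so a matching descriptor entry could be skipped; the 'goto again' re-evaluates the next descriptor NSID against the same namespace. A standalone sketch of the fixed loop with made-up IDs (not data from the driver):

	#include <stdio.h>

	/* Illustrative sorted inputs only. */
	static const unsigned int ns_ids[]     = { 1, 4, 5, 7 };	/* ctrl->namespaces */
	static const unsigned int desc_nsids[] = { 2, 4, 7 };		/* ANA descriptor   */

	int main(void)
	{
		size_t nr = sizeof(desc_nsids) / sizeof(desc_nsids[0]);
		size_t n = 0;

		for (size_t i = 0; i < sizeof(ns_ids) / sizeof(ns_ids[0]); i++) {
			unsigned int nsid;
	again:
			nsid = desc_nsids[n];
			if (ns_ids[i] < nsid)		/* namespace not in this entry */
				continue;
			if (ns_ids[i] == nsid)
				printf("update ANA state of nsid %u\n", nsid);
			if (++n == nr)
				break;
			if (ns_ids[i] > nsid)		/* descriptor is behind: catch up */
				goto again;
		}
		return 0;
	}

Without the backward jump, the run above consumes the entry for NSID 2 while looking at namespace 4 and never applies the state for NSID 4.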
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a68704e39084..042c594bc57e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
mutex_destroy(&queue->queue_lock);
}
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
for (i = 0; i < queue->queue_size; i++) {
ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
if (ret)
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
if (ret) {
dev_err(ctrl->ctrl.device,
"rdma_connect_locked failed (%d).\n", ret);
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- nvme_rdma_destroy_queue_ib(queue);
- fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e2ab12f3f51c..e4249b7dc056 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
} while (ret > 0);
}
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !list_empty(&queue->send_list) ||
+ !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
- } else if (last) {
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
+
+ if (last && nvme_tcp_queue_more(queue))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -906,12 +913,6 @@ done:
read_unlock_bh(&sk->sk_callback_lock);
}
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
- return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
-}
-
static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
queue->request = NULL;
@@ -1145,8 +1146,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
pending = true;
else if (unlikely(result < 0))
break;
- } else
- pending = !llist_empty(&queue->req_list);
+ }
result = nvme_tcp_try_recv(queue);
if (result > 0)
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d784f3c200b4..be5d82421e3a 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%*s\n",
+ return snprintf(page, PAGE_SIZE, "%.*s\n",
NVMET_SN_MAX_SIZE, subsys->serial);
}
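
The nvmet change above is a printf-format fix: "%*s" takes the argument as a minimum field width (padding) and still reads the string until its terminating NUL, while "%.*s" takes it as a precision, i.e. an upper bound on how many characters are printed. For a fixed-size serial-number buffer that need not be NUL-terminated, the precision form is the safe one. A small standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		/* Fixed-size buffer, deliberately not NUL-terminated. */
		char serial[4] = { 'A', 'B', 'C', 'D' };
		int len = (int)sizeof(serial);

		/* Precision: prints at most 'len' bytes of 'serial'. */
		printf("'%.*s'\n", len, serial);	/* 'ABCD' */

		/* Width: only pads to at least 'len' characters; it would read
		 * past 'serial' looking for a NUL, so a terminated string is
		 * used here instead.
		 */
		printf("'%*s'\n", len, "AB");		/* '  AB' */
		return 0;
	}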
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 5b043ee30824..b0800c260f64 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
break;
}
- if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+ /*
+ * Attempt to initialize a restricted-dma-pool region if one was found.
+ * Note that count can hold a negative error code.
+ */
+ if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
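
As the added comment notes, count can hold a negative error code from the lookup helper; in that case the scan loop never runs and i stays 0, so the old "i != count" test still passed and the init call ran with bogus arguments. "i < count" is false for any negative count. A minimal standalone illustration:

#include <stdio.h>

int main(void)
{
	int count = -22;	/* e.g. an -EINVAL style error from the helper */
	int i = 0;		/* loop body never executed */

	if (i != count)
		printf("old test: would call the init function anyway\n");
	if (i < count)
		printf("new test: never reached for a negative count\n");
	return 0;
}
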
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 3fd74bb34819..a3483484a5a2 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
-DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
@@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_resets, },
{ .parse_prop = parse_leds, },
{ .parse_prop = parse_backlight, },
- { .parse_prop = parse_phy_handle, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
{ .parse_prop = parse_regulators, },
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a1b1e2a01632..0f40943a9a18 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev);
void pci_set_acpi_fwnode(struct pci_dev *dev)
{
- if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+ if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
ACPI_COMPANION_SET(&dev->dev,
acpi_pci_find_companion(&dev->dev));
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e5089af8ad90..4537d1ea14fd 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
* controller to VGA.
*/
static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
/*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
* to VGA. Currently there is no class code defined for UCSI device over PCI
* so using UNKNOWN class for now and it will be updated when UCSI
* over PCI gets a class code.
@@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_UNKNOWN, 8,
quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN, 8,
+ quirk_gpu_usb_typec_ucsi);
/*
* Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 25557b272a4f..4be24890132e 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -99,6 +99,24 @@ error:
return off ?: PCI_VPD_SZ_INVALID;
}
+static bool pci_vpd_available(struct pci_dev *dev)
+{
+ struct pci_vpd *vpd = &dev->vpd;
+
+ if (!vpd->cap)
+ return false;
+
+ if (vpd->len == 0) {
+ vpd->len = pci_vpd_size(dev);
+ if (vpd->len == PCI_VPD_SZ_INVALID) {
+ vpd->cap = 0;
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
* Wait for last operation to complete.
* This code has to spin since there is no other notification from the PCI
@@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
loff_t end = pos + count;
u8 *buf = arg;
- if (!vpd->cap)
+ if (!pci_vpd_available(dev))
return -ENODEV;
if (pos < 0)
@@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
loff_t end = pos + count;
int ret = 0;
- if (!vpd->cap)
+ if (!pci_vpd_available(dev))
return -ENODEV;
if (pos < 0 || (pos & 3) || (count & 3))
@@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
void pci_vpd_init(struct pci_dev *dev)
{
+ if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+ return;
+
dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
mutex_init(&dev->vpd.lock);
-
- if (!dev->vpd.len)
- dev->vpd.len = pci_vpd_size(dev);
-
- if (dev->vpd.len == PCI_VPD_SZ_INVALID)
- dev->vpd.cap = 0;
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = {
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
{
- unsigned int len = dev->vpd.len;
+ unsigned int len;
void *buf;
int cnt;
- if (!dev->vpd.cap)
+ if (!pci_vpd_available(dev))
return ERR_PTR(-ENODEV);
+ len = dev->vpd.len;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
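
The VPD change above replaces eager sizing at init time with a pci_vpd_available() helper that sizes the area on first access and latches a failure by clearing the capability offset. A generic sketch of that lazy-availability pattern; the demo_* names are illustrative, not the PCI core API:

#include <stdbool.h>

#define DEMO_SZ_INVALID		(~0U)

struct demo_vpd {
	unsigned int cap;	/* capability offset, 0 once unusable */
	unsigned int len;	/* 0 means "not sized yet" */
};

/* stand-in for the real (slow, possibly failing) size probe */
static unsigned int demo_vpd_probe_size(struct demo_vpd *vpd)
{
	return 256;
}

static bool demo_vpd_available(struct demo_vpd *vpd)
{
	if (!vpd->cap)
		return false;

	if (vpd->len == 0) {
		vpd->len = demo_vpd_probe_size(vpd);
		if (vpd->len == DEMO_SZ_INVALID) {
			vpd->cap = 0;	/* latch the failure, do not retry */
			return false;
		}
	}
	return true;
}
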
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 3481479a2942..d6a7c896ac86 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -71,7 +71,7 @@
#define AMD_CPU_ID_YC 0x14B5
#define PMC_MSG_DELAY_MIN_US 100
-#define RESPONSE_REGISTER_LOOP_MAX 200
+#define RESPONSE_REGISTER_LOOP_MAX 20000
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 821aba31821c..42513eab1d06 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -166,8 +166,7 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
- depends on DELL_WMI
- depends on LEDS_TRIGGER_AUDIO
+ depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
help
This option adds integration with the "Dell Hardware Privacy"
feature of Dell laptops to the dell-wmi driver.
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 7f3a03f937f6..d53634c8a6e0 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -144,6 +144,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index a33a5826e81a..08598942a6d7 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -118,12 +118,30 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
{ }
};
+/*
+ * Some devices, even non convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {} /* Array terminator */
+};
+
struct intel_hid_priv {
struct input_dev *input_dev;
struct input_dev *array;
struct input_dev *switches;
bool wakeup_mode;
- bool dual_accel;
+ bool auto_add_switch;
};
#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -452,10 +470,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
* Some convertible have unreliable VGBS return which could cause incorrect
* SW_TABLET_MODE report, in these cases we enable support when receiving
* the first event instead of during driver setup.
- *
- * See dual_accel_detect.h for more info on the dual_accel check.
*/
- if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+ if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
dev_info(&device->dev, "switch event received, enable switches supports\n");
err = intel_hid_switches_setup(device);
if (err)
@@ -596,7 +612,8 @@ static int intel_hid_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
- priv->dual_accel = dual_accel_detect();
+ /* See dual_accel_detect.h for more info on the dual_accel check. */
+ priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
err = intel_hid_input_setup(device);
if (err) {
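
The intel-hid change replaces the inverted dual_accel check with an explicit DMI allow-list: the tablet-mode switch is only auto-added on chassis types 31 (convertible) and 32 (detachable). A minimal sketch of gating a feature on such a table, with hypothetical demo_* names:

#include <linux/dmi.h>

static const struct dmi_system_id demo_tablet_switch_allow_list[] = {
	{ .matches = { DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31") } }, /* Convertible */
	{ .matches = { DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32") } }, /* Detachable */
	{ }	/* terminator */
};

static bool demo_want_tablet_switch(void)
{
	/* dmi_check_system() returns the number of matching entries */
	return dmi_check_system(demo_tablet_switch_allow_list);
}
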
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index f58b8543f6ac..66bb39fd0ef9 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -8,7 +8,6 @@
* which provide mailbox interface for power management usage.
*/
-#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
.remove = intel_punit_ipc_remove,
.driver = {
.name = "intel_punit_ipc",
- .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+ .acpi_match_table = punit_ipc_acpi_ids,
},
};
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 3e520d5bca07..88b551caeaaf 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -655,7 +655,7 @@ static int acpi_add(struct acpi_device *device)
goto out_platform_registered;
}
product = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (strlen(product) > 4)
+ if (product && strlen(product) > 4)
switch (product[4]) {
case '5':
case '6':
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 0e1451b1d9c6..033f797861d8 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -100,10 +100,10 @@ static const struct ts_dmi_data chuwi_hi10_air_data = {
};
static const struct property_entry chuwi_hi10_plus_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1914),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1283),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
@@ -111,6 +111,15 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
};
static const struct ts_dmi_data chuwi_hi10_plus_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10plus.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 34056,
+ .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+ 0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+ 0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+ 0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+ },
.acpi_name = "MSSL0017:00",
.properties = chuwi_hi10_plus_props,
};
@@ -141,6 +150,33 @@ static const struct ts_dmi_data chuwi_hi10_pro_data = {
.properties = chuwi_hi10_pro_props,
};
+static const struct property_entry chuwi_hibook_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hibook.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 40392,
+ .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+ 0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+ 0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+ 0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+ },
+ .acpi_name = "MSSL0017:00",
+ .properties = chuwi_hibook_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -980,6 +1016,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi HiBook (CWI514) */
+ .driver_data = (void *)&chuwi_hibook_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index f02bedf41264..458218f88c5e 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -174,6 +174,7 @@ config PTP_1588_CLOCK_OCP
depends on I2C && MTD
depends on SERIAL_8250
depends on !S390
+ depends on COMMON_CLK
select NET_DEVLINK
help
This driver adds support for an OpenCompute time card.
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 1d78b455cc48..e34face736f4 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
-MODULE_ALIAS("platform:max77836-regulator");
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 6cca910a76de..7f458d510483 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
- RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
{}
};
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 2f3515fa242a..f3d5c7f4c13d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
- sclp.has_sipl = !!(sccb->cbl & 0x4000);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
if (sccb->cpuoff > 134)
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+ if (sccb->cpuoff > 137)
+ sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 2ec741106cb6..f0538609dfe4 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -77,12 +77,13 @@ EXPORT_SYMBOL(ccwgroup_set_online);
/**
* ccwgroup_set_offline() - disable a ccwgroup device
* @gdev: target ccwgroup device
+ * @call_gdrv: Call the registered gdrv set_offline function
*
* This function attempts to put the ccwgroup device into the offline state.
* Returns:
* %0 on success and a negative error value on failure.
*/
-int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = -EINVAL;
@@ -91,11 +92,16 @@ int ccwgroup_set_offline(struct ccwgroup_device *gdev)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE)
goto out;
+ if (!call_gdrv) {
+ ret = 0;
+ goto offline;
+ }
if (gdrv->set_offline)
ret = gdrv->set_offline(gdev);
if (ret)
goto out;
+offline:
gdev->state = CCWGROUP_OFFLINE;
out:
atomic_set(&gdev->onoff, 0);
@@ -124,7 +130,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
if (value == 1)
ret = ccwgroup_set_online(gdev);
else if (value == 0)
- ret = ccwgroup_set_offline(gdev);
+ ret = ccwgroup_set_offline(gdev, true);
else
ret = -EINVAL;
out:
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f433428057d9..d9b804943d19 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
* ap_init_qci_info(): Allocate and query qci config info.
* Does also update the static variables ap_max_domain_id
* and ap_max_adapter_id if this info is available.
-
*/
static void __init ap_init_qci_info(void)
{
@@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
/**
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
+ * @floating: ignored
*/
static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
@@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap)
/**
* ap_scan_bus(): Scan the AP bus for new devices
* Runs periodically, workqueue timer (ap_config_time)
+ * @unused: Unused pointer.
*/
static void ap_scan_bus(struct work_struct *unused)
{
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index d70c4d3d0907..9ea48bf0ee40 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
/**
* ap_queue_enable_irq(): Enable interrupt support on this AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
* @ind: the notification indicator byte
*
* Enables interruption on AP queue via ap_aqic(). Based on the return
@@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
/**
* ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
*
* Submit the Reset command to an AP queue.
*/
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 535a60b3946d..a5aa0bdc61d6 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -858,7 +858,6 @@ struct qeth_card {
struct napi_struct napi;
struct qeth_rx rx;
struct delayed_work buffer_reclaim_work;
- struct work_struct close_dev_work;
};
static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 41ca6273b750..e9807d2996a9 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -70,15 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
-static void qeth_close_dev_handler(struct work_struct *work)
-{
- struct qeth_card *card;
-
- card = container_of(work, struct qeth_card, close_dev_work);
- QETH_CARD_TEXT(card, 2, "cldevhdl");
- ccwgroup_set_offline(card->gdev);
-}
-
static const char *qeth_get_cardname(struct qeth_card *card)
{
if (IS_VM_NIC(card)) {
@@ -202,6 +193,9 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
&card->qdio.in_buf_pool.entry_list, list)
list_del(&pool_entry->list);
+ if (!queue)
+ return;
+
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
@@ -792,10 +786,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_STOPLAN:
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
- "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+ "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
netdev_name(card->dev));
- schedule_work(&card->close_dev_work);
+ /* Set offline, then probably fail to set online: */
+ qeth_schedule_recovery(card);
} else {
+ /* stay online for subsequent STARTLAN */
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
netdev_name(card->dev), card->info.chpid);
@@ -1537,7 +1533,6 @@ static void qeth_setup_card(struct qeth_card *card)
INIT_LIST_HEAD(&card->ipato.entries);
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
- INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
hash_init(card->rx_mode_addrs);
hash_init(card->local_addrs4);
hash_init(card->local_addrs6);
@@ -5519,7 +5514,8 @@ static int qeth_do_reset(void *data)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
} else {
- ccwgroup_set_offline(card->gdev);
+ qeth_set_offline(card, disc, true);
+ ccwgroup_set_offline(card->gdev, false);
dev_warn(&card->gdev->dev,
"The qeth device driver failed to recover an error on the device\n");
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 72e84ff9fea5..dc6c00768d91 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2307,7 +2307,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED) {
priv = netdev_priv(card->dev);
if (priv->brport_features & BR_LEARNING_SYNC) {
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3a523e700a5a..6fd3e288f059 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1969,7 +1969,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 540861ca2ba3..553b6b9d0222 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -600,6 +600,12 @@ static int rockchip_spi_transfer_one(
int ret;
bool use_dma;
+ /* Zero length transfers won't trigger an interrupt on completion */
+ if (!xfer->len) {
+ spi_finalize_current_transfer(ctlr);
+ return 1;
+ }
+
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
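
The rockchip-spi fix relies on the transfer_one() convention: a positive return tells the SPI core the transfer is in flight and that completion will be signalled via spi_finalize_current_transfer(). A zero-length transfer will never raise a completion interrupt, so the driver finalizes immediately before returning. A hedged sketch of that convention, not the rockchip code itself:

#include <linux/spi/spi.h>

static int demo_transfer_one(struct spi_controller *ctlr,
			     struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	if (!xfer->len) {
		/* nothing will ever complete, so finalize right away */
		spi_finalize_current_transfer(ctlr);
		return 1;
	}

	/* ... program FIFOs/DMA and enable the completion interrupt ... */

	return 1;	/* in progress; the IRQ handler finalizes later */
}
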
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index ebd27f883033..8ce840c7ecc8 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -204,9 +204,6 @@ struct tegra_slink_data {
struct dma_async_tx_descriptor *tx_dma_desc;
};
-static int tegra_slink_runtime_suspend(struct device *dev);
-static int tegra_slink_runtime_resume(struct device *dev);
-
static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
@@ -1185,6 +1182,7 @@ static int tegra_slink_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
static int tegra_slink_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -1210,6 +1208,7 @@ static int tegra_slink_runtime_resume(struct device *dev)
}
return 0;
}
+#endif /* CONFIG_PM */
static const struct dev_pm_ops slink_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
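
Moving the runtime-PM callbacks below an #ifdef CONFIG_PM guard (and dropping the forward declarations) works because SET_RUNTIME_PM_OPS() expands to nothing when CONFIG_PM is disabled, so the callbacks are then neither defined nor referenced. A compact sketch of the pattern with placeholder demo_* names:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM
static int demo_runtime_suspend(struct device *dev)
{
	/* gate clocks, drop power, ... */
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops demo_pm_ops = {
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};
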
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 57e2499ec1ed..aea037c65985 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
const struct spi_device *spi = to_spi_device(dev);
int len;
- len = of_device_modalias(dev, buf, PAGE_SIZE);
- if (len != -ENODEV)
- return len;
-
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
const struct spi_device *spi = to_spi_device(dev);
int rc;
- rc = of_device_uevent_modalias(dev, env);
- if (rc != -ENODEV)
- return rc;
-
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index d33c5cd684c0..b26b79dfcac9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -582,7 +582,9 @@ config FB_HP300
config FB_TGA
tristate "TGA/SFB+ framebuffer support"
- depends on FB && (ALPHA || TC)
+ depends on FB
+ depends on PCI || TC
+ depends on ALPHA || TC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5f1ce59b44b9..a37eb52fb401 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -214,7 +214,7 @@ config XEN_PVCALLS_FRONTEND
implements them.
config XEN_PVCALLS_BACKEND
- bool "XEN PV Calls backend driver"
+ tristate "XEN PV Calls backend driver"
depends on INET && XEN && XEN_BACKEND
help
Experimental backend for the Xen PV Calls protocol
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 671c71245a7b..2d2803883306 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -43,6 +43,8 @@
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
/*
- * balloon_process() state:
+ * balloon_thread() state:
*
* BP_DONE: done or nothing to do,
* BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
BP_ECANCELED
};
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
static DEFINE_MUTEX(balloon_mutex);
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
if (val == MEM_ONLINE)
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
return NOTIFY_OK;
}
@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
/*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
+ * needed, or if the credit has changed while state is BP_EAGAIN.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+ if (state != BP_EAGAIN)
+ credit = 0;
+
+ return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
*/
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
{
enum bp_state state = BP_DONE;
long credit;
+ unsigned long timeout;
+
+ set_freezable();
+ for (;;) {
+ if (state == BP_EAGAIN)
+ timeout = balloon_stats.schedule_delay * HZ;
+ else
+ timeout = 3600 * HZ;
+ credit = current_credit();
+ wait_event_interruptible_timeout(balloon_thread_wq,
+ balloon_thread_cond(state, credit), timeout);
+
+ if (kthread_should_stop())
+ return 0;
- do {
mutex_lock(&balloon_mutex);
credit = current_credit();
@@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work)
mutex_unlock(&balloon_mutex);
cond_resched();
-
- } while (credit && state == BP_DONE);
-
- /* Schedule more work if there is some still to be done. */
- if (state == BP_EAGAIN)
- schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+ }
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
/* The balloon may be too large now. Shrink it if needed. */
if (current_credit())
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
mutex_unlock(&balloon_mutex);
}
@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
static int __init balloon_init(void)
{
+ struct task_struct *task;
+
if (!xen_domain())
return -ENODEV;
@@ -722,6 +744,12 @@ static int __init balloon_init(void)
}
#endif
+ task = kthread_run(balloon_thread, NULL, "xen-balloon");
+ if (IS_ERR(task)) {
+ pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+ return PTR_ERR(task);
+ }
+
/* Init the xen-balloon driver. */
xen_balloon_init();
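
The balloon.c changes convert a self-rescheduling delayed work item into a freezable kthread sleeping on a wait queue: the call sites that used to schedule_delayed_work() now just wake_up() the queue, and the thread chooses its own timeout (short when retrying, an hour otherwise). A stripped-down sketch of that structure, with hypothetical demo_* names and no locking around the pending flag:

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_pending;

static void demo_kick(void)		/* replaces schedule_delayed_work() */
{
	demo_pending = true;
	wake_up(&demo_wq);
}

static int demo_thread(void *unused)
{
	set_freezable();
	for (;;) {
		wait_event_interruptible_timeout(demo_wq,
				demo_pending || kthread_should_stop(),
				3600 * HZ);
		if (kthread_should_stop())
			return 0;

		demo_pending = false;
		/* ... one pass of the actual work, under its own locking ... */
	}
}

static int demo_start(void)
{
	struct task_struct *task = kthread_run(demo_thread, NULL, "demo-thread");

	return PTR_ERR_OR_ZERO(task);
}
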
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 643fe440c46e..8c10edf9efe6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
- int i, rc;
- int dma_bits;
+ int rc;
+ unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+ unsigned int i, dma_bits = order + PAGE_SHIFT;
dma_addr_t dma_handle;
phys_addr_t p = virt_to_phys(buf);
- dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+ BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+ BUG_ON(nslabs % IO_TLB_SEGSIZE);
i = 0;
do {
- int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
do {
rc = xen_create_contiguous_region(
- p + (i << IO_TLB_SHIFT),
- get_order(slabs << IO_TLB_SHIFT),
+ p + (i << IO_TLB_SHIFT), order,
dma_bits, &dma_handle);
} while (rc && dma_bits++ < MAX_DMA_BITS);
if (rc)
return rc;
- i += slabs;
+ i += IO_TLB_SEGSIZE;
} while (i < nslabs);
return 0;
}
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
return "";
}
-#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
{
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ retry:
order--;
}
if (!start)
- goto error;
+ goto exit;
if (order != get_order(bytes)) {
pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
(PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ retry:
swiotlb_set_max_segment(PAGE_SIZE);
return 0;
error:
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
- pr_info("Lowering to %luMB\n",
- (nslabs << IO_TLB_SHIFT) >> 20);
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+ bytes = nslabs << IO_TLB_SHIFT;
+ pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
}
+exit:
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- free_pages((unsigned long)start, order);
return rc;
}
@@ -244,9 +241,9 @@ retry:
rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
memblock_free(__pa(start), PAGE_ALIGN(bytes));
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
bytes = nslabs << IO_TLB_SHIFT;
pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
@@ -254,7 +251,7 @@ retry:
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
}
- if (swiotlb_init_with_tbl(start, nslabs, false))
+ if (swiotlb_init_with_tbl(start, nslabs, true))
panic("Cannot allocate SWIOTLB buffer");
swiotlb_set_max_segment(PAGE_SIZE);
}
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 7d9b23d981bf..1b4d5809808d 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -21,6 +21,37 @@
#include "internal.h"
/*
+ * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+ struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+ unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+ struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+ struct afs_vnode *vnode;
+ struct afs_cell *cell = server->cell;
+
+ down_read(&cell->fs_open_mmaps_lock);
+
+ list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+ if (vnode->cb_server == server) {
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ queue_work(system_unbound_wq, &vnode->cb_work);
+ }
+ }
+
+ up_read(&cell->fs_open_mmaps_lock);
+}
+
+/*
* Allow the fileserver to request callback state (re-)initialisation.
* Unfortunately, UUIDs are not guaranteed unique.
*/
@@ -29,8 +60,11 @@ void afs_init_callback_state(struct afs_server *server)
rcu_read_lock();
do {
server->cb_s_break++;
- server = rcu_dereference(server->uuid_next);
- } while (0);
+ atomic_inc(&server->cell->fs_s_break);
+ if (!list_empty(&server->cell->fs_open_mmaps))
+ queue_work(system_unbound_wq, &server->initcb_work);
+
+ } while ((server = rcu_dereference(server->uuid_next)));
rcu_read_unlock();
}
@@ -44,11 +78,17 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
vnode->cb_break++;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
afs_clear_permits(vnode);
if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
afs_lock_may_be_available(vnode);
+ if (reason != afs_cb_break_for_deleted &&
+ vnode->status.type == AFS_FTYPE_FILE &&
+ atomic_read(&vnode->cb_nr_mmap))
+ queue_work(system_unbound_wq, &vnode->cb_work);
+
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
} else {
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 887b673f6223..d88407fb9bc0 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
seqlock_init(&cell->fs_lock);
+ INIT_LIST_HEAD(&cell->fs_open_mmaps);
+ init_rwsem(&cell->fs_open_mmaps_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ac829e63c570..4579bbda4634 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
*/
static int afs_d_revalidate_rcu(struct dentry *dentry)
{
- struct afs_vnode *dvnode, *vnode;
+ struct afs_vnode *dvnode;
struct dentry *parent;
- struct inode *dir, *inode;
+ struct inode *dir;
long dir_version, de_version;
_enter("%p", dentry);
@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
return -ECHILD;
}
- /* Check to see if the vnode referred to by the dentry still
- * has a callback.
- */
- if (d_really_is_positive(dentry)) {
- inode = d_inode_rcu(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- if (!afs_check_validity(vnode))
- return -ECHILD;
- }
- }
-
return 1; /* Still valid */
}
@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (IS_ERR(key))
key = NULL;
- if (d_really_is_positive(dentry)) {
- inode = d_inode(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- afs_validate(vnode, key);
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- goto out_bad;
- }
- }
-
- /* lock down the parent dentry so we can peer at it */
+ /* Hold the parent dentry so we can peer at it */
parent = dget_parent(dentry);
dir = AFS_FS_I(d_inode(parent));
@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
_debug("%pd: parent dir deleted", dentry);
- goto out_bad_parent;
+ goto not_found;
}
/* We only need to invalidate a dentry if the server's copy changed
@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
case 0:
/* the filename maps to something */
if (d_really_is_negative(dentry))
- goto out_bad_parent;
+ goto not_found;
inode = d_inode(dentry);
if (is_bad_inode(inode)) {
printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
dentry);
- goto out_bad_parent;
+ goto not_found;
}
vnode = AFS_FS_I(inode);
@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
dentry, fid.unique,
vnode->fid.unique,
vnode->vfs_inode.i_generation);
- write_seqlock(&vnode->cb_lock);
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- write_sequnlock(&vnode->cb_lock);
goto not_found;
}
goto out_valid;
@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
default:
_debug("failed to iterate dir %pd: %d",
parent, ret);
- goto out_bad_parent;
+ goto not_found;
}
out_valid:
@@ -1256,16 +1231,9 @@ out_valid_noupdate:
_leave(" = 1 [valid]");
return 1;
- /* the dirent, if it exists, now points to a different vnode */
not_found:
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_NFSFS_RENAMED;
- spin_unlock(&dentry->d_lock);
-
-out_bad_parent:
_debug("dropping dentry %pd2", dentry);
dput(parent);
-out_bad:
key_put(key);
_leave(" = 0 [bad]");
@@ -1792,6 +1760,10 @@ static int afs_link(struct dentry *from, struct inode *dir,
goto error;
}
+ ret = afs_validate(vnode, op->key);
+ if (ret < 0)
+ goto error_op;
+
afs_op_set_vnode(op, 0, dvnode);
afs_op_set_vnode(op, 1, vnode);
op->file[0].dv_delta = 1;
@@ -1805,6 +1777,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
op->create.reason = afs_edit_dir_for_link;
return afs_do_sync_operation(op);
+error_op:
+ afs_put_operation(op);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1989,6 +1963,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (IS_ERR(op))
return PTR_ERR(op);
+ ret = afs_validate(vnode, op->key);
+ op->error = ret;
+ if (ret < 0)
+ goto error;
+
afs_op_set_vnode(op, 0, orig_dvnode);
afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
op->file[0].dv_delta = 1;
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index f4600c1353ad..540b9fc96824 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
if (b == nr_blocks) {
_debug("init %u", b);
afs_edit_init_block(meta, block, b);
- i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+ afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
/* Only lower dir pages have a counter in the header. */
@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
new_directory:
afs_edit_init_block(meta, meta, 0);
i_size = AFS_DIR_BLOCK_SIZE;
- i_size_write(&vnode->vfs_inode, i_size);
+ afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
page = page0;
block = meta;
diff --git a/fs/afs/file.c b/fs/afs/file.c
index db035ae2a134..e6c447ae91f3 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -24,12 +24,16 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static void afs_readahead(struct readahead_control *ractl);
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
const struct file_operations afs_file_operations = {
.open = afs_open,
.release = afs_release,
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = afs_file_read_iter,
.write_iter = afs_file_write,
.mmap = afs_file_mmap,
.splice_read = generic_file_splice_read,
@@ -59,8 +63,10 @@ const struct address_space_operations afs_fs_aops = {
};
static const struct vm_operations_struct afs_vm_ops = {
+ .open = afs_vm_open,
+ .close = afs_vm_close,
.fault = filemap_fault,
- .map_pages = filemap_map_pages,
+ .map_pages = afs_vm_map_pages,
.page_mkwrite = afs_page_mkwrite,
};
@@ -295,7 +301,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
fsreq->subreq = subreq;
fsreq->pos = subreq->start + subreq->transferred;
fsreq->len = subreq->len - subreq->transferred;
- fsreq->key = subreq->rreq->netfs_priv;
+ fsreq->key = key_get(subreq->rreq->netfs_priv);
fsreq->vnode = vnode;
fsreq->iter = &fsreq->def_iter;
@@ -304,6 +310,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
fsreq->pos, fsreq->len);
afs_fetch_data(fsreq->vnode, fsreq);
+ afs_put_read(fsreq);
}
static int afs_symlink_readpage(struct page *page)
@@ -490,15 +497,88 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
return 1;
}
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+ if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ list_add_tail(&vnode->cb_mmap_link,
+ &vnode->volume->cell->fs_open_mmaps);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ }
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+ if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+ return;
+
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ if (atomic_read(&vnode->cb_nr_mmap) == 0)
+ list_del_init(&vnode->cb_mmap_link);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ flush_work(&vnode->cb_work);
+}
+
/*
* Handle setting up a memory mapping on an AFS file.
*/
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
int ret;
+ afs_add_open_mmap(vnode);
+
ret = generic_file_mmap(file, vma);
if (ret == 0)
vma->vm_ops = &afs_vm_ops;
+ else
+ afs_drop_open_mmap(vnode);
return ret;
}
+
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+ afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+ afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+ struct afs_file *af = vmf->vma->vm_file->private_data;
+
+ switch (afs_validate(vnode, af->key)) {
+ case 0:
+ return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EINTR:
+ case -ERESTARTSYS:
+ return VM_FAULT_RETRY;
+ case -ESTALE:
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
+ int ret;
+
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
+ return generic_file_read_iter(iocb, iter);
+}
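
afs_add_open_mmap()/afs_drop_open_mmap() above use a common idiom: an atomic counter decides which caller is the first or last user, and only that caller takes the rwsem to add the object to, or remove it from, a shared list. A generic sketch of the idiom; the demo_* names are placeholders:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/rwsem.h>

struct demo_obj {
	atomic_t		nr_users;
	struct list_head	link;
};

static LIST_HEAD(demo_open_list);
static DECLARE_RWSEM(demo_open_lock);

static void demo_add_user(struct demo_obj *obj)
{
	if (atomic_inc_return(&obj->nr_users) == 1) {
		down_write(&demo_open_lock);
		list_add_tail(&obj->link, &demo_open_list);
		up_write(&demo_open_lock);
	}
}

static void demo_drop_user(struct demo_obj *obj)
{
	if (!atomic_dec_and_test(&obj->nr_users))
		return;

	down_write(&demo_open_lock);
	if (atomic_read(&obj->nr_users) == 0)	/* re-check under the lock */
		list_del_init(&obj->link);
	up_write(&demo_open_lock);
}
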
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index e7e98ad63a91..c0031a3ab42f 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
+#include "protocol_afs.h"
#include "protocol_yfs.h"
static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
struct afs_addr_list *alist = call->alist;
struct afs_server *server = call->server;
unsigned int index = call->addr_ix;
- unsigned int rtt_us = 0;
+ unsigned int rtt_us = 0, cap0;
int ret = call->error;
_enter("%pU,%u", &server->uuid, index);
@@ -159,6 +160,11 @@ responded:
clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
}
+ cap0 = ntohl(call->tmp);
+ if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+ set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+ else
+ clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
}
if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index dd3f45d906d2..4943413d9c5f 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
struct afs_read *req = op->fetch.req;
__be32 *bp;
- if (upper_32_bits(req->pos) ||
- upper_32_bits(req->len) ||
- upper_32_bits(req->pos + req->len))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_fetch_data64(op);
_enter("");
@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
(unsigned long long)op->store.pos,
(unsigned long long)op->store.i_size);
- if (upper_32_bits(op->store.pos) ||
- upper_32_bits(op->store.size) ||
- upper_32_bits(op->store.i_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_store_data64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- if (upper_32_bits(attr->ia_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_setattr_size64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
return ret;
count = ntohl(call->tmp);
-
call->count = count;
call->count2 = count;
- afs_extract_discard(call, count * sizeof(__be32));
+ if (count == 0) {
+ call->unmarshall = 4;
+ call->tmp = 0;
+ break;
+ }
+
+ /* Extract the first word of the capabilities to call->tmp */
+ afs_extract_to_tmp(call);
call->unmarshall++;
fallthrough;
- /* Extract capabilities words */
case 2:
ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
- /* TODO: Examine capabilities */
+ afs_extract_discard(call, (count - 1) * sizeof(__be32));
+ call->unmarshall++;
+ fallthrough;
+
+ /* Extract remaining capabilities words */
+ case 3:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
call->unmarshall++;
break;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 80b6c8d967d5..8fcffea2daf5 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -54,16 +54,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
}
/*
- * Set the file size and block count. Estimate the number of 512 bytes blocks
- * used, rounded up to nearest 1K for consistency with other AFS clients.
- */
-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
-{
- i_size_write(&vnode->vfs_inode, size);
- vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
-}
-
-/*
* Initialise an inode from the vnode status.
*/
static int afs_inode_init_from_status(struct afs_operation *op,
@@ -587,22 +577,32 @@ static void afs_zap_data(struct afs_vnode *vnode)
}
/*
- * Get the server reinit counter for a vnode's current server.
+ * Check to see if we have a server currently serving this volume and that it
+ * hasn't been reinitialised or dropped from the list.
*/
-static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+static bool afs_check_server_good(struct afs_vnode *vnode)
{
- struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+ struct afs_server_list *slist;
struct afs_server *server;
+ bool good;
int i;
+ if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
+ return true;
+
+ rcu_read_lock();
+
+ slist = rcu_dereference(vnode->volume->servers);
for (i = 0; i < slist->nr_servers; i++) {
server = slist->servers[i].server;
if (server == vnode->cb_server) {
- *_s_break = READ_ONCE(server->cb_s_break);
- return true;
+ good = (vnode->cb_s_break == server->cb_s_break);
+ rcu_read_unlock();
+ return good;
}
}
+ rcu_read_unlock();
return false;
}
@@ -611,57 +611,46 @@ static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
*/
bool afs_check_validity(struct afs_vnode *vnode)
{
- struct afs_volume *volume = vnode->volume;
enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
time64_t now = ktime_get_real_seconds();
- bool valid;
- unsigned int cb_break, cb_s_break, cb_v_break;
+ unsigned int cb_break;
int seq = 0;
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- cb_v_break = READ_ONCE(volume->cb_v_break);
cb_break = vnode->cb_break;
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
- afs_get_s_break_rcu(vnode, &cb_s_break)) {
- if (vnode->cb_s_break != cb_s_break ||
- vnode->cb_v_break != cb_v_break) {
- vnode->cb_s_break = cb_s_break;
- vnode->cb_v_break = cb_v_break;
- need_clear = afs_cb_break_for_vsbreak;
- valid = false;
- } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ if (vnode->cb_v_break != vnode->volume->cb_v_break)
+ need_clear = afs_cb_break_for_v_break;
+ else if (!afs_check_server_good(vnode))
+ need_clear = afs_cb_break_for_s_reinit;
+ else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
need_clear = afs_cb_break_for_zap;
- valid = false;
- } else if (vnode->cb_expires_at - 10 <= now) {
+ else if (vnode->cb_expires_at - 10 <= now)
need_clear = afs_cb_break_for_lapsed;
- valid = false;
- } else {
- valid = true;
- }
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- valid = true;
+ ;
} else {
- vnode->cb_v_break = cb_v_break;
- valid = false;
+ need_clear = afs_cb_break_no_promise;
}
} while (need_seqretry(&vnode->cb_lock, seq));
done_seqretry(&vnode->cb_lock, seq);
- if (need_clear != afs_cb_break_no_break) {
- write_seqlock(&vnode->cb_lock);
- if (cb_break == vnode->cb_break)
- __afs_break_callback(vnode, need_clear);
- else
- trace_afs_cb_miss(&vnode->fid, need_clear);
- write_sequnlock(&vnode->cb_lock);
- valid = false;
- }
+ if (need_clear == afs_cb_break_no_break)
+ return true;
- return valid;
+ write_seqlock(&vnode->cb_lock);
+ if (need_clear == afs_cb_break_no_promise)
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ else if (cb_break == vnode->cb_break)
+ __afs_break_callback(vnode, need_clear);
+ else
+ trace_afs_cb_miss(&vnode->fid, need_clear);
+ write_sequnlock(&vnode->cb_lock);
+ return false;
}
/*
@@ -675,21 +664,20 @@ bool afs_check_validity(struct afs_vnode *vnode)
*/
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
- bool valid;
int ret;
_enter("{v={%llx:%llu} fl=%lx},%x",
vnode->fid.vid, vnode->fid.vnode, vnode->flags,
key_serial(key));
- rcu_read_lock();
- valid = afs_check_validity(vnode);
- rcu_read_unlock();
-
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- clear_nlink(&vnode->vfs_inode);
+ if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
+ if (vnode->vfs_inode.i_nlink)
+ clear_nlink(&vnode->vfs_inode);
+ goto valid;
+ }
- if (valid)
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+ afs_check_validity(vnode))
goto valid;
down_write(&vnode->validate_lock);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5ed416f4ff33..0ad97a8fc0d4 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -390,6 +390,9 @@ struct afs_cell {
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
seqlock_t fs_lock; /* For fs_servers */
+ struct rw_semaphore fs_open_mmaps_lock;
+ struct list_head fs_open_mmaps; /* List of vnodes that are mmapped */
+ atomic_t fs_s_break; /* Counter of CB.InitCallBackState messages */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -503,6 +506,7 @@ struct afs_server {
struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
struct hlist_node proc_link; /* Link in net->fs_proc */
+ struct work_struct initcb_work; /* Work for CB.InitCallBackState* */
struct afs_server *gc_next; /* Next server in manager's list */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
@@ -516,6 +520,7 @@ struct afs_server {
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
atomic_t ref; /* Object refcount */
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
@@ -657,7 +662,11 @@ struct afs_vnode {
afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
+ struct work_struct cb_work; /* Work for mmap'd files */
+ struct list_head cb_mmap_link; /* Link in cell->fs_open_mmaps */
void *cb_server; /* Server with callback/filelock */
+ atomic_t cb_nr_mmap; /* Number of mmaps */
+ unsigned int cb_fs_s_break; /* Mass server break counter (cell->fs_s_break) */
unsigned int cb_s_break; /* Mass break counter on ->server */
unsigned int cb_v_break; /* Mass break counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
@@ -965,6 +974,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
/*
* callback.c
*/
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
extern void afs_init_callback_state(struct afs_server *);
extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -1586,6 +1597,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
}
/*
+ * Set the file size and block count. Estimate the number of 512 bytes blocks
+ * used, rounded up to nearest 1K for consistency with other AFS clients.
+ */
+static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+{
+ i_size_write(&vnode->vfs_inode, size);
+ vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+}
+
+/*
* Check for a conflicting operation on a directory that we just unlinked from.
* If someone managed to sneak a link or an unlink in on the file we just
* unlinked, we won't be able to trust nlink on an AFS file (but not YFS).
diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
new file mode 100644
index 000000000000..0c39358c8b70
--- /dev/null
+++ b/fs/afs/protocol_afs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* AFS protocol bits
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+
+#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
+
+/* AFS3 Fileserver capabilities word 0 */
+#define AFS3_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Uses UAE errors */
+#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002 /* FetchData64 & StoreData64 supported */
+#define AFS3_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define AFS3_VICED_CAPABILITY_SANEACLS 0x0008 /* ACLs reviewed for sanity - don't use */
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index b5bd03b1d3c7..e4cd89c44c46 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -168,3 +168,9 @@ enum yfs_lock_type {
yfs_LockMandatoryWrite = 0x101,
yfs_LockMandatoryExtend = 0x102,
};
+
+/* RXYFS Viced Capability Flags */
+#define YFS_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_64BITFILES 0x0002 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define YFS_VICED_CAPABILITY_SANEACLS 0x0008 /* Deprecated v0.195 */
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index d83f13c44b92..79e1a5f6701b 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -374,6 +374,7 @@ selected_server:
if (vnode->cb_server != server) {
vnode->cb_server = server;
vnode->cb_s_break = server->cb_s_break;
+ vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
vnode->cb_v_break = vnode->volume->cb_v_break;
clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 684a2b02b9ff..6e5b9a19b234 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
+ INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
init_waitqueue_head(&server->probe_wq);
INIT_LIST_HEAD(&server->probe_link);
spin_lock_init(&server->probe_lock);
@@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
afs_give_up_callbacks(net, server);
+ flush_work(&server->initcb_work);
afs_put_server(net, server, afs_server_trace_destroy);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index e38bb1e7a4d2..d110def8aa8e 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->lock_state = AFS_VNODE_LOCK_NONE;
init_rwsem(&vnode->rmdir_lock);
+ INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
_leave(" = %p", &vnode->vfs_inode);
return &vnode->vfs_inode;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c0534697268e..2dfe3b3a53d6 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_seqlock(&vnode->cb_lock);
i_size = i_size_read(&vnode->vfs_inode);
if (maybe_i_size > i_size)
- i_size_write(&vnode->vfs_inode, maybe_i_size);
+ afs_set_i_size(vnode, maybe_i_size);
write_sequnlock(&vnode->cb_lock);
}
@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
}
/* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas)))
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
break;
+ }
- if (!trylock_page(page))
+ if (!trylock_page(page)) {
+ put_page(page);
break;
+ }
if (!PageDirty(page) || PageWriteback(page)) {
unlock_page(page);
+ put_page(page);
break;
}
@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
t = afs_page_dirty_to(page, priv);
if (f != 0 && !new_content) {
unlock_page(page);
+ put_page(page);
break;
}
@@ -801,6 +807,7 @@ int afs_writepages(struct address_space *mapping,
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
ssize_t result;
size_t count = iov_iter_count(from);
@@ -816,6 +823,10 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (!count)
return 0;
+ result = afs_validate(vnode, af->key);
+ if (result < 0)
+ return result;
+
result = generic_file_write_iter(iocb, from);
_leave(" = %zd", result);
@@ -829,13 +840,18 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
*/
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file_inode(file);
- struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_file *af = file->private_data;
+ int ret;
_enter("{%llx:%llu},{n=%pD},%d",
vnode->fid.vid, vnode->fid.vnode, file,
datasync);
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
return file_write_and_wait_range(file, start, end);
}
@@ -849,11 +865,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_file *af = file->private_data;
unsigned long priv;
vm_fault_t ret = VM_FAULT_RETRY;
_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
+ afs_validate(vnode, af->key);
+
sb_start_pagefault(inode->i_sb);
/* Wait for the page to be written to the cache before we allow it to
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 8a3b30ec860c..8be57aaedab6 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ * CIFS filesystem cache index structure definitions
*
* Copyright (c) 2010 Novell, Inc.
 * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 51a824fc926a..de2c12bcfa4b 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs_debug.c
*
* Copyright (C) International Business Machines Corp., 2000,2005
*
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 4fd788586399..f97407520ea1 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_fs_sb.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index ef723be358af..b87cbbe6d2d4 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_ioctl.h
*
* Structure definitions for io control for cifs/smb3
*
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 8fa26a8530f8..353bd0dd7026 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index 31387d0ea32e..e6a0451877d4 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 171ad8b42107..e7582dd79179 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/cifs_unicode.c
*
* Copyright (c) International Business Machines Corp., 2000,2009
* Modified by Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 388eb536cff1..ee3aab3dd4ac 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsacl.c
*
* Copyright (C) International Business Machines Corp., 2007,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index f8292bcf8594..ccbfc754bd3c 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsacl.h
*
* Copyright (c) International Business Machines Corp., 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 2e6f40344037..d118282071b3 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsencrypt.c
*
* Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
* for more detailed information
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8c20bfa187ac..9fa930dfd78d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsfs.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d25a4099b32e..b50da1901ebd 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsfs.h
*
* Copyright (c) International Business Machines Corp., 2002, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c068f7d8d879..e916470468ea 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsglob.h
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -1400,6 +1399,7 @@ struct cifsInodeInfo {
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
#define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
+#define CIFS_INO_CLOSE_ON_LOCK (7) /* Don't defer the close when a lock is set */
unsigned long flags;
spinlock_t writers_lock;
unsigned int writers; /* Number of writers on this inode */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 98e8e5aa0613..d2ff438fd31f 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifspdu.h
*
* Copyright (c) International Business Machines Corp., 2002,2009
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f9740c21ca3d..d0f85b666662 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsproto.h
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -268,6 +267,9 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+ const char *path);
+
extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a8e41c1e80ca..243d17696f06 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifssmb.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0db344807ef1..7881115cfbee 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -1090,7 +1089,7 @@ next_pdu:
module_put_and_exit(0);
}
-/**
+/*
* Returns true if srcaddr isn't specified and rhs isn't specified, or
* if srcaddr is specified and matches the IP address of the rhs argument
*/
@@ -1550,6 +1549,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
/**
* cifs_setup_ipc - helper to setup the IPC tcon for the session
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ * new tree connection for the IPC (interprocess communication RPC)
*
* A new IPC connection is made and stored in the session
* tcon_ipc. The IPC tcon has the same lifetime as the session.
@@ -1605,6 +1607,7 @@ out:
/**
* cifs_free_ipc - helper to release the session IPC tcon
+ * @ses: smb session to unmount the IPC from
*
 * Needs to be called every time a session is destroyed.
*
@@ -1855,6 +1858,8 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
/**
* cifs_get_smb_ses - get a session matching @ctx data from @server
+ * @server: server to setup the session to
+ * @ctx: superblock configuration context to use to setup the session
*
* This function assumes it is being called from cifs_mount() where we
* already got a server reference (server refcount +1). See
@@ -2065,6 +2070,8 @@ cifs_put_tcon(struct cifs_tcon *tcon)
/**
* cifs_get_tcon - get a tcon matching @ctx data from @ses
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the new tree connection
*
* - tcon refcount is the number of mount points using the tcon.
* - ses refcount is the number of tcon using the session.
@@ -3030,7 +3037,7 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
return full_path;
}
-/**
+/*
* expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
*
* If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 5f8a302ffcb2..6e8e7cc26ae2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dir.c
*
* vfs operations that deal with dentries
*
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 8c616aaeb7c4..0458d28d71aa 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dns_resolve.c
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov (niallain@gmail.com)
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 9fa2807ef79e..afc0df381246 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
- * Handles host name to IP address resolution
+ * DNS Resolver upcall management for CIFS DFS
+ * Handles host name to IP address resolution
*
* Copyright (c) International Business Machines Corp., 2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 747a540db954..37c28415df1e 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/export.c
*
* Copyright (C) International Business Machines Corp., 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d0216472f1c6..6796fc73b304 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/file.c
*
* vfs operations that deal with files
*
@@ -883,6 +882,7 @@ int cifs_close(struct inode *inode, struct file *file)
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
cinode->lease_granted &&
+ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
inode->i_ctime = inode->i_mtime = current_time(inode);
@@ -1865,6 +1865,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
cifs_sb = CIFS_FILE_SB(file);
+ set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
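The new CIFS_INO_CLOSE_ON_LOCK bit is set when a byte-range lock is taken in cifs_lock() and checked in cifs_close() so that a locked handle is closed immediately instead of being deferred. A simplified user-space sketch of that flag handshake, assuming a single flags word per inode (all names here are illustrative, not the CIFS structures):

#include <stdbool.h>
#include <stdio.h>

#define INO_CLOSE_ON_LOCK  (1UL << 7)   /* set once a lock has been taken */

struct demo_inode {
	unsigned long flags;
};

/* Taking a lock marks the inode so the eventual close is not deferred. */
static void demo_lock(struct demo_inode *ino)
{
	ino->flags |= INO_CLOSE_ON_LOCK;
}

/* Close defers the server round-trip only when no lock was taken. */
static bool demo_close_is_deferred(const struct demo_inode *ino)
{
	return !(ino->flags & INO_CLOSE_ON_LOCK);
}

int main(void)
{
	struct demo_inode plain = { 0 }, locked = { 0 };

	demo_lock(&locked);
	printf("plain file:  deferred close = %d\n", demo_close_is_deferred(&plain));
	printf("locked file: deferred close = %d\n", demo_close_is_deferred(&locked));
	return 0;
}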
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index fab47fa7df74..8eedd20c44ab 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/fscache.c - CIFS filesystem cache interface
+ * CIFS filesystem cache interface
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <sjayaraman@suse.de>
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 82e856b9cf89..9baa1d0f22bd 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ * CIFS filesystem cache interface definitions
*
* Copyright (c) 2010 Novell, Inc.
 * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 50c01cff4c84..82848412ad85 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/inode.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -1625,7 +1624,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
goto unlink_out;
}
- cifs_close_deferred_file(CIFS_I(inode));
+ cifs_close_deferred_file_under_dentry(tcon, full_path);
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2114,9 +2113,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
goto cifs_rename_exit;
}
- cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, from_name);
if (d_inode(target_dentry) != NULL)
- cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, to_name);
rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
to_name);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 42c6a0bac6c8..0359b604bdbc 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/ioctl.c
*
* vfs operations that deal with io control
*
@@ -359,7 +358,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
if (pSMBFile == NULL)
break;
tcon = tlink_tcon(pSMBFile->tlink);
- caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
if (get_user(ExtAttrBits, (int __user *)arg)) {
rc = -EFAULT;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index f0a6d63bc08c..852e54ee82c2 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/link.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 9469f1cf0b46..03da00eb7c04 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -736,7 +735,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
@@ -767,7 +766,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
@@ -781,6 +780,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
kfree(tmp_list);
}
}
+void
+cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+{
+ struct cifsFileInfo *cfile;
+ struct list_head *tmp;
+ struct file_list *tmp_list, *tmp_next_list;
+ struct list_head file_head;
+ void *page;
+ const char *full_path;
+
+ INIT_LIST_HEAD(&file_head);
+ page = alloc_dentry_path();
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp, &tcon->openFileList) {
+ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ full_path = build_path_from_dentry(cfile->dentry, page);
+ if (strstr(full_path, path)) {
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
+ }
+ }
+ }
+ spin_unlock(&tcon->open_file_lock);
+
+ list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+ _cifsFileInfo_put(tmp_list->cfile, true, false);
+ list_del(&tmp_list->list);
+ kfree(tmp_list);
+ }
+ free_dentry_path(page);
+}
/* parses DFS referral V3 structure
* caller is responsible for freeing target_nodes
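cifs_close_deferred_file_under_dentry() above follows the same shape as the two existing helpers: candidates are collected onto a private list while tcon->open_file_lock is held, and the slower put/free work runs only after the lock is dropped. A stand-alone pthread sketch of that "collect under lock, process outside the lock" pattern, with invented structures rather than the CIFS ones:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct open_file {
	struct open_file *next;
	char path[64];
};

struct work_item {
	struct work_item *next;
	struct open_file *file;
};

static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;
static struct open_file *open_files;

/* Expensive per-file work, done only after the list lock is dropped. */
static void close_file(struct open_file *f)
{
	printf("closing %s\n", f->path);
}

static void close_files_under_path(const char *path)
{
	struct work_item *head = NULL, *item, *next;
	struct open_file *f;

	/* Phase 1: scan and collect matches while holding the lock. */
	pthread_mutex_lock(&open_lock);
	for (f = open_files; f; f = f->next) {
		if (!strstr(f->path, path))
			continue;
		item = malloc(sizeof(*item));
		if (!item)
			break;		/* mirror the kernel's allocation bail-out */
		item->file = f;
		item->next = head;
		head = item;
	}
	pthread_mutex_unlock(&open_lock);

	/* Phase 2: do the slow work outside the lock. */
	for (item = head; item; item = next) {
		next = item->next;
		close_file(item->file);
		free(item);
	}
}

int main(void)
{
	static struct open_file a = { NULL, "/share/dir/a.txt" };
	static struct open_file b = { &a, "/share/other/b.txt" };

	open_files = &b;
	close_files_under_path("/share/dir");
	return 0;
}

Keeping the allocation failure as a break (rather than continue) matches the change made to the two existing helpers in this hunk: once one work item cannot be queued there is no point scanning further under the lock.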
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 0e728aac67e9..fa9fbd6a819c 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/netmisc.c
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 378133ce8869..25a2b8ef88b9 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/ntlmssp.h
*
* Copyright (c) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 54d77c99e21c..1929e80c09ee 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/readdir.c
*
* Directory search handling
*
diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h
index 137f7c95afd6..ae1d025da294 100644
--- a/fs/cifs/rfc1002pdu.h
+++ b/fs/cifs/rfc1002pdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/rfc1002pdu.h
*
* Protocol Data Unit definitions for RFC 1001/1002 support
*
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 118403fbeda2..23e02db7923f 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/sess.c
*
* SMB/CIFS session setup handling routines
*
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index c9d8a50062b8..f5dcc4940b6d 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2file.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Author(s): Steve French (sfrench@us.ibm.com),
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index d0e9f3782bd9..ca692b2283cd 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2glob.h
*
* Definitions for various global variables and structures
*
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 957b2594f02e..8297703492ee 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2inode.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 668f77108831..29b5554f6263 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b6d2e3591927..672ae78e866a 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2pdu.c
*
* Copyright (C) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index e9cac7970b66..f32c99c9ba13 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2pdu.h
*
* Copyright (c) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 263767f644f8..547945443fa7 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2proto.h
*
* Copyright (c) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h
index 0215ef36e240..a9e958166fc5 100644
--- a/fs/cifs/smb2status.h
+++ b/fs/cifs/smb2status.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2status.h
*
* SMB2 Status code (network error) definitions
* Definitions are from MS-ERREF
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 6f7952ea4941..f59b956f9d25 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2transport.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index 60189efb3236..aeffdad829e2 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smberr.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 75a95de320cf..b7379329b741 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/transport.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/winucase.c b/fs/cifs/winucase.c
index 59b6c577aa0a..2f075b5b50df 100644
--- a/fs/cifs/winucase.c
+++ b/fs/cifs/winucase.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/winucase.c
*
* Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
*
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 9ed481e79ce0..7d8b72d67c80 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/xattr.c
*
* Copyright (c) International Business Machines Corp., 2003, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 6c55362c1f99..c2e0e8e80949 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -14,6 +14,7 @@
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
#include "io-wq.h"
@@ -176,7 +177,6 @@ static void io_worker_ref_put(struct io_wq *wq)
static void io_worker_exit(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
if (refcount_dec_and_test(&worker->ref))
complete(&worker->ref_done);
@@ -186,7 +186,6 @@ static void io_worker_exit(struct io_worker *worker)
if (worker->flags & IO_WORKER_F_FREE)
hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
- acct->nr_workers--;
preempt_disable();
io_wqe_dec_running(worker);
worker->flags = 0;
@@ -246,8 +245,6 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
*/
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
- bool do_create = false;
-
/*
* Most likely an attempt to queue unbounded work on an io_wq that
* wasn't setup with any unbounded workers.
@@ -256,18 +253,15 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
pr_warn_once("io-wq is not configured for unbound workers");
raw_spin_lock(&wqe->lock);
- if (acct->nr_workers < acct->max_workers) {
- acct->nr_workers++;
- do_create = true;
+ if (acct->nr_workers == acct->max_workers) {
+ raw_spin_unlock(&wqe->lock);
+ return true;
}
+ acct->nr_workers++;
raw_spin_unlock(&wqe->lock);
- if (do_create) {
- atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
- return create_io_worker(wqe->wq, wqe, acct->index);
- }
-
- return true;
+ atomic_inc(&acct->nr_running);
+ atomic_inc(&wqe->wq->worker_refs);
+ return create_io_worker(wqe->wq, wqe, acct->index);
}
static void io_wqe_inc_running(struct io_worker *worker)
@@ -574,6 +568,7 @@ loop:
}
/* timed out, exit unless we're the last worker */
if (last_timeout && acct->nr_workers > 1) {
+ acct->nr_workers--;
raw_spin_unlock(&wqe->lock);
__set_current_state(TASK_RUNNING);
break;
@@ -1287,6 +1282,10 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
int i, node, prev = 0;
+ BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
+ BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+ BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);
+
for (i = 0; i < 2; i++) {
if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
new_count[i] = task_rlimit(current, RLIMIT_NPROC);
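The io_wqe_create_worker() rewrite above drops the do_create flag in favour of an early unlock-and-return when the accounting limit is already reached; the worker count is reserved under the lock and the (potentially sleeping) creation happens after the lock is released. A small pthread sketch of that shape, assuming nothing more than a counter guarded by a mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct acct {
	pthread_mutex_t lock;
	unsigned int nr_workers;
	unsigned int max_workers;
};

/* Stand-in for the expensive part (thread creation in the real code). */
static bool spawn_worker(struct acct *a)
{
	printf("spawning worker %u\n", a->nr_workers);
	return true;
}

static bool maybe_create_worker(struct acct *a)
{
	pthread_mutex_lock(&a->lock);
	if (a->nr_workers == a->max_workers) {
		/* Limit reached: nothing to do, report success. */
		pthread_mutex_unlock(&a->lock);
		return true;
	}
	a->nr_workers++;		/* reserve the slot under the lock */
	pthread_mutex_unlock(&a->lock);

	return spawn_worker(a);		/* slow path runs without the lock held */
}

int main(void)
{
	struct acct a = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };

	for (int i = 0; i < 4; i++)
		maybe_create_worker(&a);
	return 0;
}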
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 16fb7436043c..e372d5b9f6dc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -712,6 +712,7 @@ struct io_async_rw {
struct iovec fast_iov[UIO_FASTIOV];
const struct iovec *free_iovec;
struct iov_iter iter;
+ struct iov_iter_state iter_state;
size_t bytes_done;
struct wait_page_queue wpq;
};
@@ -735,7 +736,6 @@ enum {
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_COMPLETE_INLINE_BIT,
REQ_F_REISSUE_BIT,
- REQ_F_DONT_REISSUE_BIT,
REQ_F_CREDS_BIT,
REQ_F_REFCOUNT_BIT,
REQ_F_ARM_LTIMEOUT_BIT,
@@ -782,8 +782,6 @@ enum {
REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
/* caller should reissue async */
REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
- /* don't attempt request reissue, see io_rw_reissue() */
- REQ_F_DONT_REISSUE = BIT(REQ_F_DONT_REISSUE_BIT),
/* supports async reads */
REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT),
/* supports async writes */
@@ -2444,13 +2442,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- if (READ_ONCE(req->result) == -EAGAIN &&
- !(req->flags & REQ_F_DONT_REISSUE)) {
- req->iopoll_completed = 0;
- io_req_task_queue_reissue(req);
- continue;
- }
-
__io_cqring_fill_event(ctx, req->user_data, req->result,
io_put_rw_kbuf(req));
(*nr_events)++;
@@ -2613,8 +2604,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
if (!rw)
return !io_req_prep_async(req);
- /* may have left rw->iter inconsistent on -EIOCBQUEUED */
- iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
+ iov_iter_restore(&rw->iter, &rw->iter_state);
return true;
}
@@ -2714,10 +2704,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
if (unlikely(res != req->result)) {
- if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
- io_resubmit_prep(req))) {
- req_set_fail(req);
- req->flags |= REQ_F_DONT_REISSUE;
+ if (res == -EAGAIN && io_rw_should_reissue(req)) {
+ req->flags |= REQ_F_REISSUE;
+ return;
}
}
@@ -2843,7 +2832,8 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
return __io_file_supports_nowait(req->file, rw);
}
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int rw)
{
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw.kiocb;
@@ -2865,8 +2855,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(ret))
return ret;
- /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
- if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+ /*
+ * If the file is marked O_NONBLOCK, still allow retry for it if it
+ * supports async. Otherwise it's impossible to use O_NONBLOCK files
+ * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+ */
+ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
req->flags |= REQ_F_NOWAIT;
ioprio = READ_ONCE(sqe->ioprio);
@@ -2931,7 +2926,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_async_rw *io = req->async_data;
- bool check_reissue = kiocb->ki_complete == io_complete_rw;
/* add previously done IO, if any */
if (io && io->bytes_done > 0) {
@@ -2943,19 +2937,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
- if (ret >= 0 && check_reissue)
+ if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
__io_complete_rw(req, ret, 0, issue_flags);
else
io_rw_done(kiocb, ret);
- if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+ if (req->flags & REQ_F_REISSUE) {
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
io_req_task_queue_reissue(req);
} else {
+ unsigned int cflags = io_put_rw_kbuf(req);
+ struct io_ring_ctx *ctx = req->ctx;
+
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret,
- io_put_rw_kbuf(req));
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ mutex_lock(&ctx->uring_lock);
+ __io_req_complete(req, issue_flags, ret, cflags);
+ mutex_unlock(&ctx->uring_lock);
+ } else {
+ __io_req_complete(req, issue_flags, ret, cflags);
+ }
}
}
}
@@ -3263,12 +3265,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
ret = nr;
break;
}
+ if (!iov_iter_is_bvec(iter)) {
+ iov_iter_advance(iter, nr);
+ } else {
+ req->rw.len -= nr;
+ req->rw.addr += nr;
+ }
ret += nr;
if (nr != iovec.iov_len)
break;
- req->rw.len -= nr;
- req->rw.addr += nr;
- iov_iter_advance(iter, nr);
}
return ret;
@@ -3315,12 +3320,17 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
if (!force && !io_op_defs[req->opcode].needs_async_setup)
return 0;
if (!req->async_data) {
+ struct io_async_rw *iorw;
+
if (io_alloc_async_data(req)) {
kfree(iovec);
return -ENOMEM;
}
io_req_map_rw(req, iovec, fast_iov, iter);
+ iorw = req->async_data;
+ /* we've copied and mapped the iter, ensure state is saved */
+ iov_iter_save_state(&iorw->iter, &iorw->iter_state);
}
return 0;
}
@@ -3339,6 +3349,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
iorw->free_iovec = iov;
if (iov)
req->flags |= REQ_F_NEED_CLEANUP;
+ iov_iter_save_state(&iorw->iter, &iorw->iter_state);
return 0;
}
@@ -3346,7 +3357,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
- return io_prep_rw(req, sqe);
+ return io_prep_rw(req, sqe, READ);
}
/*
@@ -3442,19 +3453,28 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
- ssize_t io_size, ret, ret2;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ struct iov_iter_state __state, *state;
+ ssize_t ret, ret2;
if (rw) {
iter = &rw->iter;
+ state = &rw->iter_state;
+ /*
+ * We come here from an earlier attempt, restore our state to
+ * match in case it doesn't. It's cheap enough that we don't
+ * need to make this conditional.
+ */
+ iov_iter_restore(iter, state);
iovec = NULL;
} else {
ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ state = &__state;
+ iov_iter_save_state(iter, state);
}
- io_size = iov_iter_count(iter);
- req->result = io_size;
+ req->result = iov_iter_count(iter);
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3468,7 +3488,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
return ret ?: -EAGAIN;
}
- ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
+ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret)) {
kfree(iovec);
return ret;
@@ -3484,30 +3504,49 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
/* no retry on NONBLOCK nor RWF_NOWAIT */
if (req->flags & REQ_F_NOWAIT)
goto done;
- /* some cases will consume bytes even on error returns */
- iov_iter_reexpand(iter, iter->count + iter->truncated);
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = 0;
} else if (ret == -EIOCBQUEUED) {
goto out_free;
- } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+ } else if (ret <= 0 || ret == req->result || !force_nonblock ||
(req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
/* read all, failed, already did sync or don't want to retry */
goto done;
}
+ /*
+ * Don't depend on the iter state matching what was consumed, or being
+ * untouched in case of error. Restore it and we'll advance it
+ * manually if we need to.
+ */
+ iov_iter_restore(iter, state);
+
ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
if (ret2)
return ret2;
iovec = NULL;
rw = req->async_data;
- /* now use our persistent iterator, if we aren't already */
- iter = &rw->iter;
+ /*
+ * Now use our persistent iterator and state, if we aren't already.
+ * We've restored and mapped the iter to match.
+ */
+ if (iter != &rw->iter) {
+ iter = &rw->iter;
+ state = &rw->iter_state;
+ }
do {
- io_size -= ret;
+ /*
+ * We end up here because of a partial read, either from
+ * above or inside this loop. Advance the iter by the bytes
+ * that were consumed.
+ */
+ iov_iter_advance(iter, ret);
+ if (!iov_iter_count(iter))
+ break;
rw->bytes_done += ret;
+ iov_iter_save_state(iter, state);
+
/* if we can retry, do so with the callbacks armed */
if (!io_rw_should_retry(req)) {
kiocb->ki_flags &= ~IOCB_WAITQ;
@@ -3525,7 +3564,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
return 0;
/* we got some bytes, but not all. retry. */
kiocb->ki_flags &= ~IOCB_WAITQ;
- } while (ret > 0 && ret < io_size);
+ iov_iter_restore(iter, state);
+ } while (ret > 0);
done:
kiocb_done(kiocb, ret, issue_flags);
out_free:
@@ -3539,7 +3579,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- return io_prep_rw(req, sqe);
+ return io_prep_rw(req, sqe, WRITE);
}
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
@@ -3548,19 +3588,24 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
- ssize_t ret, ret2, io_size;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ struct iov_iter_state __state, *state;
+ ssize_t ret, ret2;
if (rw) {
iter = &rw->iter;
+ state = &rw->iter_state;
+ iov_iter_restore(iter, state);
iovec = NULL;
} else {
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ state = &__state;
+ iov_iter_save_state(iter, state);
}
- io_size = iov_iter_count(iter);
- req->result = io_size;
+ req->result = iov_iter_count(iter);
+ ret2 = 0;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3577,7 +3622,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
(req->flags & REQ_F_ISREG))
goto copy_iov;
- ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret))
goto out_free;
@@ -3624,9 +3669,9 @@ done:
kiocb_done(kiocb, ret2, issue_flags);
} else {
copy_iov:
- /* some cases will consume bytes even on error returns */
- iov_iter_reexpand(iter, iter->count + iter->truncated);
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
+ iov_iter_restore(iter, state);
+ if (ret2 > 0)
+ iov_iter_advance(iter, ret2);
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
return ret ?: -EAGAIN;
}
@@ -7515,6 +7560,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
break;
} while (1);
+ if (uts) {
+ struct timespec64 ts;
+
+ if (get_timespec64(&ts, uts))
+ return -EFAULT;
+ timeout = timespec64_to_jiffies(&ts);
+ }
+
if (sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
@@ -7528,14 +7581,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- if (uts) {
- struct timespec64 ts;
-
- if (get_timespec64(&ts, uts))
- return -EFAULT;
- timeout = timespec64_to_jiffies(&ts);
- }
-
init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry);
@@ -8284,11 +8329,27 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
#endif
}
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+ struct io_rsrc_node *node, void *rsrc)
+{
+ struct io_rsrc_put *prsrc;
+
+ prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+ if (!prsrc)
+ return -ENOMEM;
+
+ prsrc->tag = *io_get_tag_slot(data, idx);
+ prsrc->rsrc = rsrc;
+ list_add(&prsrc->list, &node->rsrc_list);
+ return 0;
+}
+
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
unsigned int issue_flags, u32 slot_index)
{
struct io_ring_ctx *ctx = req->ctx;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ bool needs_switch = false;
struct io_fixed_file *file_slot;
int ret = -EBADF;
@@ -8304,9 +8365,22 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
- ret = -EBADF;
- if (file_slot->file_ptr)
- goto err;
+
+ if (file_slot->file_ptr) {
+ struct file *old_file;
+
+ ret = io_rsrc_node_switch_start(ctx);
+ if (ret)
+ goto err;
+
+ old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+ ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+ ctx->rsrc_node, old_file);
+ if (ret)
+ goto err;
+ file_slot->file_ptr = 0;
+ needs_switch = true;
+ }
*io_get_tag_slot(ctx->file_data, slot_index) = 0;
io_fixed_file_set(file_slot, file);
@@ -8318,27 +8392,14 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
ret = 0;
err:
+ if (needs_switch)
+ io_rsrc_node_switch(ctx, ctx->file_data);
io_ring_submit_unlock(ctx, !force_nonblock);
if (ret)
fput(file);
return ret;
}
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
- struct io_rsrc_node *node, void *rsrc)
-{
- struct io_rsrc_put *prsrc;
-
- prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
- if (!prsrc)
- return -ENOMEM;
-
- prsrc->tag = *io_get_tag_slot(data, idx);
- prsrc->rsrc = rsrc;
- list_add(&prsrc->list, &node->rsrc_list);
- return 0;
-}
-
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update2 *up,
unsigned nr_args)
@@ -10560,10 +10621,12 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
* ordering. Fine to drop uring_lock here, we hold
* a ref to the ctx.
*/
+ refcount_inc(&sqd->refs);
mutex_unlock(&ctx->uring_lock);
mutex_lock(&sqd->lock);
mutex_lock(&ctx->uring_lock);
- tctx = sqd->thread->io_uring;
+ if (sqd->thread)
+ tctx = sqd->thread->io_uring;
}
} else {
tctx = current->io_uring;
@@ -10577,16 +10640,20 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
if (ret)
goto err;
- if (sqd)
+ if (sqd) {
mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ }
if (copy_to_user(arg, new_count, sizeof(new_count)))
return -EFAULT;
return 0;
err:
- if (sqd)
+ if (sqd) {
mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ }
return ret;
}
diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c
index 0b307ca28a19..3eac3c01749f 100644
--- a/fs/ksmbd/misc.c
+++ b/fs/ksmbd/misc.c
@@ -191,19 +191,77 @@ int get_nlink(struct kstat *st)
return nlink;
}
-void ksmbd_conv_path_to_unix(char *path)
+char *ksmbd_conv_path_to_unix(char *path)
{
+ size_t path_len, remain_path_len, out_path_len;
+ char *out_path, *out_next;
+ int i, pre_dotdot_cnt = 0, slash_cnt = 0;
+ bool is_last;
+
strreplace(path, '\\', '/');
-}
+ path_len = strlen(path);
+ remain_path_len = path_len;
+ if (path_len == 0)
+ return ERR_PTR(-EINVAL);
-void ksmbd_strip_last_slash(char *path)
-{
- int len = strlen(path);
+ out_path = kzalloc(path_len + 2, GFP_KERNEL);
+ if (!out_path)
+ return ERR_PTR(-ENOMEM);
+ out_path_len = 0;
+ out_next = out_path;
+
+ do {
+ char *name = path + path_len - remain_path_len;
+ char *next = strchrnul(name, '/');
+ size_t name_len = next - name;
+
+ is_last = !next[0];
+ if (name_len == 2 && name[0] == '.' && name[1] == '.') {
+ pre_dotdot_cnt++;
+ /* handle the case that path ends with "/.." */
+ if (is_last)
+ goto follow_dotdot;
+ } else {
+ if (pre_dotdot_cnt) {
+follow_dotdot:
+ slash_cnt = 0;
+ for (i = out_path_len - 1; i >= 0; i--) {
+ if (out_path[i] == '/' &&
+ ++slash_cnt == pre_dotdot_cnt + 1)
+ break;
+ }
+
+ if (i < 0 &&
+ slash_cnt != pre_dotdot_cnt) {
+ kfree(out_path);
+ return ERR_PTR(-EINVAL);
+ }
+
+ out_next = &out_path[i+1];
+ *out_next = '\0';
+ out_path_len = i + 1;
- while (len && path[len - 1] == '/') {
- path[len - 1] = '\0';
- len--;
- }
+ }
+
+ if (name_len != 0 &&
+ !(name_len == 1 && name[0] == '.') &&
+ !(name_len == 2 && name[0] == '.' && name[1] == '.')) {
+ next[0] = '\0';
+ sprintf(out_next, "%s/", name);
+ out_next += name_len + 1;
+ out_path_len += name_len + 1;
+ next[0] = '/';
+ }
+ pre_dotdot_cnt = 0;
+ }
+
+ remain_path_len -= name_len + 1;
+ } while (!is_last);
+
+ if (out_path_len > 0)
+ out_path[out_path_len-1] = '\0';
+ path[path_len] = '\0';
+ return out_path;
}
void ksmbd_conv_path_to_windows(char *path)
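ksmbd_conv_path_to_unix() now returns a freshly allocated, normalized copy of the path: backslashes become slashes, "." components are dropped, ".." strips the previously emitted component, and climbing past the start of the path is an error. A simplified user-space normalizer along the same lines, with the error handling and corner cases trimmed down (all names are illustrative, not the ksmbd implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Normalize "a\b\..\c" -> "a/c"; returns NULL if ".." climbs too far. */
static char *normalize_path(const char *in)
{
	size_t len = strlen(in);
	char *out = calloc(1, len + 2);
	size_t out_len = 0;

	if (!out)
		return NULL;

	for (size_t i = 0; i < len; ) {
		size_t j = i;

		while (j < len && in[j] != '\\' && in[j] != '/')
			j++;

		size_t name_len = j - i;

		if (name_len == 2 && in[i] == '.' && in[i + 1] == '.') {
			/* Drop the component emitted before this one. */
			if (out_len == 0) {
				free(out);
				return NULL;
			}
			out_len--;			/* step over trailing '/' */
			while (out_len && out[out_len - 1] != '/')
				out_len--;
		} else if (name_len && !(name_len == 1 && in[i] == '.')) {
			memcpy(out + out_len, in + i, name_len);
			out_len += name_len;
			out[out_len++] = '/';
		}
		i = j + 1;
	}

	if (out_len)
		out_len--;			/* trim the trailing '/' */
	out[out_len] = '\0';
	return out;
}

int main(void)
{
	char *p = normalize_path("dir1\\.\\dir2\\..\\file.txt");

	if (p) {
		printf("%s\n", p);	/* prints "dir1/file.txt" */
		free(p);
	}
	return 0;
}

Returning a new allocation rather than rewriting the caller's buffer is what lets smb2_get_name() below fail cleanly with ERR_PTR() instead of silently mangling the name.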
diff --git a/fs/ksmbd/misc.h b/fs/ksmbd/misc.h
index af8717d4d85b..b7b10139ada2 100644
--- a/fs/ksmbd/misc.h
+++ b/fs/ksmbd/misc.h
@@ -16,8 +16,7 @@ int ksmbd_validate_filename(char *filename);
int parse_stream_name(char *filename, char **stream_name, int *s_type);
char *convert_to_nt_pathname(char *filename, char *sharepath);
int get_nlink(struct kstat *st);
-void ksmbd_conv_path_to_unix(char *path);
-void ksmbd_strip_last_slash(char *path);
+char *ksmbd_conv_path_to_unix(char *path);
void ksmbd_conv_path_to_windows(char *path);
char *ksmbd_extract_sharename(char *treename);
char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index c86164dc70bb..6304c9bda479 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -634,7 +634,7 @@ static char *
smb2_get_name(struct ksmbd_share_config *share, const char *src,
const int maxlen, struct nls_table *local_nls)
{
- char *name, *unixname;
+ char *name, *norm_name, *unixname;
name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
if (IS_ERR(name)) {
@@ -643,11 +643,15 @@ smb2_get_name(struct ksmbd_share_config *share, const char *src,
}
/* change it to absolute unix name */
- ksmbd_conv_path_to_unix(name);
- ksmbd_strip_last_slash(name);
-
- unixname = convert_to_unix_name(share, name);
+ norm_name = ksmbd_conv_path_to_unix(name);
+ if (IS_ERR(norm_name)) {
+ kfree(name);
+ return norm_name;
+ }
kfree(name);
+
+ unixname = convert_to_unix_name(share, norm_name);
+ kfree(norm_name);
if (!unixname) {
pr_err("can not convert absolute name\n");
return ERR_PTR(-ENOMEM);
@@ -4041,6 +4045,10 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
path = &fp->filp->f_path;
/* single EA entry is requested with given user.* name */
if (req->InputBufferLength) {
+ if (le32_to_cpu(req->InputBufferLength) <
+ sizeof(struct smb2_ea_info_req))
+ return -EINVAL;
+
ea_req = (struct smb2_ea_info_req *)req->Buffer;
} else {
/* need to send all EAs, if no specific EA is requested*/
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 52b2556e76b1..3a7fa23ba850 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -20,7 +20,6 @@
#define SUBMOD_NAME "smb_direct"
#include <linux/kthread.h>
-#include <linux/rwlock.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
diff --git a/fs/lockd/svcxdr.h b/fs/lockd/svcxdr.h
index c69a0bb76c94..4f1a451da5ba 100644
--- a/fs/lockd/svcxdr.h
+++ b/fs/lockd/svcxdr.h
@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
static inline bool
svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
{
- unsigned int quadlen = XDR_QUADLEN(obj->len);
- __be32 *p;
-
- if (xdr_stream_encode_u32(xdr, obj->len) < 0)
- return false;
- p = xdr_reserve_space(xdr, obj->len);
- if (!p)
+ if (obj->len > XDR_MAX_NETOBJ)
return false;
- p[quadlen - 1] = 0; /* XDR pad */
- memcpy(p, obj->data, obj->len);
-
- return true;
+ return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
}
#endif /* _LOCKD_SVCXDR_H_ */
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 42356416f0a0..3f4027a5de88 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3570,7 +3570,7 @@ static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_s
}
static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
- struct nfsd4_session *session, u32 req)
+ struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
struct nfs4_client *clp = session->se_client;
struct svc_xprt *xpt = rqst->rq_xprt;
@@ -3593,6 +3593,8 @@ static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
else
status = nfserr_inval;
spin_unlock(&clp->cl_lock);
+ if (status == nfs_ok && conn)
+ *conn = c;
return status;
}
@@ -3617,8 +3619,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
- status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
- if (status == nfs_ok || status == nfserr_inval)
+ status = nfsd4_match_existing_connection(rqstp, session,
+ bcts->dir, &conn);
+ if (status == nfs_ok) {
+ if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
+ bcts->dir == NFS4_CDFC4_BACK)
+ conn->cn_flags |= NFS4_CDFC4_BACK;
+ nfsd4_probe_callback(session->se_client);
+ goto out;
+ }
+ if (status == nfserr_inval)
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 2a66844b7ff8..66645a5a35f3 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -20,12 +20,33 @@
* depending on the status field in the last byte. The
* first byte is where the name start either way, and a
* zero means it's empty.
+ *
+ * Also, due to a bug in gcc, we don't want to use the
+ * real (differently sized) name arrays in the inode and
+ * link entries, but always the 'de_name[]' one in the
+ * fake struct entry.
+ *
+ * See
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
+ *
+ * for details, but basically gcc will take the size of the
+ * 'name' array from one of the used union entries randomly.
+ *
+ * This use of 'de_name[]' (48 bytes) avoids the false positive
+ * warnings that would happen if gcc decides to use 'inode.di_name'
+ * (16 bytes) even when the pointer and size were to come from
+ * 'link.dl_name' (48 bytes).
+ *
+ * In all cases the actual name pointer itself is the same, it's
+ * only the gcc internal 'what is the size of this field' logic
+ * that can get confused.
*/
union qnx4_directory_entry {
struct {
- char de_name;
- char de_pad[62];
- char de_status;
+ const char de_name[48];
+ u8 de_pad[15];
+ u8 de_status;
};
struct qnx4_inode_entry inode;
struct qnx4_link_info link;
@@ -53,29 +74,26 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
union qnx4_directory_entry *de;
- const char *name;
offset = ix * QNX4_DIR_ENTRY_SIZE;
de = (union qnx4_directory_entry *) (bh->b_data + offset);
- if (!de->de_name)
+ if (!de->de_name[0])
continue;
if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
continue;
if (!(de->de_status & QNX4_FILE_LINK)) {
size = sizeof(de->inode.di_fname);
- name = de->inode.di_fname;
ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
} else {
size = sizeof(de->link.dl_fname);
- name = de->link.dl_fname;
ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
QNX4_INODES_PER_BLOCK +
de->link.dl_inode_ndx;
}
- size = strnlen(name, size);
+ size = strnlen(de->de_name, size);
QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
- if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) {
+ if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
brelse(bh);
return 0;
}
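The long comment above explains why the directory code now reads names only through the fixed-size de_name[48] overlay: gcc may size a union member access from whichever overlapping array it happens to pick, producing false bounds warnings. A stripped-down illustration of the layout, with array sizes chosen to mirror the 16/48-byte QNX4 fields but none of the real on-disk structures:

#include <stdio.h>
#include <string.h>

struct short_entry {			/* stands in for qnx4_inode_entry */
	char di_fname[16];
	char di_pad[48];
};

struct long_entry {			/* stands in for qnx4_link_info */
	char dl_fname[48];
	char dl_pad[16];
};

union dir_entry {
	struct {
		const char de_name[48];	/* always large enough for either name */
		unsigned char de_pad[15];
		unsigned char de_status;
	};
	struct short_entry inode;
	struct long_entry link;
};

int main(void)
{
	union dir_entry de = {
		.link = { .dl_fname = "a-name-longer-than-16-bytes" }
	};

	/* Reading through de_name gives the compiler one fixed, 48-byte view
	 * of the name, regardless of which union member filled it in. */
	printf("%.*s\n",
	       (int)strnlen(de.de_name, sizeof(de.de_name)), de.de_name);
	return 0;
}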
diff --git a/fs/smbfs_common/smbfsctl.h b/fs/smbfs_common/smbfsctl.h
index d01e8c9d7a31..926f87cd6af0 100644
--- a/fs/smbfs_common/smbfsctl.h
+++ b/fs/smbfs_common/smbfsctl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
/*
- * fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ * SMB, CIFS, SMB2 FSCTL definitions
*
* Copyright (c) International Business Machines Corp., 2002,2013
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index e93375c710b9..cc7338f9e0d1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
- iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
@@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
- __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
#endif
-#endif /* CONFIG_GENERIC_IOMAP */
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 9b3eb6d86200..08237ae8b840 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
}
#endif
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
#include <asm-generic/pci_iomap.h>
#endif
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index df636c6d8e6c..5a2f9bf53384 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
@@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
{
return NULL;
}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
#endif
#endif /* __ASM_GENERIC_PCI_IOMAP_H */
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index c6bc45ae5e03..435777a0073c 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
- * Copyright 2019-2021 NXP Semiconductors
+ * Copyright 2019-2021 NXP
*/
#ifndef _NET_DSA_TAG_OCELOT_H
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ffb787d5ebde..5e6dc38f418e 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -80,6 +80,9 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts etc */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
static inline struct mdio_driver *
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 54667735cc67..8d6571feb95d 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _LINUX_PACKING_H
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e12b524426b0..39039ce8ac4c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1471,6 +1471,7 @@ struct task_struct {
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
+ int mce_count;
#endif
#ifdef CONFIG_KRETPROBES
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5265024e8b90..207101a9c5c3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -27,6 +27,12 @@ enum iter_type {
ITER_DISCARD,
};
+struct iov_iter_state {
+ size_t iov_offset;
+ size_t count;
+ unsigned long nr_segs;
+};
+
struct iov_iter {
u8 iter_type;
bool data_source;
@@ -47,7 +53,6 @@ struct iov_iter {
};
loff_t xarray_start;
};
- size_t truncated;
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
@@ -55,6 +60,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i)
return i->iter_type;
}
+static inline void iov_iter_save_state(struct iov_iter *iter,
+ struct iov_iter_state *state)
+{
+ state->iov_offset = iter->iov_offset;
+ state->count = iter->count;
+ state->nr_segs = iter->nr_segs;
+}
+
static inline bool iter_is_iovec(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_IOVEC;
@@ -233,6 +246,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
@@ -255,10 +269,8 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
* conversion in assignment is by definition greater than all
* values of size_t, including old i->count.
*/
- if (i->count > count) {
- i->truncated += i->count - count;
+ if (i->count > count)
i->count = count;
- }
}
/*
@@ -267,7 +279,6 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
*/
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
- i->truncated -= count - i->count;
i->count = count;
}
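The intended usage pattern is snapshot-then-rewind: a caller saves the iterator before an operation that may consume part of it, and restores it if the operation has to be retried. A hedged kernel-style sketch (the helper name and retry policy are hypothetical; the iov_iter_restore() implementation lands in lib/iov_iter.c further down in this diff):

	#include <linux/fs.h>
	#include <linux/uio.h>

	static ssize_t foo_read_retryable(struct kiocb *iocb, struct iov_iter *to,
					  ssize_t (*do_read)(struct kiocb *, struct iov_iter *))
	{
		struct iov_iter_state state;
		ssize_t ret;

		iov_iter_save_state(to, &state);
		ret = do_read(iocb, to);
		if (ret == -EAGAIN)
			/* do_read() may have advanced @to; rewind before retrying. */
			iov_iter_restore(to, &state);
		return ret;
	}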
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 258867eff230..d784e76113b8 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -585,8 +585,16 @@ struct dsa_switch_ops {
int (*change_tag_protocol)(struct dsa_switch *ds, int port,
enum dsa_tag_protocol proto);
+ /* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
+
+ /* Per-port initialization and destruction methods. Mandatory if the
+ * driver registers devlink port regions, optional otherwise.
+ */
+ int (*port_setup)(struct dsa_switch *ds, int port);
+ void (*port_teardown)(struct dsa_switch *ds, int port);
+
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
@@ -1046,6 +1054,7 @@ static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
diff --git a/include/net/sock.h b/include/net/sock.h
index 708b9de3cdbb..879980de3dcd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1628,6 +1628,7 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
release_sock(sk);
__release(&sk->sk_lock.slock);
} else {
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
spin_unlock_bh(&sk->sk_lock.slock);
}
}
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 9f73ed2cf061..bca73e8c8cde 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -306,11 +306,13 @@ enum afs_flock_operation {
enum afs_cb_break_reason {
afs_cb_break_no_break,
+ afs_cb_break_no_promise,
afs_cb_break_for_callback,
afs_cb_break_for_deleted,
afs_cb_break_for_lapsed,
+ afs_cb_break_for_s_reinit,
afs_cb_break_for_unlink,
- afs_cb_break_for_vsbreak,
+ afs_cb_break_for_v_break,
afs_cb_break_for_volume_callback,
afs_cb_break_for_zap,
};
@@ -602,11 +604,13 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
+ EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
EM(afs_cb_break_for_deleted, "break-del") \
EM(afs_cb_break_for_lapsed, "break-lapsed") \
+ EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_vsbreak, "break-vs") \
+ EM(afs_cb_break_for_v_break, "break-v") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
E_(afs_cb_break_for_zap, "break-zap")
diff --git a/include/uapi/linux/cifs/cifs_mount.h b/include/uapi/linux/cifs/cifs_mount.h
index 69829205fdb5..8e87d27b0951 100644
--- a/include/uapi/linux/cifs/cifs_mount.h
+++ b/include/uapi/linux/cifs/cifs_mount.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
/*
- * include/uapi/linux/cifs/cifs_mount.h
*
* Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
*
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 59ef35154e3d..b270a07b285e 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -317,13 +317,19 @@ enum {
IORING_REGISTER_IOWQ_AFF = 17,
IORING_UNREGISTER_IOWQ_AFF = 18,
- /* set/get max number of workers */
+ /* set/get max number of io-wq workers */
IORING_REGISTER_IOWQ_MAX_WORKERS = 19,
/* this goes last */
IORING_REGISTER_LAST
};
+/* io-wq worker categories */
+enum {
+ IO_WQ_BOUND,
+ IO_WQ_UNBOUND,
+};
+
/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
__u32 offset;
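The new IO_WQ_BOUND/IO_WQ_UNBOUND constants index the two-element array passed to IORING_REGISTER_IOWQ_MAX_WORKERS. A hedged userspace sketch (assumes a ring fd obtained from io_uring_setup(); error handling trimmed); on success the kernel writes the previous limits back into the array:

	#include <linux/io_uring.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int set_iowq_max_workers(int ring_fd, unsigned int bound, unsigned int unbound)
	{
		unsigned int counts[2];

		counts[IO_WQ_BOUND] = bound;		/* bounded work, e.g. regular file I/O */
		counts[IO_WQ_UNBOUND] = unbound;	/* unbounded work, e.g. sockets */

		if (syscall(__NR_io_uring_register, ring_fd,
			    IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2) < 0)
			return -1;

		printf("previous limits: bound=%u unbound=%u\n", counts[0], counts[1]);
		return 0;
	}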
diff --git a/init/main.c b/init/main.c
index 3f7216934441..81a79a77db46 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1242,7 +1242,7 @@ trace_initcall_start_cb(void *data, initcall_t fn)
{
ktime_t *calltime = (ktime_t *)data;
- printk(KERN_DEBUG "calling %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled());
+ printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
*calltime = ktime_get();
}
@@ -1256,8 +1256,8 @@ trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
rettime = ktime_get();
delta = ktime_sub(rettime, *calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n",
- fn, ret, duration, irqs_disabled());
+ printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+ fn, ret, duration);
}
static ktime_t initcall_calltime;
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 6c90c69e5311..95445bd6eb72 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
} else if (rc == -EEXIST) {
- pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+ err_printk(entry->dev, entry,
+ "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
}
}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 7ee5284bff58..06fec5547e7c 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -206,7 +206,8 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
/**
* dma_map_sg_attrs - Map the given buffer for DMA
* @dev: The device for which to perform the DMA operation
- * @sg: The sg_table object describing the buffer
+ * @sg: The sg_table object describing the buffer
+ * @nents: Number of entries to map
* @dir: DMA direction
* @attrs: Optional DMA attributes for the map operation
*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 349f80aa9e7d..20367196fa9a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10193,7 +10193,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
return;
if (ifh->nr_file_filters) {
- mm = get_task_mm(event->ctx->task);
+ mm = get_task_mm(task);
if (!mm)
goto restart;
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
index 4ba15088e640..88191f6e252c 100644
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -41,6 +41,12 @@
* The risk of writer starvation is there, but the pathological use cases
* which trigger it are not necessarily the typical RT workloads.
*
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
* Common code shared between RT rw_semaphore and rwlock
*/
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
* set.
*/
for (r = atomic_read(&rwb->readers); r < 0;) {
+ /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
return 1;
}
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
/*
* rwb->readers can only hit 0 when a writer is waiting for the
* active readers to leave the critical section.
+ *
+ * dec_and_test() is fully ordered, provides RELEASE.
*/
if (unlikely(atomic_dec_and_test(&rwb->readers)))
__rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
- atomic_add(READER_BIAS - bias, &rwb->readers);
+ /*
+	 * _release() is needed in case the reader is in the fast path, pairing
+	 * with atomic_try_cmpxchg() in rwbase_read_trylock(); provides RELEASE.
+ */
+ (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
rwbase_rtmutex_unlock(rtm);
}
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+ /* Can do without CAS because we're serialized by wait_lock. */
+ lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+ /*
+ * _acquire is needed in case the reader is in the fast path, pairing
+ * with rwbase_read_unlock(), provides ACQUIRE.
+ */
+ if (!atomic_read_acquire(&rwb->readers)) {
+ atomic_set(&rwb->readers, WRITER_BIAS);
+ return 1;
+ }
+
+ return 0;
+}
+
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
unsigned int state)
{
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- /*
- * set_current_state() for rw_semaphore
- * current_save_and_set_rtlock_wait_state() for rwlock
- */
- rwbase_set_and_save_current_state(state);
+ if (__rwbase_write_trylock(rwb))
+ goto out_unlock;
- /* Block until all readers have left the critical section. */
- for (; atomic_read(&rwb->readers);) {
+ rwbase_set_and_save_current_state(state);
+ for (;;) {
/* Optimized out for rwlocks */
if (rwbase_signal_pending_state(state, current)) {
- __set_current_state(TASK_RUNNING);
+ rwbase_restore_current_state();
__rwbase_write_unlock(rwb, 0, flags);
return -EINTR;
}
+
+ if (__rwbase_write_trylock(rwb))
+ break;
+
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+ rwbase_schedule();
+ raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- /*
- * Schedule and wait for the readers to leave the critical
- * section. The last reader leaving it wakes the waiter.
- */
- if (atomic_read(&rwb->readers) != 0)
- rwbase_schedule();
set_current_state(state);
- raw_spin_lock_irqsave(&rtm->wait_lock, flags);
}
-
- atomic_set(&rwb->readers, WRITER_BIAS);
rwbase_restore_current_state();
+
+out_unlock:
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 0;
}
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- if (!atomic_read(&rwb->readers)) {
- atomic_set(&rwb->readers, WRITER_BIAS);
+ if (__rwbase_write_trylock(rwb)) {
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 1;
}
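The ordering contract described in the comments above can be reproduced in a standalone C11 sketch; this only illustrates the acquire/release pairing and does not mirror rwbase_rt's reader bias encoding or its rtmutex slow paths:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int readers;	/* >= 0: reader count, -1: writer holds the lock */

	static bool read_trylock(void)
	{
		int r = atomic_load_explicit(&readers, memory_order_relaxed);

		while (r >= 0) {
			/* Success is the ACQUIRE that pairs with write_unlock(). */
			if (atomic_compare_exchange_weak_explicit(&readers, &r, r + 1,
								  memory_order_acquire,
								  memory_order_relaxed))
				return true;
		}
		return false;	/* a writer holds the lock */
	}

	static void read_unlock(void)
	{
		/* RELEASE, pairs with the acquire read in write_trylock(). */
		atomic_fetch_sub_explicit(&readers, 1, memory_order_release);
	}

	static bool write_trylock(void)
	{
		int expected = 0;

		/* ACQUIRE on success: prior reader critical sections are visible. */
		return atomic_compare_exchange_strong_explicit(&readers, &expected, -1,
							       memory_order_acquire,
							       memory_order_relaxed);
	}

	static void write_unlock(void)
	{
		/* RELEASE, pairs with the acquire CAS in read_trylock(). */
		atomic_store_explicit(&readers, 0, memory_order_release);
	}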
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f2d50d69a6c3..755c10c5138c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len,
return 0;
}
EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ * iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+			 !iov_iter_is_kvec(i)))
+ return;
+ i->iov_offset = state->iov_offset;
+ i->count = state->count;
+ /*
+ * For the *vec iters, nr_segs + iov is constant - if we increment
+ * the vec, then we also decrement the nr_segs count. Hence we don't
+ * need to track both of these, just one is enough and we can deduce
+ * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+ * size, so we can just increment the iov pointer as they are unionized.
+ * ITER_BVEC _may_ be the same size on some archs, but on others it is
+ * not. Be safe and handle it separately.
+ */
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ if (iov_iter_is_bvec(i))
+ i->bvec -= state->nr_segs - i->nr_segs;
+ else
+ i->iov -= state->nr_segs - i->nr_segs;
+ i->nr_segs = state->nr_segs;
+}
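The "nr_segs + iov is constant" invariant from the comment is easy to check in isolation. A standalone demonstration with a simplified iterator (not the kernel's struct iov_iter; field names are illustrative):

	#include <assert.h>
	#include <stddef.h>

	struct seg { void *base; size_t len; };

	struct iter {
		const struct seg *segs;
		size_t nr_segs;
		size_t count;
	};

	struct iter_state { size_t nr_segs; size_t count; };

	static void save(const struct iter *it, struct iter_state *st)
	{
		st->nr_segs = it->nr_segs;
		st->count = it->count;
	}

	static void restore(struct iter *it, const struct iter_state *st)
	{
		/* segs + nr_segs never changes, so the pointer can be rewound
		 * from the saved nr_segs alone. */
		it->segs -= st->nr_segs - it->nr_segs;
		it->nr_segs = st->nr_segs;
		it->count = st->count;
	}

	int main(void)
	{
		struct seg segs[3] = { { 0, 4 }, { 0, 4 }, { 0, 4 } };
		struct iter it = { segs, 3, 12 };
		struct iter_state st;

		save(&it, &st);
		it.segs += 2;	/* simulate consuming the first two segments */
		it.nr_segs -= 2;
		it.count -= 8;

		restore(&it, &st);
		assert(it.segs == segs && it.nr_segs == 3 && it.count == 12);
		return 0;
	}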
diff --git a/lib/packing.c b/lib/packing.c
index 6ed72dccfdb5..9a72f4bbf0e2 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include <linux/packing.h>
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 2d3eb1cb73b8..ce39ce9f3526 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+ iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
#endif /* CONFIG_PCI */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b762215d73eb..6da5020a8656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -106,9 +106,6 @@ static bool do_memsw_account(void)
/* memcg and lruvec stats flushing */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
static DEFINE_SPINLOCK(stats_flush_lock);
#define THRESHOLDS_EVENTS_TARGET 128
@@ -682,8 +679,6 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
/* Update lruvec */
__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
- if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
- queue_work(system_unbound_wq, &stats_flush_work);
}
/**
@@ -5361,11 +5356,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
}
-static void flush_memcg_stats_work(struct work_struct *w)
-{
- mem_cgroup_flush_stats();
-}
-
static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
diff --git a/mm/memory.c b/mm/memory.c
index 25fc46e87214..adf9b9ef8277 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
unmap_mapping_range_tree(&mapping->i_mmap, &details);
i_mmap_unlock_write(mapping);
}
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified
diff --git a/mm/workingset.c b/mm/workingset.c
index d4268d8e9a82..d5b81e4f4cbe 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -352,6 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
+ mem_cgroup_flush_stats();
/*
* Compare the distance to the existing workingset size. We
* don't activate pages that couldn't stay resident even if
diff --git a/net/core/dev.c b/net/core/dev.c
index f930329f0dc2..62ddd7d6e00d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6925,12 +6925,16 @@ EXPORT_SYMBOL(napi_disable);
*/
void napi_enable(struct napi_struct *n)
{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
- if (n->dev->threaded && n->thread)
- set_bit(NAPI_STATE_THREADED, &n->state);
+ unsigned long val, new;
+
+ do {
+ val = READ_ONCE(n->state);
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
+
+ new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
+ if (n->dev->threaded && n->thread)
+ new |= NAPIF_STATE_THREADED;
+ } while (cmpxchg(&n->state, val, new) != val);
}
EXPORT_SYMBOL(napi_enable);
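The rewrite above replaces separate test/clear bit operations with one atomic read-modify-write, so the whole flag transition happens in a single cmpxchg. The same pattern in a standalone C11 sketch (flag names are illustrative; the kernel's cmpxchg() is fully ordered, here acq_rel is used on success):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define STATE_SCHED	(1UL << 0)
	#define STATE_NPSVC	(1UL << 1)
	#define STATE_THREADED	(1UL << 2)

	static void enable_state(atomic_ulong *state, bool threaded)
	{
		unsigned long val = atomic_load_explicit(state, memory_order_relaxed);
		unsigned long new;

		do {
			new = val & ~(STATE_SCHED | STATE_NPSVC);
			if (threaded)
				new |= STATE_THREADED;
			/* On failure, val is refreshed with the current contents
			 * and the new value is recomputed from it. */
		} while (!atomic_compare_exchange_weak_explicit(state, &val, new,
								memory_order_acq_rel,
								memory_order_relaxed));
	}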
diff --git a/net/core/sock.c b/net/core/sock.c
index 62627e868e03..512e629f9780 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3179,17 +3179,15 @@ EXPORT_SYMBOL(sock_init_data);
void lock_sock_nested(struct sock *sk, int subclass)
{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
- local_bh_enable();
+ spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(lock_sock_nested);
@@ -3227,24 +3225,35 @@ EXPORT_SYMBOL(release_sock);
*/
bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
- if (!sk->sk_lock.owned)
+ if (!sk->sk_lock.owned) {
/*
- * Note : We must disable BH
+ * Fast path return with bottom halves disabled and
+ * sock::sk_lock.slock held.
+ *
+ * The 'mutex' is not contended and holding
+	 * sock::sk_lock.slock prevents all other lockers from
+	 * proceeding, so the corresponding unlock_sock_fast() can
+ * avoid the slow path of release_sock() completely and
+ * just release slock.
+ *
+	 * From a semantic POV this is equivalent to 'acquiring'
+ * the 'mutex', hence the corresponding lockdep
+ * mutex_release() has to happen in the fast path of
+ * unlock_sock_fast().
*/
return false;
+ }
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
__acquire(&sk->sk_lock.slock);
- local_bh_enable();
+ spin_unlock_bh(&sk->sk_lock.slock);
return true;
}
EXPORT_SYMBOL(lock_sock_fast);
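Callers must feed the return value back into unlock_sock_fast() so that the fast path only releases the spinlock while the slow path goes through release_sock(). A minimal kernel-style sketch (the function name is hypothetical):

	#include <net/sock.h>

	static void foo_touch_sock(struct sock *sk)
	{
		bool slow = lock_sock_fast(sk);

		/* ... short, non-sleeping work on @sk ... */

		unlock_sock_fast(sk, slow);
	}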
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 96f211f52ac3..a020339e1973 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -429,6 +429,7 @@ static int dsa_port_setup(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
bool dsa_port_link_registered = false;
+ struct dsa_switch *ds = dp->ds;
bool dsa_port_enabled = false;
int err = 0;
@@ -438,6 +439,12 @@ static int dsa_port_setup(struct dsa_port *dp)
INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs);
+ if (ds->ops->port_setup) {
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp);
@@ -480,8 +487,11 @@ static int dsa_port_setup(struct dsa_port *dp)
dsa_port_disable(dp);
if (err && dsa_port_link_registered)
dsa_port_link_unregister_of(dp);
- if (err)
+ if (err) {
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
return err;
+ }
dp->setup = true;
@@ -533,11 +543,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a, *tmp;
if (!dp->setup)
return;
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
+
devlink_port_type_clear(dlp);
switch (dp->type) {
@@ -581,6 +595,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
dp->devlink_port_setup = false;
}
+/* Destroy the current devlink port, and create a new one which has the UNUSED
+ * flavour. At this point, any call to ds->ops->port_setup has already been
+ * balanced out by a call to ds->ops->port_teardown, so we know that any
+ * devlink port regions the driver had are now unregistered. We then call its
+ * ds->ops->port_setup again, in order for the driver to re-create them on the
+ * new devlink port.
+ */
+static int dsa_port_reinit_as_unused(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ int err;
+
+ dsa_port_devlink_teardown(dp);
+ dp->type = DSA_PORT_TYPE_UNUSED;
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ return err;
+
+ if (ds->ops->port_setup) {
+ /* On error, leave the devlink port registered,
+ * dsa_switch_teardown will clean it up later.
+ */
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int dsa_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -833,7 +877,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
devlink_params_publish(ds->devlink);
if (!ds->slave_mii_bus && ds->ops->phy_read) {
- ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus) {
err = -ENOMEM;
goto teardown;
@@ -843,13 +887,16 @@ static int dsa_switch_setup(struct dsa_switch *ds)
err = mdiobus_register(ds->slave_mii_bus);
if (err < 0)
- goto teardown;
+ goto free_slave_mii_bus;
}
ds->setup = true;
return 0;
+free_slave_mii_bus:
+ if (ds->slave_mii_bus && ds->ops->phy_read)
+ mdiobus_free(ds->slave_mii_bus);
teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
@@ -872,8 +919,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
if (!ds->setup)
return;
- if (ds->slave_mii_bus && ds->ops->phy_read)
+ if (ds->slave_mii_bus && ds->ops->phy_read) {
mdiobus_unregister(ds->slave_mii_bus);
+ mdiobus_free(ds->slave_mii_bus);
+ ds->slave_mii_bus = NULL;
+ }
dsa_switch_unregister_notifier(ds);
@@ -933,12 +983,9 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
list_for_each_entry(dp, &dst->ports, list) {
err = dsa_port_setup(dp);
if (err) {
- dsa_port_devlink_teardown(dp);
- dp->type = DSA_PORT_TYPE_UNUSED;
- err = dsa_port_devlink_setup(dp);
+ err = dsa_port_reinit_as_unused(dp);
if (err)
goto teardown;
- continue;
}
}
@@ -1043,6 +1090,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
teardown_master:
dsa_tree_teardown_master(dst);
teardown_switches:
+ dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
@@ -1557,3 +1605,53 @@ void dsa_unregister_switch(struct dsa_switch *ds)
mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
+
+/* If the DSA master chooses to unregister its net_device on .shutdown, DSA
+ * blocks that operation from completing, due to the dev_hold taken inside
+ * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
+ * the DSA master, so that the system can reboot successfully.
+ */
+void dsa_switch_shutdown(struct dsa_switch *ds)
+{
+ struct net_device *master, *slave_dev;
+ LIST_HEAD(unregister_list);
+ struct dsa_port *dp;
+
+ mutex_lock(&dsa2_mutex);
+ rtnl_lock();
+
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds != ds)
+ continue;
+
+ if (!dsa_port_is_user(dp))
+ continue;
+
+ master = dp->cpu_dp->master;
+ slave_dev = dp->slave;
+
+ netdev_upper_dev_unlink(master, slave_dev);
+ /* Just unlinking ourselves as uppers of the master is not
+ * sufficient. When the master net device unregisters, that will
+ * also call dev_close, which we will catch as NETDEV_GOING_DOWN
+ * and trigger a dev_close on our own devices (dsa_slave_close).
+ * In turn, that will call dev_mc_unsync on the master's net
+ * device. If the master is also a DSA switch port, this will
+ * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
+ * its own master. Lockdep will complain about the fact that
+ * all cascaded masters have the same dsa_master_addr_list_lock_key,
+	 * which it normally would not do if the cascaded masters were
+	 * in a proper upper/lower relationship, which we've just
+ * destroyed.
+ * To suppress the lockdep warnings, let's actually unregister
+ * the DSA slave interfaces too, to avoid the nonsensical
+ * multicast address list synchronization on shutdown.
+ */
+ unregister_netdevice_queue(slave_dev, &unregister_list);
+ }
+ unregister_netdevice_many(&unregister_list);
+
+ rtnl_unlock();
+ mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
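A switch driver is expected to call this helper from its bus-level .shutdown hook (for instance the one added to struct mdio_driver earlier in this series). A hedged sketch for a hypothetical MDIO-connected switch; the foo_switch names are illustrative:

	#include <linux/device.h>
	#include <linux/mdio.h>
	#include <net/dsa.h>

	struct foo_switch {
		struct dsa_switch *ds;
	};

	static void foo_switch_shutdown(struct mdio_device *mdiodev)
	{
		struct foo_switch *priv = dev_get_drvdata(&mdiodev->dev);

		if (!priv)
			return;

		dsa_switch_shutdown(priv->ds);

		/* Keep the remove path from touching the now shut-down device. */
		dev_set_drvdata(&mdiodev->dev, NULL);
	}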
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index d37ab98e7fe1..8025ed778d33 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
#include <soc/mscc/ocelot.h>
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 3038a257ba05..59072930cb02 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*
* An implementation of the software-defined tag_8021q.c tagger format, which
* also preserves full functionality under a vlan_filtering bridge. It does
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 75ca4b6e484f..9e8100728d46 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1982,6 +1982,8 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
rcu_assign_pointer(old->nh_grp, newg);
if (newg->resilient) {
+ /* Make sure concurrent readers are not using 'oldg' anymore. */
+ synchronize_net();
rcu_assign_pointer(oldg->res_table, tmp_table);
rcu_assign_pointer(oldg->spare->res_table, tmp_table);
}
@@ -3565,6 +3567,7 @@ static struct notifier_block nh_netdev_notifier = {
};
static int nexthops_dump(struct net *net, struct notifier_block *nb,
+ enum nexthop_event_type event_type,
struct netlink_ext_ack *extack)
{
struct rb_root *root = &net->nexthop.rb_root;
@@ -3575,8 +3578,7 @@ static int nexthops_dump(struct net *net, struct notifier_block *nb,
struct nexthop *nh;
nh = rb_entry(node, struct nexthop, rb_node);
- err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
- extack);
+ err = call_nexthop_notifier(nb, net, event_type, nh, extack);
if (err)
break;
}
@@ -3590,7 +3592,7 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
int err;
rtnl_lock();
- err = nexthops_dump(net, nb, extack);
+ err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
if (err)
goto unlock;
err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
@@ -3603,8 +3605,17 @@ EXPORT_SYMBOL(register_nexthop_notifier);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
- nb);
+ int err;
+
+ rtnl_lock();
+ err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+ nb);
+ if (err)
+ goto unlock;
+ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+unlock:
+ rtnl_unlock();
+ return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 4afd9e71e5c2..1cc8a76b39f9 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -510,7 +510,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
goto out_rel;
}
/* get address to which the internal TCP socket is bound */
- kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
+ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
+ goto out_rel;
/* analyze IP specific data of net_device belonging to TCP socket */
addr6 = (struct sockaddr_in6 *)&addrs;
rcu_read_lock();
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 4d0fcd8f8eba..f57449089a16 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1468,7 +1468,9 @@ static void smc_conn_abort_work(struct work_struct *work)
abort_work);
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ lock_sock(&smc->sk);
smc_conn_kill(conn, true);
+ release_sock(&smc->sk);
sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}
diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang
index 4cce8fd0779c..51fc23e2e9e5 100644
--- a/scripts/Makefile.clang
+++ b/scripts/Makefile.clang
@@ -29,7 +29,12 @@ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
else
CLANG_FLAGS += -fintegrated-as
endif
+# By default, clang only warns when it encounters an unknown warning flag or
+# certain optimization flags it knows it has not implemented.
+# Make it behave more like gcc by erroring when these flags are encountered
+# so they can be implemented or wrapped in cc-option.
CLANG_FLAGS += -Werror=unknown-warning-option
+CLANG_FLAGS += -Werror=ignored-optimization-argument
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index eef56d629799..48585c4d04ad 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -13,7 +13,7 @@
# Stage 2 is handled by this file and does the following
# 1) Find all modules listed in modules.order
# 2) modpost is then used to
-# 3) create one <module>.mod.c file pr. module
+# 3) create one <module>.mod.c file per module
# 4) create one Module.symvers file with CRC for all exported symbols
# Step 3 is used to place certain information in the module's ELF
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index b9b0f15e5880..217d21abc86e 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
@@ -102,6 +101,9 @@ def parse_options():
"continue.")
if args.commit:
+ if args.commit.startswith('HEAD'):
+ sys.exit("The --commit option can't use the HEAD ref")
+
args.find = False
if args.ignore:
@@ -432,7 +434,6 @@ def parse_kconfig_file(kfile):
lines = []
defined = []
references = []
- skip = False
if not os.path.exists(kfile):
return defined, references
@@ -448,12 +449,6 @@ def parse_kconfig_file(kfile):
if REGEX_KCONFIG_DEF.match(line):
symbol_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(symbol_def[0])
- skip = False
- elif REGEX_KCONFIG_HELP.match(line):
- skip = True
- elif skip:
- # ignore content of help messages
- pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
symbols = get_symbols_in_line(line)
diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
index 0033eedce003..1d1bde1fd45e 100755
--- a/scripts/clang-tools/gen_compile_commands.py
+++ b/scripts/clang-tools/gen_compile_commands.py
@@ -13,6 +13,7 @@ import logging
import os
import re
import subprocess
+import sys
_DEFAULT_OUTPUT = 'compile_commands.json'
_DEFAULT_LOG_LEVEL = 'WARNING'
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index d8886720e83d..8441e3e1aaac 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
free(evsel);
}
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd)
+ *fd = -1;
}
}
}
@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
struct perf_evsel *leader = evsel->leader;
- int fd;
+ int *fd;
if (evsel == leader) {
*group_fd = -1;
@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
return -ENOTCONN;
fd = FD(leader, cpu, thread);
- if (fd == -1)
+ if (fd == NULL || *fd == -1)
return -EBADF;
- *group_fd = fd;
+ *group_fd = *fd;
return 0;
}
@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
- int fd, group_fd;
+ int fd, group_fd, *evsel_fd;
+
+ evsel_fd = FD(evsel, cpu, thread);
+ if (evsel_fd == NULL)
+ return -EINVAL;
err = get_group_fd(evsel, cpu, thread, &group_fd);
if (err < 0)
@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
if (fd < 0)
return -errno;
- FD(evsel, cpu, thread) = fd;
+ *evsel_fd = fd;
}
}
@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
- if (FD(evsel, cpu, thread) >= 0)
- close(FD(evsel, cpu, thread));
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd && *fd >= 0) {
+ close(*fd);
+ *fd = -1;
+ }
}
}
@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread);
- struct perf_mmap *map = MMAP(evsel, cpu, thread);
+ int *fd = FD(evsel, cpu, thread);
- if (fd < 0)
+ if (fd == NULL || *fd < 0)
continue;
- perf_mmap__munmap(map);
+ perf_mmap__munmap(MMAP(evsel, cpu, thread));
}
}
@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread);
- struct perf_mmap *map = MMAP(evsel, cpu, thread);
+ int *fd = FD(evsel, cpu, thread);
+ struct perf_mmap *map;
- if (fd < 0)
+ if (fd == NULL || *fd < 0)
continue;
+ map = MMAP(evsel, cpu, thread);
perf_mmap__init(map, NULL, false, NULL);
- ret = perf_mmap__mmap(map, &mp, fd, cpu);
+ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
if (ret) {
perf_evsel__munmap(evsel);
return ret;
@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
- if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
return NULL;
return MMAP(evsel, cpu, thread)->base;
@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu, thread);
memset(count, 0, sizeof(*count));
- if (FD(evsel, cpu, thread) < 0)
+ if (fd == NULL || *fd < 0)
return -EINVAL;
if (MMAP(evsel, cpu, thread) &&
!perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
return 0;
- if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+ if (readn(*fd, count->values, size) <= 0)
return -errno;
return 0;
@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread),
- err = ioctl(fd, ioc, arg);
+ int err;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd == NULL || *fd < 0)
+ return -1;
+
+ err = ioctl(*fd, ioc, arg);
if (err)
return err;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 0e824f7d8b19..6211d0b84b7a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -368,16 +368,6 @@ static inline int output_type(unsigned int type)
return OUTPUT_TYPE_OTHER;
}
-static inline unsigned int attr_type(unsigned int type)
-{
- switch (type) {
- case OUTPUT_TYPE_SYNTH:
- return PERF_TYPE_SYNTH;
- default:
- return type;
- }
-}
-
static bool output_set_by_user(void)
{
int j;
@@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}
+static struct evsel *find_first_output_type(struct evlist *evlist,
+ unsigned int type)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (output_type(evsel->core.attr.type) == (int)type)
+ return evsel;
+ }
+ return NULL;
+}
+
/*
* verify all user requested events exist and the samples
* have the expected data
@@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
struct evsel *evsel;
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
- evsel = perf_session__find_first_evtype(session, attr_type(j));
+ evsel = find_first_output_type(session->evlist, j);
/*
* even if fields is set to 0 (ie., show nothing) event must
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 781afe42e90e..fa5bd5c20e96 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
}
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
- unsigned int row, bool arrow_down)
+ unsigned int row, int diff, bool arrow_down)
{
- unsigned int end_row;
+ int end_row;
- if (row >= browser->top_idx)
- end_row = row - browser->top_idx;
- else
+ if (diff <= 0)
return;
SLsmg_set_char_set(1);
if (arrow_down) {
+ if (row + diff <= browser->top_idx)
+ return;
+
+ end_row = row + diff - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
- SLsmg_write_char(SLSMG_ULCORN_CHAR);
- ui_browser__gotorc(browser, end_row, column);
- SLsmg_draw_hline(2);
- ui_browser__gotorc(browser, end_row + 1, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
+
+ while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
+ ui_browser__gotorc(browser, end_row, column - 1);
+ SLsmg_draw_vline(1);
+ }
+
+ end_row = (int)(row - browser->top_idx);
+ if (end_row >= 0) {
+ ui_browser__gotorc(browser, end_row, column - 1);
+ SLsmg_write_char(SLSMG_ULCORN_CHAR);
+ ui_browser__gotorc(browser, end_row, column);
+ SLsmg_draw_hline(2);
+ }
} else {
+ if (row < browser->top_idx)
+ return;
+
+ end_row = row - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
ui_browser__gotorc(browser, end_row, column);
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 3678eb88f119..510ce4554050 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph);
void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
u64 start, u64 end);
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
- unsigned int row, bool arrow_down);
+ unsigned int row, int diff, bool arrow_down);
void __ui_browser__show_title(struct ui_browser *browser, const char *title);
void ui_browser__show_title(struct ui_browser *browser, const char *title);
int ui_browser__show(struct ui_browser *browser, const char *title,
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ef4da4295bf7..e81c2493efdf 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ab->selection = al;
}
-static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
+ int diff = 1;
+
+ while (pos && pos->al.offset == -1) {
+ pos = list_prev_entry(pos, al.node);
+ if (!ab->opts->hide_src_code)
+ diff++;
+ }
if (!pos)
- return false;
+ return 0;
if (ins__is_lock(&pos->ins))
name = pos->ops.locked.ins.name;
@@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
name = pos->ins.name;
if (!name || !cursor->ins.name)
- return false;
+ return 0;
- return ins__is_fused(ab->arch, name, cursor->ins.name);
+ if (ins__is_fused(ab->arch, name, cursor->ins.name))
+ return diff;
+ return 0;
}
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
@@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
struct annotation *notes = symbol__annotation(sym);
u8 pcnt_width = annotation__pcnt_width(notes);
int width;
+ int diff = 0;
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
@@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
pcnt_width + 2 + notes->widths.addr + width,
from, to);
- if (is_fused(ab, cursor)) {
+ diff = is_fused(ab, cursor);
+ if (diff > 0) {
ui_browser__mark_fused(browser,
pcnt_width + 3 + notes->widths.addr + width,
- from - 1,
- to > from);
+ from - diff, diff, to > from);
}
}
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 683f6d63560e..1a7112a87736 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -24,7 +24,10 @@
struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
{
struct btf *btf;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int err = btf__get_from_id(id, &btf);
+#pragma GCC diagnostic pop
return err ? ERR_PTR(err) : btf;
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index da19be7da284..44e40bad0e33 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
al.filtered = 0;
al.sym = NULL;
+ al.srcline = NULL;
if (!cpumode) {
thread__find_cpumode_addr_location(thread, ip, &al);
} else {
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
index beee0d5646a6..f7d84549cc3e 100755
--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -1,6 +1,6 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-# Copyright 2020 NXP Semiconductors
+# Copyright 2020 NXP
WAIT_TIME=1
NUM_NETIFS=4
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index cfc7f4f97fd1..df341648f818 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,5 +1,2 @@
-##TEST_GEN_FILES := test_unix_oob
-TEST_PROGS := test_unix_oob
+TEST_GEN_PROGS := test_unix_oob
include ../../lib.mk
-
-all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/net/af_unix/test_unix_oob.c b/tools/testing/selftests/net/af_unix/test_unix_oob.c
index 0f3e3763f4f8..3dece8b29253 100644
--- a/tools/testing/selftests/net/af_unix/test_unix_oob.c
+++ b/tools/testing/selftests/net/af_unix/test_unix_oob.c
@@ -271,8 +271,9 @@ main(int argc, char **argv)
read_oob(pfd, &oob);
if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
- fprintf(stderr, "Test 3 failed, sigurg %d len %d OOB %c ",
- "atmark %d\n", signal_recvd, len, oob, atmark);
+ fprintf(stderr,
+ "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
+ signal_recvd, len, oob, atmark);
die(1);
}
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
index bd1ca25febe4..aed632d29fff 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
#include <asm/unistd.h>
.text
@@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
1:
li r3, -1
blr
+
+
+.macro scv level
+ .long (0x44000001 | (\level) << 5)
+.endm
+
+FUNC_START(getppid_scv_tm_active)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ scv 0
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
+
+FUNC_START(getppid_scv_tm_suspended)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ tsuspend.
+ scv 0
+ tresume.
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c
index 467a6b3134b2..b763354c2eb4 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall.c
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c
@@ -19,23 +19,36 @@
#include "utils.h"
#include "tm.h"
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
+#endif
+
extern int getppid_tm_active(void);
extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
unsigned retries = 0;
#define TEST_DURATION 10 /* seconds */
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
{
int i;
pid_t pid;
for (i = 0; i < TM_RETRIES; i++) {
- if (suspend)
- pid = getppid_tm_suspended();
- else
- pid = getppid_tm_active();
+ if (suspend) {
+ if (scv)
+ pid = getppid_scv_tm_suspended();
+ else
+ pid = getppid_tm_suspended();
+ } else {
+ if (scv)
+ pid = getppid_scv_tm_active();
+ else
+ pid = getppid_tm_active();
+ }
if (pid >= 0)
return pid;
@@ -82,15 +95,24 @@ int tm_syscall(void)
* Test a syscall within a suspended transaction and verify
* that it succeeds.
*/
- FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
/*
* Test a syscall within an active transaction and verify that
* it fails with the correct failure code.
*/
- FAIL_IF(getppid_tm(false) != -1); /* Should fail... */
+ FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */
FAIL_IF(!failure_is_persistent()); /* ...persistently... */
FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+
+ /* Now do it all again with scv if it is available. */
+ if (have_hwcap2(PPC_FEATURE2_SCV)) {
+ FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */
+ FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+ FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+ }
+
gettimeofday(&now, 0);
}