-rw-r--r--  .clang-format | 13
-rw-r--r--  Documentation/devicetree/bindings/connector/usb-connector.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/media/renesas,drif.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml | 4
-rw-r--r--  Documentation/riscv/vm-layout.rst | 4
-rw-r--r--  Documentation/virt/kvm/mmu.rst | 4
-rw-r--r--  Documentation/vm/slub.rst | 10
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  Makefile | 7
-rw-r--r--  arch/arc/include/uapi/asm/sigcontext.h | 1
-rw-r--r--  arch/arc/kernel/signal.c | 43
-rw-r--r--  arch/arc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm/include/asm/cpuidle.h | 5
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 2
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 9
-rw-r--r--  arch/powerpc/mm/mem.c | 1
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/riscv/Kconfig | 18
-rw-r--r--  arch/riscv/Kconfig.socs | 1
-rw-r--r--  arch/riscv/Makefile | 2
-rw-r--r--  arch/riscv/boot/dts/microchip/Makefile | 1
-rw-r--r--  arch/riscv/boot/dts/sifive/Makefile | 1
-rw-r--r--  arch/riscv/boot/dts/sifive/fu740-c000.dtsi | 2
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h | 4
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 5
-rw-r--r--  arch/riscv/kernel/traps.c | 13
-rw-r--r--  arch/riscv/kernel/vmlinux-xip.lds.S | 15
-rw-r--r--  arch/riscv/mm/kasan_init.c | 10
-rw-r--r--  arch/s390/kernel/entry.S | 4
-rw-r--r--  arch/x86/Makefile | 5
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 9
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 4
-rw-r--r--  arch/x86/kvm/cpuid.c | 1
-rw-r--r--  arch/x86/kvm/lapic.c | 20
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 26
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h | 14
-rw-r--r--  arch/x86/kvm/svm/avic.c | 6
-rw-r--r--  arch/x86/kvm/svm/sev.c | 26
-rw-r--r--  arch/x86/kvm/trace.h | 6
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 1
-rw-r--r--  arch/x86/kvm/x86.c | 25
-rw-r--r--  arch/x86/pci/fixup.c | 44
-rw-r--r--  crypto/async_tx/async_xor.c | 3
-rw-r--r--  drivers/acpi/bus.c | 27
-rw-r--r--  drivers/acpi/sleep.c | 4
-rw-r--r--  drivers/block/loop.c | 25
-rw-r--r--  drivers/block/loop.h | 1
-rw-r--r--  drivers/bus/mhi/pci_generic.c | 42
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 10
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 245
-rw-r--r--  drivers/dma/Kconfig | 2
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 3
-rw-r--r--  drivers/dma/idxd/cdev.c | 1
-rw-r--r--  drivers/dma/idxd/init.c | 63
-rw-r--r--  drivers/dma/ipu/ipu_irq.c | 2
-rw-r--r--  drivers/dma/mediatek/mtk-uart-apdma.c | 27
-rw-r--r--  drivers/dma/pl330.c | 6
-rw-r--r--  drivers/dma/qcom/Kconfig | 1
-rw-r--r--  drivers/dma/sf-pdma/Kconfig | 1
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 2
-rw-r--r--  drivers/dma/ste_dma40.c | 3
-rw-r--r--  drivers/dma/stm32-mdma.c | 4
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 31
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 2
-rw-r--r--  drivers/gpio/gpio-wcd934x.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 1
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 3
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 9
-rw-r--r--  drivers/gpu/drm/mcde/mcde_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 155
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 31
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 5
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 2
-rw-r--r--  drivers/hwmon/corsair-psu.c | 14
-rw-r--r--  drivers/hwmon/scpi-hwmon.c | 9
-rw-r--r--  drivers/hwmon/tps23861.c | 17
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 1
-rw-r--r--  drivers/md/bcache/bcache.h | 1
-rw-r--r--  drivers/md/bcache/request.c | 20
-rw-r--r--  drivers/md/bcache/stats.c | 14
-rw-r--r--  drivers/md/bcache/stats.h | 1
-rw-r--r--  drivers/md/bcache/sysfs.c | 4
-rw-r--r--  drivers/misc/cardreader/rtl8411.c | 1
-rw-r--r--  drivers/misc/cardreader/rts5209.c | 1
-rw-r--r--  drivers/misc/cardreader/rts5227.c | 2
-rw-r--r--  drivers/misc/cardreader/rts5228.c | 1
-rw-r--r--  drivers/misc/cardreader/rts5229.c | 1
-rw-r--r--  drivers/misc/cardreader/rts5249.c | 3
-rw-r--r--  drivers/misc/cardreader/rts5260.c | 1
-rw-r--r--  drivers/misc/cardreader/rts5261.c | 1
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 44
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 9
-rw-r--r--  drivers/net/caif/caif_serial.c | 1
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 17
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 54
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 48
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 46
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 15
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 5
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 18
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 27
-rw-r--r--  drivers/net/hamradio/mkiss.c | 1
-rw-r--r--  drivers/net/mhi/net.c | 2
-rw-r--r--  drivers/net/phy/dp83867.c | 6
-rw-r--r--  drivers/net/usb/cdc_eem.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/usb/smsc75xx.c | 10
-rw-r--r--  drivers/net/vrf.c | 6
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 5
-rw-r--r--  drivers/net/xen-netback/interface.c | 6
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 3
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194-acpi.c | 108
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 138
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 49
-rw-r--r--  drivers/pci/of.c | 2
-rw-r--r--  drivers/pci/quirks.c | 93
-rw-r--r--  drivers/phy/broadcom/phy-brcm-usb-init.h | 4
-rw-r--r--  drivers/phy/cadence/phy-cadence-sierra.c | 1
-rw-r--r--  drivers/phy/mediatek/phy-mtk-tphy.c | 2
-rw-r--r--  drivers/phy/microchip/sparx5_serdes.c | 4
-rw-r--r--  drivers/phy/ralink/phy-mt7621-pci.c | 2
-rw-r--r--  drivers/phy/ti/phy-j721e-wiz.c | 1
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 4
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 4
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 3
-rw-r--r--  drivers/pinctrl/aspeed/pinmux-aspeed.c | 3
-rw-r--r--  drivers/pinctrl/qcom/Kconfig | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sdx55.c | 18
-rw-r--r--  drivers/pinctrl/ralink/pinctrl-rt2880.c | 2
-rw-r--r--  drivers/platform/mellanox/mlxreg-hotplug.c | 4
-rw-r--r--  drivers/platform/surface/aggregator/controller.c | 2
-rw-r--r--  drivers/platform/surface/surface_aggregator_registry.c | 7
-rw-r--r--  drivers/platform/surface/surface_dtx.c | 1
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 1
-rw-r--r--  drivers/ptp/ptp_clock.c | 6
-rw-r--r--  drivers/regulator/Kconfig | 2
-rw-r--r--  drivers/regulator/atc260x-regulator.c | 19
-rw-r--r--  drivers/regulator/bd718x7-regulator.c | 2
-rw-r--r--  drivers/regulator/core.c | 6
-rw-r--r--  drivers/regulator/cros-ec-regulator.c | 3
-rw-r--r--  drivers/regulator/da9121-regulator.c | 10
-rw-r--r--  drivers/regulator/fan53555.c | 3
-rw-r--r--  drivers/regulator/fan53880.c | 3
-rw-r--r--  drivers/regulator/fixed.c | 7
-rw-r--r--  drivers/regulator/helpers.c | 2
-rw-r--r--  drivers/regulator/hi6421v600-regulator.c | 4
-rw-r--r--  drivers/regulator/hi655x-regulator.c | 2
-rw-r--r--  drivers/regulator/max77620-regulator.c | 17
-rw-r--r--  drivers/regulator/mt6315-regulator.c | 2
-rw-r--r--  drivers/regulator/rt4801-regulator.c | 4
-rw-r--r--  drivers/regulator/rtmv20-regulator.c | 44
-rw-r--r--  drivers/regulator/scmi-regulator.c | 2
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 11
-rw-r--r--  drivers/scsi/hosts.c | 47
-rw-r--r--  drivers/spi/spi-bcm2835.c | 10
-rw-r--r--  drivers/spi/spi-bitbang.c | 18
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 4
-rw-r--r--  drivers/spi/spi-omap-uwire.c | 9
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 33
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 9
-rw-r--r--  drivers/spi/spi-stm32-qspi.c | 5
-rw-r--r--  drivers/spi/spi-zynq-qspi.c | 7
-rw-r--r--  drivers/staging/ralink-gdma/ralink-gdma.c | 2
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 6
-rw-r--r--  drivers/tty/serial/8250/8250_exar.c | 6
-rw-r--r--  drivers/usb/cdns3/cdns3-gadget.c | 8
-rw-r--r--  drivers/usb/cdns3/cdnsp-ring.c | 7
-rw-r--r--  drivers/usb/chipidea/usbmisc_imx.c | 16
-rw-r--r--  drivers/usb/core/hub.c | 7
-rw-r--r--  drivers/usb/dwc3/core.c | 8
-rw-r--r--  drivers/usb/dwc3/debug.h | 3
-rw-r--r--  drivers/usb/dwc3/debugfs.c | 21
-rw-r--r--  drivers/usb/dwc3/dwc3-meson-g12a.c | 13
-rw-r--r--  drivers/usb/dwc3/ep0.c | 3
-rw-r--r--  drivers/usb/dwc3/gadget.c | 18
-rw-r--r--  drivers/usb/gadget/config.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_ecm.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_eem.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_loopback.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c | 10
-rw-r--r--  drivers/usb/gadget/function/f_printer.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_rndis.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_serial.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_sourcesink.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_subset.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 3
-rw-r--r--  drivers/usb/host/xhci-pci.c | 7
-rw-r--r--  drivers/usb/host/xhci.h | 1
-rw-r--r--  drivers/usb/misc/brcmstb-usb-pinmap.c | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 3
-rw-r--r--  drivers/usb/serial/cp210x.c | 84
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 1
-rw-r--r--  drivers/usb/serial/omninet.c | 8
-rw-r--r--  drivers/usb/serial/quatech2.c | 6
-rw-r--r--  drivers/usb/typec/mux.c | 2
-rw-r--r--  drivers/usb/typec/mux/intel_pmc_mux.c | 15
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 123
-rw-r--r--  drivers/usb/typec/tcpm/wcove.c | 2
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 1
-rw-r--r--  fs/afs/main.c | 4
-rw-r--r--  fs/afs/write.c | 15
-rw-r--r--  fs/btrfs/block-group.c | 8
-rw-r--r--  fs/btrfs/disk-io.c | 26
-rw-r--r--  fs/btrfs/file.c | 4
-rw-r--r--  fs/btrfs/tree-log.c | 16
-rw-r--r--  fs/btrfs/zoned.c | 23
-rw-r--r--  fs/coredump.c | 2
-rw-r--r--  fs/debugfs/file.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 1
-rw-r--r--  fs/io_uring.c | 42
-rw-r--r--  fs/nfs/client.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 1
-rw-r--r--  fs/nfs/nfs4client.c | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 37
-rw-r--r--  fs/nfs/nfstrace.h | 4
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 4
-rw-r--r--  fs/proc/base.c | 11
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 1
-rw-r--r--  include/dt-bindings/usb/pd.h | 89
-rw-r--r--  include/linux/arch_topology.h | 1
-rw-r--r--  include/linux/compiler_attributes.h | 1
-rw-r--r--  include/linux/entry-kvm.h | 3
-rw-r--r--  include/linux/huge_mm.h | 8
-rw-r--r--  include/linux/hugetlb.h | 8
-rw-r--r--  include/linux/kvm_host.h | 10
-rw-r--r--  include/linux/mfd/rohm-bd70528.h | 4
-rw-r--r--  include/linux/mfd/rohm-bd71828.h | 10
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mlx5/driver.h | 4
-rw-r--r--  include/linux/mlx5/transobj.h | 1
-rw-r--r--  include/linux/mm.h | 3
-rw-r--r--  include/linux/mm_types.h | 27
-rw-r--r--  include/linux/ptp_clock_kernel.h | 2
-rw-r--r--  include/linux/rmap.h | 1
-rw-r--r--  include/linux/rtsx_pci.h | 2
-rw-r--r--  include/linux/sched.h | 8
-rw-r--r--  include/linux/socket.h | 2
-rw-r--r--  include/linux/swapops.h | 15
-rw-r--r--  include/linux/tick.h | 7
-rw-r--r--  include/linux/usb/pd.h | 2
-rw-r--r--  include/linux/usb/pd_ext_sdb.h | 4
-rw-r--r--  include/net/mac80211.h | 9
-rw-r--r--  include/net/net_namespace.h | 14
-rw-r--r--  include/net/sock.h | 17
-rw-r--r--  include/uapi/asm-generic/unistd.h | 3
-rw-r--r--  include/uapi/linux/in.h | 3
-rw-r--r--  include/uapi/linux/io_uring.h | 19
-rw-r--r--  kernel/bpf/verifier.c | 68
-rw-r--r--  kernel/cgroup/cgroup-v1.c | 4
-rw-r--r--  kernel/crash_core.c | 1
-rw-r--r--  kernel/entry/common.c | 5
-rw-r--r--  kernel/events/core.c | 2
-rw-r--r--  kernel/irq_work.c | 3
-rw-r--r--  kernel/printk/printk_safe.c | 2
-rw-r--r--  kernel/sched/core.c | 1
-rw-r--r--  kernel/sched/debug.c | 3
-rw-r--r--  kernel/sched/fair.c | 28
-rw-r--r--  kernel/sched/pelt.h | 11
-rw-r--r--  kernel/time/tick-sched.c | 1
-rw-r--r--  kernel/trace/ftrace.c | 8
-rw-r--r--  kernel/trace/trace.c | 13
-rw-r--r--  kernel/trace/trace_clock.c | 6
-rw-r--r--  mm/huge_memory.c | 56
-rw-r--r--  mm/hugetlb.c | 135
-rw-r--r--  mm/internal.h | 53
-rw-r--r--  mm/memory-failure.c | 36
-rw-r--r--  mm/memory.c | 41
-rw-r--r--  mm/migrate.c | 1
-rw-r--r--  mm/page_vma_mapped.c | 27
-rw-r--r--  mm/pgtable-generic.c | 5
-rw-r--r--  mm/rmap.c | 39
-rw-r--r--  mm/slab_common.c | 3
-rw-r--r--  mm/slub.c | 37
-rw-r--r--  mm/sparse.c | 13
-rw-r--r--  mm/swapfile.c | 2
-rw-r--r--  mm/truncate.c | 43
-rw-r--r--  net/appletalk/aarp.c | 2
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 4
-rw-r--r--  net/bluetooth/smp.c | 6
-rw-r--r--  net/bridge/br_private.h | 4
-rw-r--r--  net/bridge/br_vlan_tunnel.c | 38
-rw-r--r--  net/can/bcm.c | 62
-rw-r--r--  net/can/isotp.c | 61
-rw-r--r--  net/can/j1939/transport.c | 54
-rw-r--r--  net/can/raw.c | 62
-rw-r--r--  net/core/neighbour.c | 1
-rw-r--r--  net/core/net_namespace.c | 20
-rw-r--r--  net/core/rtnetlink.c | 8
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/ethtool/eeprom.c | 2
-rw-r--r--  net/ethtool/ioctl.c | 10
-rw-r--r--  net/ethtool/strset.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 4
-rw-r--r--  net/ipv4/cipso_ipv4.c | 1
-rw-r--r--  net/ipv4/devinet.c | 2
-rw-r--r--  net/ipv4/icmp.c | 7
-rw-r--r--  net/ipv4/igmp.c | 1
-rw-r--r--  net/ipv4/ping.c | 12
-rw-r--r--  net/ipv4/route.c | 15
-rw-r--r--  net/ipv4/udp.c | 10
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/netfilter/nft_fib_ipv6.c | 22
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/kcm/kcmsock.c | 5
-rw-r--r--  net/mac80211/debugfs.c | 11
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/iface.c | 19
-rw-r--r--  net/mac80211/main.c | 7
-rw-r--r--  net/mac80211/mlme.c | 8
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 2
-rw-r--r--  net/mac80211/rx.c | 9
-rw-r--r--  net/mac80211/scan.c | 21
-rw-r--r--  net/mac80211/tx.c | 52
-rw-r--r--  net/mac80211/util.c | 24
-rw-r--r--  net/mptcp/options.c | 2
-rw-r--r--  net/mptcp/protocol.c | 52
-rw-r--r--  net/mptcp/protocol.h | 1
-rw-r--r--  net/mptcp/subflow.c | 108
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 5
-rw-r--r--  net/netfilter/nf_tables_api.c | 85
-rw-r--r--  net/packet/af_packet.c | 41
-rw-r--r--  net/qrtr/qrtr.c | 2
-rw-r--r--  net/rds/recv.c | 2
-rw-r--r--  net/sched/act_ct.c | 21
-rw-r--r--  net/sched/sch_cake.c | 18
-rw-r--r--  net/socket.c | 13
-rw-r--r--  net/unix/af_unix.c | 7
-rw-r--r--  net/wireless/Makefile | 2
-rw-r--r--  net/wireless/core.c | 13
-rw-r--r--  net/wireless/pmsr.c | 16
-rw-r--r--  net/wireless/sysfs.c | 4
-rw-r--r--  net/wireless/util.c | 3
-rw-r--r--  scripts/recordmcount.h | 15
-rw-r--r--  sound/core/seq/seq_timer.c | 10
-rw-r--r--  sound/firewire/amdtp-stream.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 16
-rw-r--r--  sound/soc/codecs/rt5659.c | 26
-rw-r--r--  sound/soc/codecs/rt5682-sdw.c | 3
-rw-r--r--  sound/soc/codecs/tas2562.h | 14
-rw-r--r--  sound/soc/fsl/fsl-asoc-card.c | 1
-rw-r--r--  sound/soc/qcom/lpass-cpu.c | 79
-rw-r--r--  sound/soc/qcom/lpass.h | 4
-rw-r--r--  sound/soc/soc-core.c | 2
-rw-r--r--  sound/soc/soc-topology.c | 6
-rw-r--r--  sound/soc/sof/pm.c | 1
-rw-r--r--  tools/arch/x86/include/asm/disabled-features.h | 7
-rw-r--r--  tools/bootconfig/include/linux/bootconfig.h | 4
-rw-r--r--  tools/bootconfig/main.c | 1
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h | 3
-rw-r--r--  tools/include/uapi/linux/in.h | 3
-rw-r--r--  tools/lib/bpf/xsk.c | 2
-rw-r--r--  tools/objtool/arch/x86/decode.c | 4
-rw-r--r--  tools/objtool/elf.c | 25
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh | 4
-rw-r--r--  tools/perf/trace/beauty/include/linux/socket.h | 2
-rw-r--r--  tools/perf/util/machine.c | 3
-rw-r--r--  tools/perf/util/metricgroup.c | 14
-rw-r--r--  tools/perf/util/session.c | 1
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/and.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/bounds.c | 14
-rw-r--r--  tools/testing/selftests/bpf/verifier/dead_code.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/jmp32.c | 22
-rw-r--r--  tools/testing/selftests/bpf/verifier/jset.c | 10
-rw-r--r--  tools/testing/selftests/bpf/verifier/unpriv.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/value_ptr_arith.c | 7
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h | 10
-rw-r--r--  tools/testing/selftests/kvm/kvm_page_table_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 54
-rw-r--r--  tools/testing/selftests/kvm/lib/perf_test_util.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/test_util.c | 38
-rw-r--r--  tools/testing/selftests/kvm/memslot_perf_test.c | 2
-rwxr-xr-x  tools/testing/selftests/net/fib_tests.sh | 25
-rwxr-xr-x  tools/testing/selftests/net/icmp.sh | 74
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect.sh | 11
-rwxr-xr-x  tools/testing/selftests/net/udpgro_fwd.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/veth.sh | 5
-rw-r--r--  tools/testing/selftests/netfilter/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_fib.sh | 221
448 files changed, 4172 insertions, 1893 deletions
diff --git a/.clang-format b/.clang-format
index c24b147cac01..15d4eaabc6b5 100644
--- a/.clang-format
+++ b/.clang-format
@@ -109,8 +109,8 @@ ForEachMacros:
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- - 'cxl_for_each_cmd'
- 'device_for_each_child_node'
+ - 'displayid_iter_for_each'
- 'dma_fence_chain_for_each'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
@@ -136,6 +136,7 @@ ForEachMacros:
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
+ - 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
@@ -171,7 +172,6 @@ ForEachMacros:
- 'for_each_dapm_widgets'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- - 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
@@ -179,6 +179,7 @@ ForEachMacros:
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
+ - 'for_each_dtpm_table'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
@@ -215,6 +216,7 @@ ForEachMacros:
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
+ - 'for_each_msi_vector'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
@@ -270,6 +272,12 @@ ForEachMacros:
- 'for_each_prime_number_from'
- 'for_each_process'
- 'for_each_process_thread'
+ - 'for_each_prop_codec_conf'
+ - 'for_each_prop_dai_codec'
+ - 'for_each_prop_dai_cpu'
+ - 'for_each_prop_dlc_codecs'
+ - 'for_each_prop_dlc_cpus'
+ - 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_requested_gpio'
@@ -430,6 +438,7 @@ ForEachMacros:
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
+ - 'rb_for_each'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
index 32509b98142e..92b49bc37939 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -149,6 +149,17 @@ properties:
maxItems: 6
$ref: /schemas/types.yaml#/definitions/uint32-array
+ sink-vdos-v1:
+ description: An array of u32 with each entry, a Vendor Defined Message Object (VDO),
+ providing additional information corresponding to the product, the detailed bit
+ definitions and the order of each VDO can be found in
+ "USB Power Delivery Specification Revision 2.0, Version 1.3" chapter 6.4.4.3.1 Discover
+ Identity. User can specify the VDO array via VDO_IDH/_CERT/_PRODUCT/_CABLE/_AMA defined in
+ dt-bindings/usb/pd.h.
+ minItems: 3
+ maxItems: 6
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
op-sink-microwatt:
description: Sink required operating power in microwatt, if source can't
offer the power, Capability Mismatch is set. Required for power sink and
@@ -207,6 +218,10 @@ properties:
SNK_READY for non-pd link.
type: boolean
+dependencies:
+ sink-vdos-v1: [ 'sink-vdos' ]
+ sink-vdos: [ 'sink-vdos-v1' ]
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
index 33ee575bb09d..926be9a29044 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
@@ -49,7 +49,7 @@ examples:
#size-cells = <0>;
adc@48 {
- comatible = "ti,ads7828";
+ compatible = "ti,ads7828";
reg = <0x48>;
vref-supply = <&vref>;
ti,differential-input;
diff --git a/Documentation/devicetree/bindings/media/renesas,drif.yaml b/Documentation/devicetree/bindings/media/renesas,drif.yaml
index ce505a7c006a..9cd56ff2c316 100644
--- a/Documentation/devicetree/bindings/media/renesas,drif.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,drif.yaml
@@ -67,9 +67,7 @@ properties:
maxItems: 1
clock-names:
- maxItems: 1
- items:
- - const: fck
+ const: fck
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
index db61f0731a20..2e35aeaa8781 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
@@ -57,7 +57,7 @@ patternProperties:
rate
sound-dai:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: phandle of the CPU DAI
patternProperties:
@@ -71,7 +71,7 @@ patternProperties:
properties:
sound-dai:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: phandle of the codec DAI
required:
diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
index 329d32098af4..b7f98930d38d 100644
--- a/Documentation/riscv/vm-layout.rst
+++ b/Documentation/riscv/vm-layout.rst
@@ -58,6 +58,6 @@ RISC-V Linux Kernel SV39
|
____________________________________________________________|____________________________________________________________
| | | |
- ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules
- ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel, BPF
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
+ ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
__________________|____________|__________________|_________|____________________________________________________________
diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst
index 5bfe28b0728e..20d85daed395 100644
--- a/Documentation/virt/kvm/mmu.rst
+++ b/Documentation/virt/kvm/mmu.rst
@@ -171,8 +171,8 @@ Shadow pages contain the following information:
shadow pages) so role.quadrant takes values in the range 0..3. Each
quadrant maps 1GB virtual address space.
role.access:
- Inherited guest access permissions in the form uwx. Note execute
- permission is positive, not negative.
+ Inherited guest access permissions from the parent ptes in the form uwx.
+ Note execute permission is positive, not negative.
role.invalid:
The page is invalid and should not be used. It is a root page that is
currently pinned (by a cpu hardware register pointing to it); once it is
diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
index 03f294a638bd..d3028554b1e9 100644
--- a/Documentation/vm/slub.rst
+++ b/Documentation/vm/slub.rst
@@ -181,7 +181,7 @@ SLUB Debug output
Here is a sample of slub debug output::
====================================================================
- BUG kmalloc-8: Redzone overwritten
+ BUG kmalloc-8: Right Redzone overwritten
--------------------------------------------------------------------
INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
- Redzone 0xc90f6d28: 00 cc cc cc .
- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object (0xc90f6d20): 31 30 31 39 2e 30 30 35 1019.005
+ Redzone (0xc90f6d28): 00 cc cc cc .
+ Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
[<c010523d>] dump_trace+0x63/0x1eb
[<c01053df>] show_trace_log_lvl+0x1a/0x2f
diff --git a/MAINTAINERS b/MAINTAINERS
index b706dd20ff2b..8c5ee008301a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16560,6 +16560,7 @@ F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <kgraul@linux.ibm.com>
+M: Guvenc Gulce <guvenc@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -18873,6 +18874,13 @@ S: Maintained
F: drivers/usb/host/isp116x*
F: include/linux/usb/isp116x.h
+USB ISP1760 DRIVER
+M: Rui Miguel Silva <rui.silva@linaro.org>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/isp1760/*
+F: Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
+
USB LAN78XX ETHERNET DRIVER
M: Woojung Huh <woojung.huh@microchip.com>
M: UNGLinuxDriver@microchip.com
diff --git a/Makefile b/Makefile
index 144d4f8b7eb3..2d7a8df84e2b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Frozen Wasteland
# *DOCUMENTATION*
@@ -929,11 +929,14 @@ CC_FLAGS_LTO += -fvisibility=hidden
# Limit inlining across translation units to reduce binary size
KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
-# Check for frame size exceeding threshold during prolog/epilog insertion.
+# Check for frame size exceeding threshold during prolog/epilog insertion
+# when using lld < 13.0.0.
ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
endif
endif
+endif
ifdef CONFIG_LTO
KBUILD_CFLAGS += -fno-lto $(CC_FLAGS_LTO)
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
index 95f8a4380e11..7a5449dfcb29 100644
--- a/arch/arc/include/uapi/asm/sigcontext.h
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -18,6 +18,7 @@
*/
struct sigcontext {
struct user_regs_struct regs;
+ struct user_regs_arcv2 v2abi;
};
#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index b3ccb9e5ffe4..cb2f88502baf 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -61,6 +61,41 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ v2abi.r58 = regs->r58;
+ v2abi.r59 = regs->r59;
+#else
+ v2abi.r58 = v2abi.r59 = 0;
+#endif
+ err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+ return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+ regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ regs->r58 = v2abi.r58;
+ regs->r59 = v2abi.r59;
+#endif
+#endif
+ return err;
+}
+
static int
stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
if (err)
return -EFAULT;
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 33ce59d91461..e2146a8da195 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -57,7 +57,6 @@ SECTIONS
.init.ramfs : { INIT_RAM_FS }
. = ALIGN(PAGE_SIZE);
- _stext = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS
.text : {
_text = .;
+ _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0d67ed682e07..bc4ffa7ca04c 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -7,9 +7,11 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -42,8 +44,7 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
- __used __section("__cpuidle_method_of_table") \
- = { .method = _method, .ops = _ops }
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 2d5c6bec2b4f..93ce3ec25387 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -50,7 +50,7 @@ l_yes:
1098: nop; \
.pushsection __jump_table, "aw"; \
.long 1098b - ., LABEL - .; \
- FTR_ENTRY_LONG KEY; \
+ FTR_ENTRY_LONG KEY - .; \
.popsection
#endif
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index dca66481d0c2..f9e1f5428b9e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -902,6 +902,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
user_write_access_end();
+ /* Save the siginfo outside of the unsafe block. */
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ goto badframe;
+
/* Make sure signal handler doesn't get spurious FP exceptions */
tsk->thread.fp_state.fpscr = 0;
@@ -915,11 +919,6 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
regs->nip = (unsigned long) &frame->tramp[0];
}
-
- /* Save the siginfo outside of the unsafe block. */
- if (copy_siginfo_to_user(&frame->info, &ksig->info))
- goto badframe;
-
/* Allocate a dummy caller frame for the signal handler. */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 043bbeaf407c..a6b36a40897a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -20,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
+#include <asm/sparsemem.h>
#include <asm/svm.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 16d4d1b6a1ff..51622411a7cc 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2254,7 +2254,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
bool use_siar = regs_use_siar(regs);
unsigned long siar = mfspr(SPRN_SIAR);
- if (ppmu->flags & PPMU_P10_DD1) {
+ if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
if (siar)
return siar;
else
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c5914e70a0fd..18ec0f9bb8d5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -61,11 +61,11 @@ config RISCV
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
@@ -80,9 +80,9 @@ config RISCV
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_KPROBES
- select HAVE_KPROBES_ON_FTRACE
- select HAVE_KRETPROBES
+ select HAVE_KPROBES if !XIP_KERNEL
+ select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+ select HAVE_KRETPROBES if !XIP_KERNEL
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -231,11 +231,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select SWIOTLB if MMU
endchoice
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index ed963761fbd2..30676ebb16eb 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -14,6 +14,7 @@ config SOC_SIFIVE
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
select SIFIVE_PLIC
+ select RISCV_ERRATA_ALTERNATIVE
select ERRATA_SIFIVE
help
This enables support for SiFive SoC platform hardware.
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 4be020695428..99ecd8bcfd77 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=8
endif
-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
index 622b12771fd3..855c1502d912 100644
--- a/arch/riscv/boot/dts/microchip/Makefile
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
index 74c47fe9fc22..d90e4eb0ade8 100644
--- a/arch/riscv/boot/dts/sifive/Makefile
+++ b/arch/riscv/boot/dts/sifive/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index 8eef82e4199f..abbb960f90a0 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -273,7 +273,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic0>;
- interrupts = <19 20 21 22>;
+ interrupts = <19 21 22 20>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
gpio: gpio@10060000 {
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 88c08705f64a..67406c376389 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -51,7 +51,7 @@
REG_ASM " " newlen "\n" \
".word " errata_id "\n"
-#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
".if " __stringify(enable) " == 1\n" \
".pushsection .alternative, \"a\"\n" \
ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
@@ -69,7 +69,7 @@
"886 :\n" \
old_c "\n" \
"887 :\n" \
- ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c)
+ ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9469f464e71a..380cd3a7e548 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -30,9 +30,8 @@
#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
-/* KASLR should leave at least 128MB for BPF after the kernel */
-#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end)
-#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 0721b9798595..7bc88d8aab97 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -86,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
}
}
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section __section(".xip.traps")
+#else
+#define __trap_section
+#endif
#define DO_ERROR_INFO(name, signo, code, str) \
-asmlinkage __visible void name(struct pt_regs *regs) \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
}
@@ -111,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_load(regs))
return;
@@ -119,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
"Oops - load address misaligned");
}
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_store(regs))
return;
@@ -146,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
return GET_INSN_LENGTH(insn);
}
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 4b29b9917f99..a3ff09c4c3f9 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -99,9 +99,22 @@ SECTIONS
}
PERCPU_SECTION(L1_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(8);
+ .alternative : {
+ __alt_start = .;
+ *(.alternative)
+ __alt_end = .;
+ }
__init_end = .;
+ . = ALIGN(16);
+ .xip.traps : {
+ __xip_traps_start = .;
+ *(.xip.traps)
+ __xip_traps_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
.sdata : {
__global_pointer$ = . + 0x800;
*(.sdata*)
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 9daacae93e33..d7189c8714a9 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -169,7 +169,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
void __init kasan_init(void)
{
- phys_addr_t _start, _end;
+ phys_addr_t p_start, p_end;
u64 i;
/*
@@ -189,9 +189,9 @@ void __init kasan_init(void)
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
/* Populate the linear mapping */
- for_each_mem_range(i, &_start, &_end) {
- void *start = (void *)__va(_start);
- void *end = (void *)__va(_end);
+ for_each_mem_range(i, &p_start, &p_end) {
+ void *start = (void *)__va(p_start);
+ void *end = (void *)__va(p_end);
if (start >= end)
break;
@@ -201,7 +201,7 @@ void __init kasan_init(void)
/* Populate kernel, BPF, modules mapping */
kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
- kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
+ kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 12de7a9c85b3..9cc71ca9a88f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -651,9 +651,9 @@ ENDPROC(stack_overflow)
.Lcleanup_sie_mcck:
larl %r13,.Lsie_entry
slgr %r9,%r13
- larl %r13,.Lsie_skip
+ lghi %r13,.Lsie_skip - .Lsie_entry
clgr %r9,%r13
- jh .Lcleanup_sie_int
+ jhe .Lcleanup_sie_int
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
.Lcleanup_sie_int:
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 307529417021..cb5e8d39cac1 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -200,8 +200,9 @@ endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
- -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
endif
ifdef CONFIG_X86_NEED_RELOCS
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 63f097289a84..3a75a2c601c2 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1406,6 +1406,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
die_id = i;
else
die_id = topology_phys_to_logical_pkg(i);
+ if (die_id < 0)
+ die_id = -ENODEV;
map->pbus_to_dieid[bus] = die_id;
break;
}
@@ -1452,14 +1454,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
i = -1;
if (reverse) {
for (bus = 255; bus >= 0; bus--) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
}
} else {
for (bus = 0; bus <= 255; bus++) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
@@ -5097,9 +5099,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
.event_ctl = SNR_M2M_PCI_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
.ops = &snr_m2m_uncore_pci_ops,
- .format_group = &skx_uncore_format_group,
+ .format_group = &snr_m2m_uncore_format_group,
};
static struct attribute *icx_upi_uncore_formats_attr[] = {
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 3ef5868ac588..7aecb2fc3186 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BPU_PERFCTR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BSU_ESCR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_EVENTSEL0;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9a48f138832d..b4da665bb892 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -655,6 +655,7 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
entry->ecx = F(RDPID);
++array->nent;
+ break;
default:
break;
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 8120e8614b92..17fa4ab1b834 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
if (!apic_x2apic_mode(apic))
valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+ if (alignment + len > 4)
+ return 1;
+
if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
return 1;
@@ -1494,6 +1497,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
static void cancel_hv_timer(struct kvm_lapic *apic);
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+ hrtimer_cancel(&apic->lapic_timer.timer);
+ preempt_disable();
+ if (apic->lapic_timer.hv_timer_in_use)
+ cancel_hv_timer(apic);
+ preempt_enable();
+}
+
static void apic_update_lvtt(struct kvm_lapic *apic)
{
u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1502,11 +1514,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
if (apic->lapic_timer.timer_mode != timer_mode) {
if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
APIC_LVT_TIMER_TSCDEADLINE)) {
- hrtimer_cancel(&apic->lapic_timer.timer);
- preempt_disable();
- if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_timer(apic);
- preempt_enable();
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, 0);
apic->lapic_timer.period = 0;
apic->lapic_timer.tscdeadline = 0;
@@ -2092,7 +2100,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
if (apic_lvtt_tscdeadline(apic))
break;
- hrtimer_cancel(&apic->lapic_timer.timer);
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0144c40d09c7..8d5876dfc6b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4739,9 +4739,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
context->inject_page_fault = kvm_inject_page_fault;
}
+static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+{
+ union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+
+ /*
+ * Nested MMUs are used only for walking L2's gva->gpa, they never have
+ * shadow pages of their own and so "direct" has no meaning. Set it
+ * to "true" to try to detect bogus usage of the nested MMU.
+ */
+ role.base.direct = true;
+
+ if (!is_paging(vcpu))
+ role.base.level = 0;
+ else if (is_long_mode(vcpu))
+ role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
+ PT64_ROOT_4LEVEL;
+ else if (is_pae(vcpu))
+ role.base.level = PT32E_ROOT_LEVEL;
+ else
+ role.base.level = PT32_ROOT_LEVEL;
+
+ return role;
+}
+
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
- union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 70b7e44e3035..823a5919f9fa 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -90,8 +90,8 @@ struct guest_walker {
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
bool pte_writable[PT_MAX_FULL_LEVELS];
- unsigned pt_access;
- unsigned pte_access;
+ unsigned int pt_access[PT_MAX_FULL_LEVELS];
+ unsigned int pte_access;
gfn_t gfn;
struct x86_exception fault;
};
@@ -418,13 +418,15 @@ retry_walk:
}
walker->ptes[walker->level - 1] = pte;
+
+ /* Convert to ACC_*_MASK flags for struct guest_walker. */
+ walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
} while (!is_last_gpte(mmu, walker->level, pte));
pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
/* Convert to ACC_*_MASK flags for struct guest_walker. */
- walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
@@ -463,7 +465,8 @@ retry_walk:
}
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __func__, (u64)pte, walker->pte_access, walker->pt_access);
+ __func__, (u64)pte, walker->pte_access,
+ walker->pt_access[walker->level - 1]);
return 1;
error:
@@ -643,7 +646,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
- unsigned direct_access, access = gw->pt_access;
+ unsigned int direct_access, access;
int top_level, level, req_level, ret;
gfn_t base_gfn = gw->gfn;
@@ -675,6 +678,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
sp = NULL;
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
+ access = gw->pt_access[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
false, access);
}
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 0e62e6a2438c..5e7e920113f3 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -221,7 +221,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
return &avic_physical_id_table[index];
}
-/**
+/*
* Note:
* AVIC hardware walks the nested page table to check permissions,
* but does not use the SPA address specified in the leaf page
@@ -764,7 +764,7 @@ out:
return ret;
}
-/**
+/*
* Note:
* The HW cannot support posting multicast/broadcast
* interrupts to a vCPU. So, we still use legacy interrupt
@@ -1005,7 +1005,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
-/**
+/*
* This function is called during VCPU halt/unhalt.
*/
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 5bc887e9a986..8d36f0c73071 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -199,9 +199,19 @@ static void sev_asid_free(struct kvm_sev_info *sev)
sev->misc_cg = NULL;
}
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+static void sev_decommission(unsigned int handle)
{
struct sev_data_decommission decommission;
+
+ if (!handle)
+ return;
+
+ decommission.handle = handle;
+ sev_guest_decommission(&decommission, NULL);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
struct sev_data_deactivate deactivate;
if (!handle)
@@ -214,9 +224,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
sev_guest_deactivate(&deactivate, NULL);
up_read(&sev_deactivate_lock);
- /* decommission handle */
- decommission.handle = handle;
- sev_guest_decommission(&decommission, NULL);
+ sev_decommission(handle);
}
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -341,8 +349,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start.handle, error);
- if (ret)
+ if (ret) {
+ sev_decommission(start.handle);
goto e_free_session;
+ }
/* return handle to userspace */
params.handle = start.handle;
@@ -1103,10 +1113,9 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_start data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
- if (ret < 0)
- return ret;
params->session_len = data.session_len;
if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
@@ -1215,10 +1224,9 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_update_data data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
- if (ret < 0)
- return ret;
params->hdr_len = data.hdr_len;
params->trans_len = data.trans_len;
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a61c015870e3..4f839148948b 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
TP_ARGS(msg, err),
TP_STRUCT__entry(
- __field(const char *, msg)
+ __string(msg, msg)
__field(u32, err)
),
TP_fast_assign(
- __entry->msg = msg;
+ __assign_str(msg, msg);
__entry->err = err;
),
- TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+ TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 50b42d7a8a11..c2a779b688e6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6247,6 +6247,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
switch (kvm_get_apic_mode(vcpu)) {
case LAPIC_MODE_INVALID:
WARN_ONCE(true, "Invalid local APIC state");
+ break;
case LAPIC_MODE_DISABLED:
break;
case LAPIC_MODE_XAPIC:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b594275d49b5..e0f4a46649d7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3072,6 +3072,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
+
+ if (!tdp_enabled) {
+ /*
+ * A TLB flush on behalf of the guest is equivalent to
+ * INVPCID(all), toggling CR4.PGE, etc., which requires
+ * a forced sync of the shadow page tables. Unload the
+ * entire MMU here and the subsequent load will sync the
+ * shadow page tables, and also flush the TLB.
+ */
+ kvm_mmu_unload(vcpu);
+ return;
+ }
+
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
@@ -3101,9 +3114,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
* expensive IPIs.
*/
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+ u8 st_preempted = xchg(&st->preempted, 0);
+
trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
- st->preempted & KVM_VCPU_FLUSH_TLB);
- if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ st_preempted & KVM_VCPU_FLUSH_TLB);
+ if (st_preempted & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb_guest(vcpu);
} else {
st->preempted = 0;
@@ -7091,7 +7106,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
- emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ vcpu->arch.hflags = emul_flags;
+ kvm_mmu_reset_context(vcpu);
}
static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
@@ -8243,6 +8261,7 @@ void kvm_arch_exit(void)
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
free_percpu(user_return_msrs);
+ kmem_cache_destroy(x86_emulator_cache);
kmem_cache_destroy(x86_fpu_cache);
#ifdef CONFIG_KVM_XEN
static_key_deferred_flush(&kvm_xen_enabled);
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 02dc64625e64..2edd86649468 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+#define RS690_LOWER_TOP_OF_DRAM2 0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2 0x31
+#define RS690_HTIU_NB_INDEX 0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
+#define RS690_HTIU_NB_DATA 0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+ u32 val = 0;
+ phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+ if (top_of_dram <= (1ULL << 32))
+ return;
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2);
+ pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+ if (val)
+ return;
+
+ pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+ top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
#endif
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 6cd7f7025df4..d8a91521144e 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
- src_offs++;
+ if (src_offs)
+ src_offs++;
}
/* wait for any prerequisite operations */
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index be7da23fad76..a4bd673934c0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -330,32 +330,21 @@ static void acpi_bus_osc_negotiate_platform_control(void)
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
- capbuf_ret = context.ret.pointer;
- if (context.ret.length <= OSC_SUPPORT_DWORD) {
- kfree(context.ret.pointer);
- return;
- }
+ kfree(context.ret.pointer);
- /*
- * Now run _OSC again with query flag clear and with the caps
- * supported by both the OS and the platform.
- */
+ /* Now run _OSC again with query flag clear */
capbuf[OSC_QUERY_DWORD] = 0;
- capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
- kfree(context.ret.pointer);
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_DWORD) {
- osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
- osc_pc_lpi_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
- osc_sb_native_usb4_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
- }
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ osc_sb_native_usb4_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
kfree(context.ret.pointer);
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index df386571da98..3bb2adef8490 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1009,10 +1009,8 @@ static void acpi_sleep_hibernate_setup(void)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
- if (facs) {
+ if (facs)
s4_hardware_signature = facs->hardware_signature;
- acpi_put_table((struct acpi_table_header *)facs);
- }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d58d68f3c7cd..76e12f3482a9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1879,29 +1879,18 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int lo_open(struct block_device *bdev, fmode_t mode)
{
- struct loop_device *lo;
+ struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- /*
- * take loop_ctl_mutex to protect lo pointer from race with
- * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
- * release it prior to updating lo->lo_refcnt.
- */
- err = mutex_lock_killable(&loop_ctl_mutex);
- if (err)
- return err;
- lo = bdev->bd_disk->private_data;
- if (!lo) {
- mutex_unlock(&loop_ctl_mutex);
- return -ENXIO;
- }
err = mutex_lock_killable(&lo->lo_mutex);
- mutex_unlock(&loop_ctl_mutex);
if (err)
return err;
- atomic_inc(&lo->lo_refcnt);
+ if (lo->lo_state == Lo_deleting)
+ err = -ENXIO;
+ else
+ atomic_inc(&lo->lo_refcnt);
mutex_unlock(&lo->lo_mutex);
- return 0;
+ return err;
}
static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -2285,7 +2274,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
mutex_unlock(&lo->lo_mutex);
break;
}
- lo->lo_disk->private_data = NULL;
+ lo->lo_state = Lo_deleting;
mutex_unlock(&lo->lo_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index a3c04f310672..5beb959b94d3 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -22,6 +22,7 @@ enum {
Lo_unbound,
Lo_bound,
Lo_rundown,
+ Lo_deleting,
};
struct loop_func_table;
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 7c810f02a2ef..b3357a8a2fdb 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -311,8 +311,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
- MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
- MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -708,7 +708,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- del_timer(&mhi_pdev->health_check_timer);
+ del_timer_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -935,9 +935,43 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
return ret;
}
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ /* We want to stop all operations; hibernation does not guarantee that
+ * the device will be in the same state as before freezing, especially
+ * if the intermediate restore kernel reinitializes the MHI device with
+ * a new context.
+ */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+ /* Reinitialize the device */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+ return 0;
+}
+
static const struct dev_pm_ops mhi_pci_pm_ops = {
SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = mhi_pci_suspend,
+ .resume = mhi_pci_resume,
+ .freeze = mhi_pci_freeze,
+ .thaw = mhi_pci_restore,
+ .restore = mhi_pci_restore,
+#endif
};
static struct pci_driver mhi_pci_driver = {
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a5c5f70acfc9..e65e0a43be64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,16 +19,6 @@ config ACPI_CPPC_CPUFREQ
If in doubt, say N.
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 3848b4c222e1..2f769b1630c5 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -10,18 +10,14 @@
#define pr_fmt(fmt) "CPPC Cpufreq:" fmt
-#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
-#include <linux/irq_work.h>
-#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
-#include <uapi/linux/sched/types.h>
#include <asm/unaligned.h>
@@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
}
};
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
-
-/* Frequency invariance support */
-struct cppc_freq_invariance {
- int cpu;
- struct irq_work irq_work;
- struct kthread_work work;
- struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
- struct cppc_cpudata *cpu_data;
-};
-
-static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
-static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
-
-static struct cpufreq_driver cppc_cpufreq_driver;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1);
-
-/**
- * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
- * @work: The work item.
- *
- * The CPPC driver register itself with the topology core to provide its own
- * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
- * gets called by the scheduler on every tick.
- *
- * Note that the arch specific counters have higher priority than CPPC counters,
- * if available, though the CPPC driver doesn't need to have any special
- * handling for that.
- *
- * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
- * reach here from hard-irq context), which then schedules a normal work item
- * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
- * based on the counter updates since the last tick.
- */
-static void cppc_scale_freq_workfn(struct kthread_work *work)
-{
- struct cppc_freq_invariance *cppc_fi;
- struct cppc_perf_fb_ctrs fb_ctrs = {0};
- struct cppc_cpudata *cpu_data;
- unsigned long local_freq_scale;
- u64 perf;
-
- cppc_fi = container_of(work, struct cppc_freq_invariance, work);
- cpu_data = cppc_fi->cpu_data;
-
- if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
- pr_warn("%s: failed to read perf counters\n", __func__);
- return;
- }
-
- cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
- perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
- fb_ctrs);
-
- perf <<= SCHED_CAPACITY_SHIFT;
- local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
- if (WARN_ON(local_freq_scale > 1024))
- local_freq_scale = 1024;
-
- per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
-}
-
-static void cppc_irq_work(struct irq_work *irq_work)
-{
- struct cppc_freq_invariance *cppc_fi;
-
- cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
- kthread_queue_work(kworker_fie, &cppc_fi->work);
-}
-
-static void cppc_scale_freq_tick(void)
-{
- struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
-
- /*
- * cppc_get_perf_ctrs() can potentially sleep, call that from the right
- * context.
- */
- irq_work_queue(&cppc_fi->irq_work);
-}
-
-static struct scale_freq_data cppc_sftd = {
- .source = SCALE_FREQ_SOURCE_CPPC,
- .set_freq_scale = cppc_scale_freq_tick,
-};
-
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
- struct cppc_cpudata *cpu_data)
-{
- struct cppc_perf_fb_ctrs fb_ctrs = {0};
- struct cppc_freq_invariance *cppc_fi;
- int i, ret;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- for_each_cpu(i, policy->cpus) {
- cppc_fi = &per_cpu(cppc_freq_inv, i);
- cppc_fi->cpu = i;
- cppc_fi->cpu_data = cpu_data;
- kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
- init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
-
- ret = cppc_get_perf_ctrs(i, &fb_ctrs);
- if (ret) {
- pr_warn("%s: failed to read perf counters: %d\n",
- __func__, ret);
- fie_disabled = true;
- } else {
- cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
- }
- }
-}
-
-static void __init cppc_freq_invariance_init(void)
-{
- struct sched_attr attr = {
- .size = sizeof(struct sched_attr),
- .sched_policy = SCHED_DEADLINE,
- .sched_nice = 0,
- .sched_priority = 0,
- /*
- * Fake (unused) bandwidth; workaround to "fix"
- * priority inheritance.
- */
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
- };
- int ret;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- kworker_fie = kthread_create_worker(0, "cppc_fie");
- if (IS_ERR(kworker_fie))
- return;
-
- ret = sched_setattr_nocheck(kworker_fie->task, &attr);
- if (ret) {
- pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
- ret);
- kthread_destroy_worker(kworker_fie);
- return;
- }
-
- /* Register for freq-invariance */
- topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
-}
-
-static void cppc_freq_invariance_exit(void)
-{
- struct cppc_freq_invariance *cppc_fi;
- int i;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
- for_each_possible_cpu(i) {
- cppc_fi = &per_cpu(cppc_freq_inv, i);
- irq_work_sync(&cppc_fi->irq_work);
- }
-
- kthread_destroy_worker(kworker_fie);
- kworker_fie = NULL;
-}
-
-#else
-static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
- struct cppc_cpudata *cpu_data)
-{
-}
-
-static inline void cppc_freq_invariance_init(void)
-{
-}
-
-static inline void cppc_freq_invariance_exit(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
@@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
- if (ret) {
+ if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->highest_perf, cpu, ret);
- } else {
- cppc_freq_invariance_policy_init(policy, cpu_data);
- }
return ret;
}
@@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
+ struct cppc_perf_fb_ctrs fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
- u64 reference_perf;
+ u64 reference_perf, delivered_perf;
reference_perf = fb_ctrs_t0.reference_perf;
@@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
delta_delivered = get_delta(fb_ctrs_t1.delivered,
fb_ctrs_t0.delivered);
- /* Check to avoid divide-by zero and invalid delivered_perf */
- if (!delta_reference || !delta_delivered)
- return cpu_data->perf_ctrls.desired_perf;
-
- return (reference_perf * delta_delivered) / delta_reference;
-}
-
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1)
-{
- u64 delivered_perf;
-
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
- fb_ctrs_t1);
+ /* Check to avoid divide-by-zero */
+ if (delta_reference || delta_delivered)
+ delivered_perf = (reference_perf * delta_delivered) /
+ delta_reference;
+ else
+ delivered_perf = cpu_data->perf_ctrls.desired_perf;
return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}
@@ -718,8 +504,6 @@ static void cppc_check_hisi_workaround(void)
static int __init cppc_cpufreq_init(void)
{
- int ret;
-
if ((acpi_disabled) || !acpi_cpc_valid())
return -ENODEV;
@@ -727,11 +511,7 @@ static int __init cppc_cpufreq_init(void)
cppc_check_hisi_workaround();
- ret = cpufreq_register_driver(&cppc_cpufreq_driver);
- if (!ret)
- cppc_freq_invariance_init();
-
- return ret;
+ return cpufreq_register_driver(&cppc_cpufreq_driver);
}
static inline void free_cpu_data(void)
@@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
static void __exit cppc_cpufreq_exit(void)
{
- cppc_freq_invariance_exit();
cpufreq_unregister_driver(&cppc_cpufreq_driver);
free_cpu_data();
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ab9d9a488a6..39b5b46e880f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -59,6 +59,7 @@ config DMA_OF
#devices
config ALTERA_MSGDMA
tristate "Altera / Intel mSGDMA Engine"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for Altera / Intel mSGDMA controller.
@@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
config XILINX_ZYNQMP_DPDMA
tristate "Xilinx DPDMA Engine"
+ depends on HAS_IOMEM && OF
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ec909e0b810..4ae057922ef1 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI major version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI minor version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
ppriv->store =
dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
if (!ppriv->store) {
+ err = -ENOMEM;
dev_err(dev, "dpaa2_io_store_create() failed\n");
goto err_store;
}
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 302cba5ff779..d4419bf1fede 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
+ rc = -EINVAL;
goto failed;
}
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 776fd44aff5f..442d55c11a5f 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -168,6 +168,32 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt <= 0)
+ return;
+
+ irq_entry = &idxd->irq_entries[0];
+ free_irq(irq_entry->vector, irq_entry);
+
+ for (i = 1; i < msixcnt; i++) {
+
+ irq_entry = &idxd->irq_entries[i];
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
+ idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+ IDXD_IRQ_MSIX);
+ free_irq(irq_entry->vector, irq_entry);
+ }
+
+ idxd_mask_error_interrupts(idxd);
+ pci_free_irq_vectors(pdev);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -242,6 +268,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine->idxd = idxd;
device_initialize(&engine->conf_dev);
engine->conf_dev.parent = &idxd->conf_dev;
+ engine->conf_dev.bus = &dsa_bus_type;
engine->conf_dev.type = &idxd_engine_device_type;
rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
@@ -303,6 +330,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup_internals(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
+ for (i = 0; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
+ for (i = 0; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
+ destroy_workqueue(idxd->wq);
+}
+
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -531,12 +571,12 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "Loading RO device config\n");
rc = idxd_device_load_config(idxd);
if (rc < 0)
- goto err;
+ goto err_config;
}
rc = idxd_setup_interrupts(idxd);
if (rc)
- goto err;
+ goto err_config;
dev_dbg(dev, "IDXD interrupt setup complete.\n");
@@ -549,6 +589,8 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
return 0;
+ err_config:
+ idxd_cleanup_internals(idxd);
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
@@ -556,6 +598,18 @@ static int idxd_probe(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+
+ perfmon_pmu_remove(idxd);
+ idxd_cleanup_interrupts(idxd);
+ idxd_cleanup_internals(idxd);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+}
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -608,7 +662,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
- goto err;
+ goto err_dev_register;
}
idxd->state = IDXD_DEV_CONF_READY;
@@ -618,6 +672,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+ err_dev_register:
+ idxd_cleanup(idxd);
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
@@ -787,6 +843,7 @@ module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
+ idxd_unregister_driver();
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
idxd_unregister_bus_type();
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 0d5c42f7bfa4..97d9a6f04f2a 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -230,7 +230,7 @@ out:
}
/**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
+ * ipu_irq_unmap() - unmap an IPU interrupt source
* @source: interrupt source bit position (see ipu_irq_map())
* @return: 0 or negative error code
*/
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 27c07350971d..375e7e647df6 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
- struct dma_chan *chan = vd->tx.chan;
- struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
- kfree(c->desc);
+ kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
- struct mtk_uart_apdma_desc *d = c->desc;
-
mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
}
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
c->rx_status = d->avail_len - cnt;
mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+ struct mtk_uart_apdma_desc *d = c->desc;
+
+ if (d) {
+ list_del(&d->vd.node);
+ vchan_cookie_complete(&d->vd);
+ c->desc = NULL;
+ }
}
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
mtk_uart_apdma_rx_handler(c);
else if (c->dir == DMA_MEM_TO_DEV)
mtk_uart_apdma_tx_handler(c);
+ mtk_uart_apdma_chan_complete_handler(c);
spin_unlock_irqrestore(&c->vc.lock, flags);
return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
return NULL;
/* Now allocate and setup the descriptor */
- d = kzalloc(sizeof(*d), GFP_ATOMIC);
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
if (!d)
return NULL;
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&c->vc.lock, flags);
- if (vchan_issue_pending(&c->vc)) {
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
vd = vchan_next_desc(&c->vc);
c->desc = to_mtk_uart_apdma_desc(&vd->tx);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fd8d2bc3be9f..110de8a60058 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
+ unsigned long iflags;
+
dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, iflags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
return NULL;
}
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index 365f94eb3b08..3f926a653bd8 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
config QCOM_HIDMA_MGMT
tristate "Qualcomm Technologies HIDMA Management support"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for the Qualcomm Technologies HIDMA Management.
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
index f8ffa02e279f..ba46a0a15a93 100644
--- a/drivers/dma/sf-pdma/Kconfig
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -1,5 +1,6 @@
config SF_PDMA
tristate "Sifive PDMA controller driver"
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index d530c1bf11d9..6885b3dcd7a9 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
return ret;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 265d7c07b348..e1827393143f 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
kfree(base->lcla_pool.base_unaligned);
+ if (base->lcpa_base)
+ iounmap(base->lcpa_base);
+
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 36ba8b43e78d..18cbd1e43c2e 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
return -ENOMEM;
}
- ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
if (ret < 0)
return ret;
@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
u32 ccr, id;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 70b29bd079c9..6c709803203a 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -113,6 +113,7 @@
#define XILINX_DPDMA_CH_VDO 0x020
#define XILINX_DPDMA_CH_PYLD_SZ 0x024
#define XILINX_DPDMA_CH_DESC_ID 0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
@@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
* will be used, but it should be enough.
*/
list_for_each_entry(sw_desc, &desc->descriptors, node)
- sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
+ sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
sw_desc = list_first_entry(&desc->descriptors,
struct xilinx_dpdma_sw_desc, node);
@@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
if (!chan->running || !pending)
goto out;
- desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
+ desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
/* If the retrigger raced with vsync, retry at the next frame. */
sw_desc = list_first_entry(&pending->descriptors,
@@ -1459,7 +1462,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
*/
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
- dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}
@@ -1596,6 +1599,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+ unsigned int i;
+ void __iomem *reg;
+
+ /* Disable all interrupts */
+ xilinx_dpdma_disable_irq(xdev);
+
+ /* Stop all channels */
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+ reg = xdev->reg + XILINX_DPDMA_CH_BASE
+ + XILINX_DPDMA_CH_OFFSET * i;
+ dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+ }
+
+ /* Clear the interrupt status registers */
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
static int xilinx_dpdma_probe(struct platform_device *pdev)
{
struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1645,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
if (IS_ERR(xdev->reg))
return PTR_ERR(xdev->reg);
+ dpdma_hw_init(xdev);
+
xdev->irq = platform_get_irq(pdev, 0);
if (xdev->irq < 0) {
dev_err(xdev->dev, "failed to get platform irq\n");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d8419565b92c..5fecf5aa6e85 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
struct zynqmp_dma_desc_sw *desc;
int i, ret;
- ret = pm_runtime_get_sync(chan->dev);
+ ret = pm_runtime_resume_and_get(chan->dev);
if (ret < 0)
return ret;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 1cbce5990855..97e6caedf1f3 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
-#define WCD_PIN_MASK(p) BIT(p - 1)
+#define WCD_PIN_MASK(p) BIT(p)
#define WCD_REG_DIR_CTL_OFFSET 0x42
#define WCD_REG_VAL_CTL_OFFSET 0x43
#define WCD934X_NPINS 5
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8a1fb8b6606e..c13985fb35be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1057,7 +1057,7 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
return 0;
err:
- drm_err(dev, "Failed to init gem fb: %d\n", ret);
+ drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
rfb->base.obj[0] = NULL;
return ret;
}
@@ -1094,7 +1094,7 @@ int amdgpu_display_gem_fb_verify_and_init(
return 0;
err:
- drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+ drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
rfb->base.obj[0] = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1345f7eba011..f9434bc2f9b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,7 +100,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
kfree(ubo->metadata);
}
- kfree(bo);
+ kvfree(bo);
}
/**
@@ -552,7 +552,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
*bo_ptr = NULL;
- bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+ bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 7ce76a6b3a35..327b1f8213a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -173,6 +173,9 @@
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid 0x4ca5
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX 1
+
#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28)
#define GFX_RLCG_GC_WRITE (0x0 << 28)
#define GFX_RLCG_GC_READ (0x1 << 28)
@@ -1480,8 +1483,15 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
scratch_reg3 = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
- spare_int = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+ + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+ } else {
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+ }
grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
@@ -6861,8 +6871,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
+ /* If GC has entered CGPG, ringing doorbell > first page doesn't
+ * wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to work around
+ * this issue.
+ */
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (adev->doorbell_index.userqueue_end * 2) << 2);
+ (adev->doorbell.size - 4));
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
@@ -7349,9 +7363,15 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+ tmp &= 0xffffff00;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ } else {
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 516467e962b7..c09225d065c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3673,8 +3673,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
+ /* If GC has entered CGPG, ringing doorbell > first page doesn't
+ * wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to work around
+ * this issue.
+ */
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (adev->doorbell_index.userqueue_end * 2) << 2);
+ (adev->doorbell.size - 4));
}
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index f5fe540cd536..27cf22716793 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -810,6 +810,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
data->fine_grain_enabled = 1;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index f2d46b7ac6f9..232abbba3686 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -314,9 +314,10 @@ int drm_master_open(struct drm_file *file_priv)
void drm_master_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index d273d1a8603a..495a4767a443 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
- mutex_lock(&master->dev->master_mutex);
+ mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len)) {
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return -EFAULT;
}
}
u->unique_len = master->unique_len;
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index b3fd3501c412..5275b2723293 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
* porches and sync.
*/
/* (ps/s) / (pixels/s) = ps/pixels */
- pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
+ pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
pclk);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index b4d8e1b01ee4..f6c1b62b901e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
* GPU registers so we need to add 0x1a800 to the register value on A630
* to get the right value from PM4.
*/
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_start));
/* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_end));
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, submit->seqno);
trace_msm_gpu_submit_flush(submit,
- gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
- REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
a6xx_flush(gpu, ring);
}
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const u32 *regs = a6xx_protect;
+ unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+ if (adreno_is_a650(adreno_gpu)) {
+ regs = a650_protect;
+ count = ARRAY_SIZE(a650_protect);
+ count_max = 48;
+ }
+
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation and select the last span to protect from the start
+ * address all the way to the end of the register address space
+ */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+ for (i = 0; i < count - 1; i++)
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+ /* program the last CP_PROTECT entry with an "infinite" length */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
- uavflagprd_inv >> 4 | lower_bit << 1);
+ uavflagprd_inv << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
}
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
}
/* Protect registers from the CP */
- gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
- gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
- A6XX_PROTECT_RDONLY(0x600, 0x51));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
- A6XX_PROTECT_RDONLY(0xfc00, 0x3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
- A6XX_PROTECT_RDONLY(0x0, 0x4f9));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
- A6XX_PROTECT_RDONLY(0x501, 0xa));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
- A6XX_PROTECT_RDONLY(0x511, 0x44));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
- A6XX_PROTECT_RW(0xbe20, 0x11f3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
- A6XX_PROTECT_RDONLY(0x980, 0x4));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+ a6xx_set_cp_protect(gpu);
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
@@ -1211,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
- if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+ if (a6xx_gpu->shadow_bo)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index ce0610c5256f..bb544dfe5737 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -44,7 +44,7 @@ struct a6xx_gpu {
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
* registers starting at _reg.
*/
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
((1 << 31) | \
(((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 34bc93548fcf..657778889d35 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -432,6 +432,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_10nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index e76ce40a12ab..6f96fbac8282 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -460,6 +460,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_7nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 56df86e5f740..369d91e6361e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1241,6 +1241,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
+ /* The call chain get_pages() -> update_inactive() tries to
+ * access msm_obj->mm_list, but it is not initialized yet.
+ * To avoid a NULL pointer dereference, initialize mm_list
+ * as an empty list.
+ */
+ INIT_LIST_HEAD(&msm_obj->mm_list);
+
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da..06bb24d7a9fe 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
size = radeon_bo_size(rdev->uvd.vcpu_bo);
size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size;
- memset(ptr, 0, size);
+ memset_io((void __iomem *)ptr, 0, size);
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index bbdfd5e26ec8..f75fb157f2ff 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_clk_tmds;
}
- ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+ ret = sun8i_hdmi_phy_get(hdmi, phy_node);
of_node_put(phy_node);
if (ret) {
dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
cleanup_encoder:
drm_encoder_cleanup(encoder);
- sun8i_hdmi_phy_remove(hdmi);
err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
- sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.of_match_table = sun8i_dw_hdmi_dt_ids,
},
};
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+ if (ret) {
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);
MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d4b55af0592f..74f6ed0e2570 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
struct gpio_desc *ddc_en;
};
+extern struct platform_driver sun8i_hdmi_phy_driver;
+
static inline struct sun8i_dw_hdmi *
encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun8i_dw_hdmi, encoder);
}
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 9994edf67509..c9239708d398 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include "sun8i_dw_hdmi.h"
@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{ /* sentinel */ }
};
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct sun8i_hdmi_phy *phy;
+
+ if (!pdev)
+ return -EPROBE_DEFER;
+
+ phy = platform_get_drvdata(pdev);
+ if (!phy)
+ return -EPROBE_DEFER;
+
+ hdmi->phy = phy;
+
+ put_device(&pdev->dev);
+
+ return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- struct device *dev = hdmi->dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct sun8i_hdmi_phy *phy;
struct resource res;
void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
clk_prepare_enable(phy->clk_phy);
}
- hdmi->phy = phy;
+ platform_set_drvdata(pdev, phy);
return 0;
@@ -728,9 +749,9 @@ err_put_clk_bus:
return ret;
}
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
{
- struct sun8i_hdmi_phy *phy = hdmi->phy;
+ struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
clk_put(phy->clk_pll1);
clk_put(phy->clk_mod);
clk_put(phy->clk_bus);
+ return 0;
}
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+ .probe = sun8i_hdmi_phy_probe,
+ .remove = sun8i_hdmi_phy_remove,
+ .driver = {
+ .name = "sun8i-hdmi-phy",
+ .of_match_table = sun8i_hdmi_phy_of_table,
+ },
+};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cfd0b9292397..ebcffe794adb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1172,7 +1172,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
return -EBUSY;
- if (!ttm_bo_get_unless_zero(bo)) {
+ if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ !ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
return -EBUSY;
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index a1dcf7d55c90..3d9c62b93e29 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -143,14 +143,8 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
list_for_each_entry(bo, &man->lru[j], lru) {
- uint32_t num_pages;
+ uint32_t num_pages = PFN_UP(bo->base.size);
- if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
- continue;
-
- num_pages = bo->ttm->num_pages;
ret = ttm_bo_swapout(bo, ctx, gfp_flags);
/* ttm_bo_swapout has dropped the lru_lock */
if (!ret)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index bb5529a7a9c2..948b3a58aad1 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+ ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
}
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 02298b86b57b..731d5117f9f1 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -771,6 +771,16 @@ static int corsairpsu_raw_event(struct hid_device *hdev, struct hid_report *repo
return 0;
}
+#ifdef CONFIG_PM
+static int corsairpsu_resume(struct hid_device *hdev)
+{
+ struct corsairpsu_data *priv = hid_get_drvdata(hdev);
+
+ /* some PSUs turn off the microcontroller during standby, so a reinit is required */
+ return corsairpsu_init(priv);
+}
+#endif
+
static const struct hid_device_id corsairpsu_idtable[] = {
{ HID_USB_DEVICE(0x1b1c, 0x1c03) }, /* Corsair HX550i */
{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
@@ -793,6 +803,10 @@ static struct hid_driver corsairpsu_driver = {
.probe = corsairpsu_probe,
.remove = corsairpsu_remove,
.raw_event = corsairpsu_raw_event,
+#ifdef CONFIG_PM
+ .resume = corsairpsu_resume,
+ .reset_resume = corsairpsu_resume,
+#endif
};
module_hid_driver(corsairpsu_driver);
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 25aac40f2764..919877970ae3 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
scpi_scale_reading(&value, sensor);
+ /*
+ * Temperature sensor values are treated as signed, based on
+ * observation, even though that is not explicitly specified: an
+ * unsigned u64 temperature makes little practical sense, especially
+ * when the temperature is below zero degrees Celsius.
+ */
+ if (sensor->info.class == TEMPERATURE)
+ return sprintf(buf, "%lld\n", (s64)value);
+
return sprintf(buf, "%llu\n", value);
}
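The cast above only reinterprets the same 64-bit pattern as signed before printing. A small stand-alone illustration (hypothetical reading of -5 degrees) of why %llu would misreport sub-zero temperatures:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t value = (uint64_t)(int64_t)-5;	/* raw sensor value for -5 degrees */

		printf("%llu\n", (unsigned long long)value);	/* prints 18446744073709551611 */
		printf("%lld\n", (long long)(int64_t)value);	/* prints -5 */
		return 0;
	}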
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index c2484f15298b..8bd6435c13e8 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -99,11 +99,14 @@
#define POWER_ENABLE 0x19
#define TPS23861_NUM_PORTS 4
+#define TPS23861_GENERAL_MASK_1 0x17
+#define TPS23861_CURRENT_SHUNT_MASK BIT(0)
+
#define TEMPERATURE_LSB 652 /* 0.652 degrees Celsius */
#define VOLTAGE_LSB 3662 /* 3.662 mV */
#define SHUNT_RESISTOR_DEFAULT 255000 /* 255 mOhm */
-#define CURRENT_LSB_255 62260 /* 62.260 uA */
-#define CURRENT_LSB_250 61039 /* 61.039 uA */
+#define CURRENT_LSB_250 62260 /* 62.260 uA */
+#define CURRENT_LSB_255 61039 /* 61.039 uA */
#define RESISTANCE_LSB 110966 /* 11.0966 Ohm*/
#define RESISTANCE_LSB_LOW 157216 /* 15.7216 Ohm*/
@@ -117,6 +120,7 @@ struct tps23861_data {
static struct regmap_config tps23861_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x6f,
};
static int tps23861_read_temp(struct tps23861_data *data, long *val)
@@ -560,6 +564,15 @@ static int tps23861_probe(struct i2c_client *client)
else
data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
+ if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
+ regmap_clear_bits(data->regmap,
+ TPS23861_GENERAL_MASK_1,
+ TPS23861_CURRENT_SHUNT_MASK);
+ else
+ regmap_set_bits(data->regmap,
+ TPS23861_GENERAL_MASK_1,
+ TPS23861_CURRENT_SHUNT_MASK);
+
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &tps23861_chip_info,
NULL);
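regmap_set_bits() and regmap_clear_bits() used above are shorthand for a read-modify-write through regmap_update_bits(); the two calls in the hunk above are equivalent to the following sketch (same register and mask names):

	/* default 255 mOhm shunt: clear the shunt-select bit */
	regmap_update_bits(data->regmap, TPS23861_GENERAL_MASK_1,
			   TPS23861_CURRENT_SHUNT_MASK, 0);

	/* non-default shunt: set the shunt-select bit */
	regmap_update_bits(data->regmap, TPS23861_GENERAL_MASK_1,
			   TPS23861_CURRENT_SHUNT_MASK, TPS23861_CURRENT_SHUNT_MASK);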
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d5e15a8c870d..64e4be1cbec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3248,6 +3248,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
goto err_free_attr;
}
+ if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
+ err = -EINVAL;
+ goto err_uobj;
+ }
+
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
err = -EINVAL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 22898d97ecbd..230a6ae0ab5a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
- if (!mlx4_is_slave(dev->dev))
- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
resp.response_length += sizeof(resp.hca_core_clock_offset);
- if (!err && !mlx4_is_slave(dev->dev)) {
+ if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
}
@@ -1702,9 +1699,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
- if (!rdma_is_port_valid(qp->device, flow_attr->port))
- return ERR_PTR(-EINVAL);
-
if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index eb92cefffd77..9ce01f729673 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
ib_umem_release(cq->buf.umem);
}
-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
- struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
for (i = 0; i < buf->nent; i++) {
- cqe = get_cqe(cq, i);
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe64->op_own = MLX5_CQE_INVALID << 4;
}
@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto err_db;
- init_cq_frag_buf(cq, &cq->buf);
+ init_cq_frag_buf(&cq->buf);
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto ex;
- init_cq_frag_buf(cq, cq->resize_buf);
+ init_cq_frag_buf(cq->resize_buf);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 61475b571531..7af4df7a6823 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
struct ib_umem *umem;
unsigned long user_virt;
int refcnt;
+ struct mm_struct *mm;
};
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
mutex_lock(&context->db_page_mutex);
list_for_each_entry(page, &context->db_page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if ((current->mm == page->mm) &&
+ (page->user_virt == (virt & PAGE_MASK)))
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
kfree(page);
goto out;
}
+ mmgrab(current->mm);
+ page->mm = current->mm;
list_add(&page->list, &context->db_page_list);
@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
if (!--db->u.user_page->refcnt) {
list_del(&db->u.user_page->list);
+ mmdrop(db->u.user_page->mm);
ib_umem_release(db->u.user_page->umem);
kfree(db->u.user_page);
}
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 2fc6a60c4e77..18ee2f293825 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
goto free_ucmd;
}
- if (flow_attr->port > dev->num_ports ||
- (flow_attr->flags &
- ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
+ if (flow_attr->flags &
+ ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
err = -EINVAL;
goto free_ucmd;
}
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
+ if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+ err = -EINVAL;
+ goto end;
+ }
+
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9662cd39c7ff..425423dfac72 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1940,8 +1940,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
mlx5r_deref_wait_odp_mkey(&mr->mmkey);
if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
- xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
- NULL, GFP_KERNEL);
+ xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, NULL, GFP_KERNEL);
if (mr->mtt_mr) {
rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index d5a90a66b45c..5b05cf3837da 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.kind = "ipoib",
+ .netns_refund = true,
.maxtype = IFLA_IPOIB_MAX,
.policy = ipoib_policy,
.priv_size = sizeof(struct ipoib_dev_priv),
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0a4551e165ab..5fc989a6d452 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -364,7 +364,6 @@ struct cached_dev {
/* The rest of this all shows up in sysfs */
unsigned int sequential_cutoff;
- unsigned int readahead;
unsigned int io_disable:1;
unsigned int verify:1;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 29c231758293..6d1de889baeb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned int sectors)
{
int ret = MAP_CONTINUE;
- unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
+ unsigned int size_limit;
s->cache_missed = 1;
@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
}
- if (!(bio->bi_opf & REQ_RAHEAD) &&
- !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
- s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
- reada = min_t(sector_t, dc->readahead >> 9,
- get_capacity(bio->bi_bdev->bd_disk) -
- bio_end_sector(bio));
-
- s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+ /* Limitation for valid replace key size and cache_bio bvecs number */
+ size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+ (1 << KEY_SIZE_BITS) - 1);
+ s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
s->iop.replace_key = KEY(s->iop.inode,
bio->bi_iter.bi_sector + s->insert_bio_sectors,
@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+ miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+ &s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
- if (reada)
- bch_mark_cache_readahead(s->iop.c, s->d);
-
s->cache_miss = miss;
s->iop.bio = cache_bio;
bio_get(cache_bio);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 503aafe188dc..4c7ee5fedb9d 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -46,7 +46,6 @@ read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
-read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);
@@ -64,7 +63,6 @@ SHOW(bch_stats)
DIV_SAFE(var(cache_hits) * 100,
var(cache_hits) + var(cache_misses)));
- var_print(cache_readaheads);
var_print(cache_miss_collisions);
sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
&sysfs_cache_bypass_hits,
&sysfs_cache_bypass_misses,
&sysfs_cache_hit_ratio,
- &sysfs_cache_readaheads,
&sysfs_cache_miss_collisions,
&sysfs_bypassed,
NULL
@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
acc->total.cache_misses = 0;
acc->total.cache_bypass_hits = 0;
acc->total.cache_bypass_misses = 0;
- acc->total.cache_readaheads = 0;
acc->total.cache_miss_collisions = 0;
acc->total.sectors_bypassed = 0;
}
@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
scale_stat(&stats->cache_misses);
scale_stat(&stats->cache_bypass_hits);
scale_stat(&stats->cache_bypass_misses);
- scale_stat(&stats->cache_readaheads);
scale_stat(&stats->cache_miss_collisions);
scale_stat(&stats->sectors_bypassed);
}
@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
move_stat(cache_misses);
move_stat(cache_bypass_hits);
move_stat(cache_bypass_misses);
- move_stat(cache_readaheads);
move_stat(cache_miss_collisions);
move_stat(sectors_bypassed);
@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
mark_cache_stats(&c->accounting.collector, hit, bypass);
}
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
-{
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
- atomic_inc(&dc->accounting.collector.cache_readaheads);
- atomic_inc(&c->accounting.collector.cache_readaheads);
-}
-
void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index abfaabf7e7fc..ca4f435f7216 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -7,7 +7,6 @@ struct cache_stat_collector {
atomic_t cache_misses;
atomic_t cache_bypass_hits;
atomic_t cache_bypass_misses;
- atomic_t cache_readaheads;
atomic_t cache_miss_collisions;
atomic_t sectors_bypassed;
};
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index cc89f3156d1a..05ac1d6fbbf3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -137,7 +137,6 @@ rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
-rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
var_printf(partial_stripes_expensive, "%u");
var_hprint(sequential_cutoff);
- var_hprint(readahead);
sysfs_print(running, atomic_read(&dc->running));
sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
@@ -365,7 +363,6 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(sequential_cutoff,
dc->sequential_cutoff,
0, UINT_MAX);
- d_strtoi_h(readahead);
if (attr == &sysfs_clear_stats)
bch_cache_accounting_clear(&dc->accounting);
@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_running,
&sysfs_state,
&sysfs_label,
- &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
&sysfs_bypass_torture_test,
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index a07674ed0596..4c5621b17a6f 100644
--- a/drivers/misc/cardreader/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -468,6 +468,7 @@ static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
pcr->ic_version = rtl8411_get_ic_version(pcr);
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
index 39a6a7ecc32e..29f5414072bf 100644
--- a/drivers/misc/cardreader/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -255,6 +255,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 8200af22b529..4bcfbc9afbac 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -358,6 +358,7 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
@@ -483,6 +484,7 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
rts5227_init_params(pcr);
pcr->ops = &rts522a_pcr_ops;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index 781a86def59a..ffc128278613 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -718,6 +718,7 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 89e6f124ca5c..c748eaf1ec1f 100644
--- a/drivers/misc/cardreader/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -246,6 +246,7 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index b2676e7f5027..53f3a1f45c4a 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -566,6 +566,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
@@ -729,6 +730,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
@@ -845,6 +847,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 080a7d67a8e1..9b42b20a3e5a 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -628,6 +628,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 6c64dade8e1a..1fd4e0e50730 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -783,6 +783,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = 0x00;
pcr->sd30_drive_sel_3v3 = 0x00;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 273311184669..baf83594a01d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -85,12 +85,18 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
if (pcr->aspm_enabled == enable)
return;
- if (pcr->aspm_en & 0x02)
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
- FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
- else
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
- FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ if (pcr->aspm_mode == ASPM_MODE_CFG) {
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ enable ? pcr->aspm_en : 0);
+ } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+ if (pcr->aspm_en & 0x02)
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+ FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ else
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+ FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ }
if (!enable && (pcr->aspm_en & 0x02))
mdelay(10);
@@ -1394,7 +1400,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+ if (pcr->aspm_mode == ASPM_MODE_REG)
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
@@ -1410,6 +1417,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
int err;
+ u16 cfg_val;
+ u8 val;
spin_lock_init(&pcr->lock);
mutex_init(&pcr->pcr_mutex);
@@ -1477,6 +1486,21 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
if (!pcr->slots)
return -ENOMEM;
+ if (pcr->aspm_mode == ASPM_MODE_CFG) {
+ pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
+ if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
+ pcr->aspm_enabled = true;
+ else
+ pcr->aspm_enabled = false;
+
+ } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
+ }
+
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
@@ -1506,7 +1530,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
- u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1572,11 +1595,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
- rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
- if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
- pcr->aspm_enabled = false;
- else
- pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
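In the ASPM_MODE_CFG branch of rtsx_comm_set_aspm() above, pcie_capability_clear_and_set_word() updates the ASPM Control field of the Link Control register in one call; an open-coded sketch of the same read-modify-write, using the same names, would be:

	u16 lnkctl;

	pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &lnkctl);
	lnkctl &= ~PCI_EXP_LNKCTL_ASPMC;	/* clear the two ASPM Control bits */
	if (enable)
		lnkctl |= pcr->aspm_en;		/* e.g. ASPM_L1_EN */
	pcie_capability_write_word(pcr->pci, PCI_EXP_LNKCTL, lnkctl);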
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 635bf31a6735..baab4c2e1b53 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -692,14 +692,19 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
+ int cmd_error;
+
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
- if (mmc_send_tuning(mmc, opcode, NULL) == 0)
+ if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
set_bit(i, priv->taps);
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
set_bit(i, priv->smpcmp);
+
+ if (cmd_error)
+ mmc_abort_tuning(mmc, opcode);
}
ret = renesas_sdhi_select_tuning(host);
@@ -939,7 +944,7 @@ static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
- { .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
+ { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
{ .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index d17482395a4d..4ffbfd534f18 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -350,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
rtnl_lock();
result = register_netdevice(dev);
if (result) {
+ tty_kref_put(tty);
rtnl_unlock();
free_netdev(dev);
return -ENODEV;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 029e77dfa773..a45865bd7254 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -82,6 +82,8 @@ struct mcba_priv {
bool can_ka_first_pass;
bool can_speed_check;
atomic_t free_ctx_cnt;
+ void *rxbuf[MCBA_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
};
/* CAN frame */
@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
}
buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
- GFP_KERNEL, &urb->transfer_dma);
+ GFP_KERNEL, &buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
if (err) {
usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
- buf, urb->transfer_dma);
+ buf, buf_dma);
usb_free_urb(urb);
break;
}
+ priv->rxbuf[i] = buf;
+ priv->rxbuf_dma[i] = buf_dma;
+
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
static void mcba_urb_unlink(struct mcba_priv *priv)
{
+ int i;
+
usb_kill_anchored_urbs(&priv->rx_submitted);
+
+ for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
+ usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+ priv->rxbuf[i], priv->rxbuf_dma[i]);
+
usb_kill_anchored_urbs(&priv->tx_submitted);
}
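The new rxbuf/rxbuf_dma arrays exist so that the coherent RX buffers can finally be released in mcba_urb_unlink(); each usb_alloc_coherent() must eventually be matched by a usb_free_coherent() with the same size and DMA handle, as in this sketch:

	dma_addr_t dma;
	void *buf;

	buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, GFP_KERNEL, &dma);
	if (buf) {
		/* ... buf/dma handed to a URB and submitted ... */
		usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, buf, dma);
	}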
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 881f88754bf6..52571486705e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
struct ena_tx_buffer *tx_info,
struct xdp_frame *xdpf,
- void **push_hdr,
- u32 *push_len)
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_adapter *adapter = xdp_ring->adapter;
struct ena_com_buf *ena_buf;
- dma_addr_t dma = 0;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
u32 size;
tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
size = tx_info->xdpf->len;
- ena_buf = tx_info->bufs;
- /* llq push buffer */
- *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
- *push_hdr = tx_info->xdpf->data;
+ if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
+
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
- if (size - *push_len > 0) {
+ if (size > 0) {
dma = dma_map_single(xdp_ring->dev,
- *push_hdr + *push_len,
- size - *push_len,
+ data,
+ size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
goto error_report_dma_error;
- tx_info->map_linear_data = 1;
- tx_info->num_of_bufs = 1;
- }
+ tx_info->map_linear_data = 0;
- ena_buf->paddr = dma;
- ena_buf->len = size;
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
return 0;
@@ -274,10 +286,6 @@ error_report_dma_error:
&xdp_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
- xdp_return_frame_rx_napi(tx_info->xdpf);
- tx_info->xdpf = NULL;
- tx_info->num_of_bufs = 0;
-
return -EINVAL;
}
@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
struct ena_com_tx_ctx ena_tx_ctx = {};
struct ena_tx_buffer *tx_info;
u16 next_to_use, req_id;
- void *push_hdr;
- u32 push_len;
int rc;
next_to_use = xdp_ring->next_to_use;
@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
tx_info = &xdp_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
+ rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
return rc;
- ena_tx_ctx.ena_bufs = tx_info->bufs;
- ena_tx_ctx.push_header = push_hdr;
- ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
ena_tx_ctx.req_id = req_id;
- ena_tx_ctx.header_len = push_len;
rc = ena_xmit_common(dev,
xdp_ring,
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index b3d74332ed33..7748b276e5fd 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1849,6 +1849,7 @@ out_free_netdev:
free_netdev(netdev);
out_pci_release:
pci_release_mem_regions(pdev);
+ pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fcc729d52b17..aef3fccc27a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7308,7 +7308,7 @@ skip_rdma:
entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
+ entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -11750,6 +11750,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
bnxt_hwrm_coal_params_qcaps(bp);
}
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
static int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -11764,6 +11766,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
netdev_err(bp->dev, "Firmware init phase 2 failed\n");
return rc;
}
+ rc = bnxt_probe_phy(bp, false);
+ if (rc)
+ return rc;
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;
@@ -13155,6 +13160,7 @@ init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_ethtool_free(bp);
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 61ea3ec5c3fc..83ed10ac8660 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
return ret;
}
- spin_lock_bh(&adap->win0_lock);
+ /* We have to RESET the chip/firmware because we need the
+ * chip in an uninitialized state to load the new PHY image.
+ * Otherwise, the running firmware will only store the PHY
+ * image in local RAM, which is lost after the next reset.
+ */
+ ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
+ ret);
+ return ret;
+ }
+
ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
- spin_unlock_bh(&adap->win0_lock);
- if (ret)
- dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
+ ret);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
u32 ftid)
{
struct tid_info *t = &adap->tids;
- struct filter_entry *f;
- if (ftid < t->nhpftids)
- f = &adap->tids.hpftid_tab[ftid];
- else if (ftid < t->nftids)
- f = &adap->tids.ftid_tab[ftid - t->nhpftids];
- else
- f = lookup_tid(&adap->tids, ftid);
+ if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
+ return &t->hpftid_tab[ftid - t->hpftid_base];
- return f;
+ if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
+ return &t->ftid_tab[ftid - t->ftid_base];
+
+ return lookup_tid(t, ftid);
}
static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
filter_id = filter_info->loc_array[cmd->fs.location];
f = cxgb4_get_filter_entry(adapter, filter_id);
+ if (f->fs.prio)
+ filter_id -= adapter->tids.hpftid_base;
+ else if (!f->fs.hash)
+ filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
if (ret)
goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
filter_info = &adapter->ethtool_filters->port[pi->port_id];
+ if (fs.prio)
+ tid += adapter->tids.hpftid_base;
+ else if (!fs.hash)
+ tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
filter_info->loc_array[cmd->fs.location] = tid;
set_bit(cmd->fs.location, filter_info->bmap);
filter_info->in_use++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 22c9ac922eba..6260b3bebd2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
- (u64)f->fs.nat_lip[0] << 25, 1);
+ (u64)f->fs.nat_lip[0] << 24, 1);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1f601de02e70..762113a04dde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
/* Load PHY Firmware onto adapter.
*/
- spin_lock_bh(&adap->win0_lock);
ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
(u8 *)phyf->data, phyf->size);
- spin_unlock_bh(&adap->win0_lock);
if (ret < 0)
dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
-ret);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9428ef1f04a8..a0555f4d76fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
* @addr: the start address to write
* @n: length of data to write in bytes
* @data: the data to write
+ * @byte_oriented: whether to store data as bytes or as words
*
* Writes up to a page of data (256 bytes) to the serial flash starting
* at the given address. All the data must be written to the same page.
+ * If @byte_oriented is set, the write data is stored as a byte stream
+ * (i.e. it matches what is on disk); otherwise it is stored big-endian.
*/
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
- unsigned int n, const u8 *data)
+ unsigned int n, const u8 *data, bool byte_oriented)
{
- int ret;
- u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
+ u32 buf[64];
+ int ret;
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
(ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
goto unlock;
- for (left = n; left; left -= c) {
+ for (left = n; left; left -= c, data += c) {
c = min(left, 4U);
- for (val = 0, i = 0; i < c; ++i)
- val = (val << 8) + *data++;
+ for (val = 0, i = 0; i < c; ++i) {
+ if (byte_oriented)
+ val = (val << 8) + data[i];
+ else
+ val = (val << 8) + data[c - i - 1];
+ }
ret = sf1_write(adapter, c, c != left, 1, val);
if (ret)
@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
- ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+ byte_oriented);
if (ret)
return ret;
@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
if (ret)
goto out;
@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
if (ret)
goto out;
}
- ret = t4_write_flash(adap,
- fw_start + offsetof(struct fw_hdr, fw_ver),
- sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+ ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
+ true);
out:
if (ret)
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
/* Copy the supplied PHY Firmware image to the adapter memory location
* allocated by the adapter firmware.
*/
+ spin_lock_bh(&adap->win0_lock);
ret = t4_memory_rw(adap, win, mtype, maddr,
phy_fw_size, (__be32 *)phy_fw_data,
T4_MEMORY_WRITE);
+ spin_unlock_bh(&adap->win0_lock);
if (ret)
return ret;
@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
n = size - i;
else
n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, true);
if (ret)
goto out;
@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
boot_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
+ false);
if (ret)
goto out;
}
ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
- (const u8 *)header);
+ (const u8 *)header, false);
out:
if (ret)
@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < size; i += SF_PAGE_SIZE) {
n = min_t(u32, size - i, SF_PAGE_SIZE);
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, false);
if (ret)
goto out;
@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < npad; i++) {
u8 data = 0;
- ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+ ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
+ false);
if (ret)
goto out;
}
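For the packing loop in t4_write_flash() above, the flag only changes the order in which the (up to) four data bytes are folded into the 32-bit word sent to the flash. With a hypothetical input of {0x11, 0x22, 0x33, 0x44}:

	/* byte_oriented == true:  val = 0x11223344 (bytes kept in file order)      */
	/* byte_oriented == false: val = 0x44332211 (bytes reversed within the word) */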
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 46b0dbab8aad..7c992172933b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
struct ec_bhf_priv *priv = netdev_priv(net_dev);
unregister_netdev(net_dev);
- free_netdev(net_dev);
pci_iounmap(dev, priv->dma_io);
pci_iounmap(dev, priv->io);
+
+ free_netdev(net_dev);
+
pci_release_regions(dev);
pci_clear_master(dev);
pci_disable_device(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b6eba29d8e99..7968568bbe21 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5897,6 +5897,7 @@ drv_cleanup:
unmap_bars:
be_unmap_pci_bars(adapter);
free_netdev:
+ pci_disable_pcie_error_reporting(pdev);
free_netdev(netdev);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1753807cbf97..d71eac7e1924 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
- if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_caps.enable = fec_ptp_enable;
fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+ if (!fep->cycle_speed) {
+ fep->cycle_speed = NSEC_PER_SEC;
+ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+ }
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
spin_lock_init(&fep->tmreg_lock);
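The fallback above matters because the very next statement computes fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; with a reported clock rate of zero this would divide by zero, while with the fallback the increment simply degrades to NSEC_PER_SEC / NSEC_PER_SEC = 1 alongside the error message.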
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d70ee573fde5..27f9dac8719c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1717,12 +1717,13 @@ setup_rings:
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1734,7 +1735,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
qg_buf->num_txqs = 1;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
goto err_cfg_txqs;
@@ -1754,7 +1755,7 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -1769,7 +1770,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
int ret;
int i;
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
if (ret)
return ret;
@@ -2009,17 +2010,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
* @rst_src: reset source
* @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
+ * @count: number of Tx ring array elements
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings)
+ u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
{
u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
struct ice_txq_meta txq_meta = { };
int status;
@@ -2047,7 +2049,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num)
{
- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -2056,7 +2058,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
*/
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4ee85a217c6f..0eb2307325d3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2556,6 +2556,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/**
+ * ice_xdp_safe_mode - XDP handler for safe mode
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
+ struct netdev_bpf *xdp)
+{
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Please provide working DDP firmware package in order to use XDP\n"
+ "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_xdp - implements XDP handler
* @dev: netdevice
* @xdp: XDP command
@@ -6937,6 +6951,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp_safe_mode,
};
static const struct net_device_ops ice_netdev_ops = {
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 36dc3e5f6218..21ef2f128070 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
dma_addr_t mapping;
int ret = 0;
@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
goto skip;
}
@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
ch->dma.desc %= LTQ_DESC_NUM;
if (ret) {
- ch->skb[ch->dma.desc] = skb;
net_dev->stats.rx_dropped++;
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
struct xrx200_chan *ch = ptr;
if (napi_schedule_prep(&ch->napi)) {
- __napi_schedule(&ch->napi);
ltq_dma_disable_irq(&ch->dma);
+ __napi_schedule(&ch->napi);
}
ltq_dma_ack_irq(&ch->dma);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6cfec81ccc3..dc4ac1a2b6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (mlx4_is_mfunc(dev))
disable_unsupported_roce_caps(outbox);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+ dev_cap->map_clock_to_user = field & 0x80;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
dev_cap->reserved_qps = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 8f020f26ebf5..cf64e54eecb0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
+ bool map_clock_to_user;
};
struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index c326b434734e..00c84656b2e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
+ dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
if (mlx4_is_slave(dev))
return -EOPNOTSUPP;
+ if (!dev->caps.map_clock_to_user) {
+ mlx4_dbg(dev, "Map clock to user is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
if (!params)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9166cd85013..ceebfc20f65e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
int ret = 0, i;
mutex_lock(&mlx5_intf_mutex);
+ priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
}
} else {
adev = &priv->adev[i]->adev;
+
+ /* Note that this is not the PCI driver that
+ * mlx5_core_dev is bound to, but an auxiliary driver.
+ *
+ * Here we can race module unload against devlink
+ * reload, but no extra lock is needed because we are
+ * holding the global mlx5_intf_mutex.
+ */
+ if (!adev->dev.driver)
+ continue;
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->resume)
@@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
adev = &priv->adev[i]->adev;
+ /* Auxiliary driver was unbound manually through sysfs */
+ if (!adev->dev.driver)
+ goto skip_suspend;
+
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->suspend) {
@@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
}
+skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
struct mlx5_priv *priv = &dev->priv;
lockdep_assert_held(&mlx5_intf_mutex);
+ if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
+ return 0;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 0dd7615e5931..bc33eaada3b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
struct mlx5e_priv *priv = netdev_priv(dev);
struct devlink_port *port;
+ if (!netif_device_present(dev))
+ return NULL;
port = mlx5e_devlink_get_dl_port(priv);
if (port->registered)
return port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d907c1acd4d5..778e229310a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies
-#include <linux/ptp_classify.h>
#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index ab935cce952b..c96668bd701c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -6,6 +6,7 @@
#include "en.h"
#include "en_stats.h"
+#include <linux/ptp_classify.h>
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
@@ -43,6 +44,27 @@ struct mlx5e_ptp {
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};
+static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+ struct flow_keys fk;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return false;
+
+ if (fk.basic.n_proto == htons(ETH_P_1588))
+ return true;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return false;
+
+ return (fk.basic.ip_proto == IPPROTO_UDP &&
+ fk.ports.dst == htons(PTP_EV_PORT));
+}
+
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_ptp **cp);
void mlx5e_ptp_close(struct mlx5e_ptp *c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
index be0ee03de721..2e9bee4e5209 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
work);
struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
struct neighbour *n = update_work->n;
+ struct mlx5e_encap_entry *e = NULL;
bool neigh_connected, same_dev;
- struct mlx5e_encap_entry *e;
unsigned char ha[ETH_ALEN];
- struct mlx5e_priv *priv;
u8 nud_state, dead;
rtnl_lock();
@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
if (!same_dev)
goto out;
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- if (!mlx5e_encap_take(e))
- continue;
+ /* mlx5e_get_next_init_encap() releases previous encap before returning
+ * the next one.
+ */
+ while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
+ mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
- priv = netdev_priv(e->out_dev);
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
- mlx5e_encap_put(priv, e);
- }
out:
rtnl_unlock();
mlx5e_release_neigh_update_work(update_work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 311382261840..85eaadc989df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
ASSERT_RTNL();
- /* wait for encap to be fully initialized */
- wait_for_completion(&e->res_ready);
-
mutex_lock(&esw->offloads.encap_tbl_lock);
encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result < 0 || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
+ if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
goto unlock;
mlx5e_take_all_encap_flows(e, &flow_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index f1fb11680d20..490131e06efb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -251,9 +251,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
mlx5e_take_tmp_flow(flow, flow_list, 0);
}
+typedef bool (match_cb)(struct mlx5e_encap_entry *);
+
static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
- struct mlx5e_encap_entry *e)
+mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e,
+ match_cb match)
{
struct mlx5e_encap_entry *next = NULL;
@@ -288,7 +291,7 @@ retry:
/* wait for encap to be fully initialized */
wait_for_completion(&next->res_ready);
/* continue searching if encap entry is not in valid state after completion */
- if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ if (!match(next)) {
e = next;
goto retry;
}
@@ -296,6 +299,30 @@ retry:
return next;
}
+static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
+{
+ return e->flags & MLX5_ENCAP_ENTRY_VALID;
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
+}
+
+static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
+{
+ return e->compl_result >= 0;
+}
+
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
+}
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3d45341e2216..26f7fab109d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!priv->ipsec)
- return;
-
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 5cd466ec6492..25403af32859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -356,7 +356,7 @@ err:
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
- int err = 0;
+ int err = -ENOMEM;
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ec6bafe7a2e5..d26b8ed51195 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2705,8 +2705,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
- if (priv->channels.params.ptp_rx)
- num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@ -4824,22 +4822,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
- netdev->hw_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_GRE;
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dd64878e5b38..d4b0f270b6bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -4765,7 +4765,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
- hpe->hp->pair->peer_gone = true;
+ mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
mlx5e_hairpin_put(priv, hpe);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 25c091795bcd..17027536efba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e);
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 8ba62671f5f1..320fe0cda917 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,7 +32,6 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
-#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
@@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
}
#endif
-static bool mlx5e_use_ptpsq(struct sk_buff *skb)
-{
- struct flow_keys fk;
-
- if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
- return false;
-
- if (fk.basic.n_proto == htons(ETH_P_1588))
- return true;
-
- if (fk.basic.n_proto != htons(ETH_P_IP) &&
- fk.basic.n_proto != htons(ETH_P_IPV6))
- return false;
-
- return (fk.basic.ip_proto == IPPROTO_UDP &&
- fk.ports.dst == htons(PTP_EV_PORT));
-}
-
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
}
ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel) &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb))
+ if (unlikely(ptp_channel &&
+ test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+ mlx5e_use_ptpsq(skb)))
return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 77c0ca655975..940333410267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
eqe = next_eqe_sw(eq);
if (!eqe)
- return 0;
+ goto out;
do {
struct mlx5_core_cq *cq;
@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+
+out:
eq_update_ci(eq, 1);
if (cqn != -1)
@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
- eq_update_ci(eq, 1);
out:
+ eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(recovery) ? num_eqes : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b88705a3a1a8..97e6cb6f13c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1054,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
goto err_vhca_mapping;
}
+ /* External controller host PF has factory programmed MAC.
+ * Read it from the device.
+ */
+ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a1d67bd7fb43..0d0f63a27aba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1161,7 +1161,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_core_set_hca_defaults(dev);
if (err) {
mlx5_core_err(dev, "Failed to set hca defaults\n");
- goto err_sriov;
+ goto err_set_hca;
}
mlx5_vhca_event_start(dev);
@@ -1194,6 +1194,7 @@ err_ec:
mlx5_sf_hw_table_destroy(dev);
err_vhca:
mlx5_vhca_event_stop(dev);
+err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 50af84e76fb6..174f71ed5280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key |= mlx5_idx_to_mkey(mkey_index);
+ mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
init_waitqueue_head(&mkey->wait);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 441b5453acae..540cf05f6373 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
+ if (!MLX5_CAP_GEN(dev, roce))
+ return;
+
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 6a0c6f965ad1..fa0288afc0dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
+ case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 054c2e2b6554..7466f016375c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -694,7 +694,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
return -EINVAL;
- memcpy(padded_data, data, data_sz);
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+ /* Add an alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
/* Remove L2L3 outer headers */
MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
@@ -706,32 +710,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
hw_action += DR_STE_ACTION_DOUBLE_SZ;
used_actions++; /* Remove and NOP are a single double action */
- inline_data_sz =
- MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
- /* Add the new header inline + 2 extra bytes */
+ /* Add the new header using inline action 4Byte at a time, the header
+ * is added in reversed order to the beginning of the packet to avoid
+ * incorrect parsing by the HW. Since header is 14B or 18B an extra
+ * two bytes are padded and later removed.
+ */
for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
void *addr_inline;
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_INLINE);
/* The hardware expects here offset to words (2 bytes) */
- MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
- i * 2);
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
/* Copy bytes one by one to avoid endianness problem */
addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
hw_action, inline_data);
- memcpy(addr_inline, data_ptr, inline_data_sz);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
hw_action += DR_STE_ACTION_DOUBLE_SZ;
- data_ptr += inline_data_sz;
used_actions++;
}
- /* Remove 2 extra bytes */
+ /* Remove first 2 extra bytes */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
- MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
/* The hardware expects here size in words (2 bytes) */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
used_actions++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 612b0ac31db2..9737565cd8d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
- (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
- (MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_6DX));
+ return MLX5_CAP_GEN(dev, roce) &&
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+ (MLX5_CAP_GEN(dev, steering_format_version) <=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 01cc00ad8acf..b6931bbe52d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -424,6 +424,15 @@ err_modify_sq:
return err;
}
+static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
{
int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
MLX5_RQC_STATE_RST, 0, 0);
-
/* unset peer SQs */
- if (hp->peer_gone)
- return;
- for (i = 0; i < hp->num_channels; i++)
- mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_RST, 0, 0);
+ if (!hp->peer_gone)
+ mlx5_hairpin_unpair_peer_sq(hp);
}
struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
mlx5_hairpin_destroy_queues(hp);
kfree(hp);
}
+
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ mlx5_hairpin_unpair_peer_sq(hp);
+
+ /* destroy peer SQ */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+
+ hp->peer_gone = true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 457ad42eaa2a..4c1440a95ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
void *in;
int err;
- if (!vport)
- return -EINVAL;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index dfea14399607..85f0ce285146 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -693,7 +693,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- NULL, 0, 0);
+ NULL, 0,
+ module_tz->parent->polling_delay);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
return err;
@@ -815,7 +816,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
- NULL, 0, 0);
+ NULL, 0,
+ gearbox_tz->parent->polling_delay);
if (IS_ERR(gearbox_tz->tzdev))
return PTR_ERR(gearbox_tz->tzdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 900b4bf5bb5b..2bc5a9003c6d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 04672eb5c7f3..9958d503bf0e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
+ u32 parent;
if (band < mlxsw_sp_qdisc->num_classes &&
mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
@@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
- mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
+ parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
+ parent);
if (!WARN_ON(!mlxsw_sp_qdisc))
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0c4283319d7f..adfb9781799e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
int ocelot_port_flush(struct ocelot *ocelot, int port)
{
+ unsigned int pause_ena;
int err, val;
/* Disable dequeuing from the egress queues */
@@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
QSYS_PORT_MODE, port);
/* Disable flow control */
+ ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
/* Disable priority flow control */
@@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
/* Clear flushing again. */
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+ /* Re-enable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
return err;
}
EXPORT_SYMBOL(ocelot_port_flush);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 7e6bac85495d..344ea1143454 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1602,6 +1602,8 @@ err_out_free_netdev:
free_netdev(netdev);
err_out_free_res:
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 17d5b649eb36..e81dd34a3cac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
- sizeof(struct qed_dcbx_admin_params));
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;
memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 96b947fde646..3beafc60747e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2690,6 +2690,7 @@ err_out_free_hw_res:
kfree(ahw);
err_out_free_res:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 41fbd2ceeede..ab1e0fcccabb 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
- struct rmnet_vnd_stats total_stats;
+ struct rmnet_vnd_stats total_stats = { };
struct rmnet_pcpu_stats *pcpu_ptr;
+ struct rmnet_vnd_stats snapshot;
unsigned int cpu, start;
- memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
-
for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
- total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
- total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
- total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
- total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+ snapshot = pcpu_ptr->stats; /* struct assignment */
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
- total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+ total_stats.rx_pkts += snapshot.rx_pkts;
+ total_stats.rx_bytes += snapshot.rx_bytes;
+ total_stats.tx_pkts += snapshot.tx_pkts;
+ total_stats.tx_bytes += snapshot.tx_bytes;
+ total_stats.tx_drops += snapshot.tx_drops;
}
s->rx_packets = total_stats.rx_pkts;
@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 2c89cde7da1e..2ee72dc431cd 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1671,7 +1671,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch(stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
break;
}
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c5b154868c1f..713d3629b4c1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *sh_eth_gstrings_stats,
+ memcpy(data, sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b70d44ac0990..3c73453725f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -76,10 +76,10 @@ enum power_event {
#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
/* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
- (reg * 8))
-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
- (reg * 8))
+#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+ 0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+ 0x00000044 + (reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1
#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1e17a23d9118..a696ada013eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -622,6 +622,8 @@ error_pclk_get:
void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
{
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a1f5f07f4ca9..9a13953ea70f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) {
+ /* Make sure that the other fields are read after bd is
+ * released by dma
+ */
+ rmb();
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
skb = (struct sk_buff *)ptr_from_txbd(cur_p);
if (skb)
dev_consume_skb_irq(skb);
- cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
+ /* app0 must be visible last, as it is used to flag
+ * availability of the bd
+ */
+ smp_mb();
+ cur_p->app0 = 0;
+
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
if (cur_p->app0)
return NETDEV_TX_BUSY;
+ /* Make sure to read next bd app0 after this one */
+ rmb();
+
tail++;
if (tail >= lp->tx_bd_num)
tail = 0;
@@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
smp_mb();
/* Space might have just been freed - check again */
- if (temac_check_tx_bd_space(lp, num_frag))
+ if (temac_check_tx_bd_space(lp, num_frag + 1))
return NETDEV_TX_BUSY;
netif_wake_queue(ndev);
@@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
- ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
+ /* Mark last fragment with skb address, so it can be consumed
+ * in temac_start_xmit_done()
+ */
+ ptr_to_txbd((void *)skb, cur_p);
+
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= lp->tx_bd_num)
@@ -926,6 +942,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
+ netif_stop_queue(ndev);
+ }
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 65154224d5b8..7685a1721597 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
ax->tty = NULL;
unregister_netdev(ax->dev);
+ free_netdev(ax->dev);
}
/* Perform I/O control on an active ax25 channel. */
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
index 0d8293a47a56..b806f2f8f859 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi/net.c
@@ -49,7 +49,7 @@ static int mhi_ndo_stop(struct net_device *ndev)
return 0;
}
-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
const struct mhi_net_proto *proto = mhi_netdev->proto;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9bd9a5c0b1db..6bbc81ad295f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -826,16 +826,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
if (err < 0)
return err;
usleep_range(10, 20);
- /* After reset FORCE_LINK_GOOD bit is set. Although the
- * default value should be unset. Disable FORCE_LINK_GOOD
- * for the phy to work properly.
- */
return phy_modify(phydev, MII_DP83867_PHYCTRL,
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 2e60bc1b9a6b..359ea0d10e59 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+ dev_kfree_skb_any(skb);
if (!skb2)
return NULL;
- dev_kfree_skb_any(skb);
skb = skb2;
done:
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b04055fd1b79..df0d1837e4ed 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1880,7 +1880,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_LINK_INTR,
+ | FLAG_LINK_INTR | FLAG_ETHER,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6700f1970b24..bc55ec739af9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -575,7 +575,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
skb->protocol = htons(ETH_P_MAP);
- return (netif_rx(skb) == NET_RX_SUCCESS);
+ return 1;
}
switch (skb->data[0] & 0xf0) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f6abb2fbf972..e25bfb7021ed 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -8678,7 +8678,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+ memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
break;
}
}
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index b286993da67c..13141dbfa3a8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
- goto err;
+ goto free_pdata;
}
smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_reset(dev);
if (ret < 0) {
netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
- goto err;
+ goto cancel_work;
}
dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
return 0;
-err:
+cancel_work:
+ cancel_work_sync(&pdata->set_multicast);
+free_pdata:
kfree(pdata);
+ dev->data[0] = 0;
return ret;
}
@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
- pdata = NULL;
dev->data[0] = 0;
}
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 503e2fd7ce51..28a6c4cfe9b8 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
dev->flags = IFF_MASTER | IFF_NOARP;
- /* MTU is irrelevant for VRF device; set to 64k similar to lo */
- dev->mtu = 64 * 1024;
-
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
netdev_lockdep_set_classes(dev);
@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
* which breaks networking.
*/
dev->min_mtu = IPV6_MIN_MTU;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->max_mtu = IP6_MAX_MTU;
+ dev->mtu = dev->max_mtu;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 51ce767eaf88..7a6fd46d0c6e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1693,8 +1693,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
+
data->started = false;
hrtimer_cancel(&data->beacon_timer);
+
+ while (!skb_queue_empty(&data->pending))
+ ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+
wiphy_dbg(hw->wiphy, "%s\n", __func__);
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 193b723fe3bd..c58996c1e230 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
if (queue->task) {
kthread_stop(queue->task);
+ put_task_struct(queue->task);
queue->task = NULL;
}
@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
if (IS_ERR(task))
goto kthread_err;
queue->task = task;
+ /*
+ * Take a reference to the task in order to prevent it from being freed
+ * if the thread function returns before kthread_stop is called.
+ */
+ get_task_struct(task);
task = kthread_run(xenvif_dealloc_kthread, queue,
"%s-dealloc", queue->name);
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index eca805c1a023..9e6ce0dc2f53 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
@@ -38,6 +39,6 @@ ifdef CONFIG_ACPI
ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
-obj-$(CONFIG_ARM64) += pcie-tegra194.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
endif
endif
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644
index 000000000000..c2de6ed4d86f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index bafd2c6ab3c2..504669e3afe0 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -22,8 +22,6 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/pci-ecam.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -247,24 +245,6 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-static const u32 event_cntr_ctrl_offset[] = {
- 0x1d8,
- 0x1a8,
- 0x1a8,
- 0x1a8,
- 0x1c4,
- 0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
- 0x1dc,
- 0x1ac,
- 0x1ac,
- 0x1ac,
- 0x1c8,
- 0x1dc
-};
-
struct tegra_pcie_dw {
struct device *dev;
struct resource *appl_res;
@@ -313,104 +293,6 @@ struct tegra_pcie_dw_of_data {
enum dw_pcie_device_mode mode;
};
-#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-struct tegra194_pcie_ecam {
- void __iomem *config_base;
- void __iomem *iatu_base;
- void __iomem *dbi_base;
-};
-
-static int tegra194_acpi_init(struct pci_config_window *cfg)
-{
- struct device *dev = cfg->parent;
- struct tegra194_pcie_ecam *pcie_ecam;
-
- pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
- if (!pcie_ecam)
- return -ENOMEM;
-
- pcie_ecam->config_base = cfg->win;
- pcie_ecam->iatu_base = cfg->win + SZ_256K;
- pcie_ecam->dbi_base = cfg->win + SZ_512K;
- cfg->priv = pcie_ecam;
-
- return 0;
-}
-
-static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
- u32 val, u32 reg)
-{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- writel(val, pcie_ecam->iatu_base + offset + reg);
-}
-
-static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
-{
- atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
- PCIE_ATU_LOWER_BASE);
- atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
- PCIE_ATU_UPPER_BASE);
- atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
- PCIE_ATU_LOWER_TARGET);
- atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
- PCIE_ATU_LIMIT);
- atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
- PCIE_ATU_UPPER_TARGET);
- atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
- atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
-
-static void __iomem *tegra194_map_bus(struct pci_bus *bus,
- unsigned int devfn, int where)
-{
- struct pci_config_window *cfg = bus->sysdata;
- struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
- u32 busdev;
- int type;
-
- if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
- return NULL;
-
- if (bus->number == cfg->busr.start) {
- if (PCI_SLOT(devfn) == 0)
- return pcie_ecam->dbi_base + where;
- else
- return NULL;
- }
-
- busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
- PCIE_ATU_FUNC(PCI_FUNC(devfn));
-
- if (bus->parent->number == cfg->busr.start) {
- if (PCI_SLOT(devfn) == 0)
- type = PCIE_ATU_TYPE_CFG0;
- else
- return NULL;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- }
-
- program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
- SZ_256K);
-
- return pcie_ecam->config_base + where;
-}
-
-const struct pci_ecam_ops tegra194_pcie_ops = {
- .init = tegra194_acpi_init,
- .pci_ops = {
- .map_bus = tegra194_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
-
-#ifdef CONFIG_PCIE_TEGRA194
-
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
return container_of(pci, struct tegra_pcie_dw, pci);
@@ -694,6 +576,24 @@ static struct pci_ops tegra_pci_ops = {
};
#if defined(CONFIG_PCIEASPM)
+static const u32 event_cntr_ctrl_offset[] = {
+ 0x1d8,
+ 0x1a8,
+ 0x1a8,
+ 0x1a8,
+ 0x1c4,
+ 0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+ 0x1dc,
+ 0x1ac,
+ 0x1ac,
+ 0x1ac,
+ 0x1c8,
+ 0x1dc
+};
+
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -2411,5 +2311,3 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");
-
-#endif /* CONFIG_PCIE_TEGRA194 */
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 051b48bd7985..e3f5e7ab7606 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
udelay(PIO_RETRY_DELAY);
}
- dev_err(dev, "config read/write timed out\n");
+ dev_err(dev, "PIO read/write transfer time out\n");
return -ETIMEDOUT;
}
@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
return true;
}
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+
+ /*
+ * Trying to start a new PIO transfer when previous has not completed
+ * cause External Abort on CPU which results in kernel panic:
+ *
+ * SError Interrupt on CPU0, code 0xbf000002 -- SError
+ * Kernel panic - not syncing: Asynchronous SError Interrupt
+ *
+ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+ * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
+ * concurrent calls at the same time. But because PIO transfer may take
+ * about 1.5s when link is down or card is disconnected, it means that
+ * advk_pcie_wait_pio() does not always have to wait for completion.
+ *
+ * Some versions of ARM Trusted Firmware handle this External Abort at
+ * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
+ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+ */
+ if (advk_readl(pcie, PIO_START)) {
+ dev_err(dev, "Previous PIO read/write transfer is still running\n");
+ return true;
+ }
+
+ return false;
+}
+
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return pci_bridge_emul_conf_read(&pcie->bridge, where,
size, val);
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie)) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (where % size)
return PCIBIOS_SET_FAILED;
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie))
+ return PCIBIOS_SET_FAILED;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 85dcb7097da4..a143b02b2dcd 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -353,6 +353,8 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
dev_node);
*io_base = range.cpu_addr;
+ } else if (resource_type(res) == IORESOURCE_MEM) {
+ res->flags &= ~IORESOURCE_MEM_64;
}
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index dcb229de1acb..22b2bb1109c9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3547,6 +3547,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
}
/*
+ * Some NVIDIA GPU devices do not work with bus reset; SBR needs to be
+ * prevented for those affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+ if ((dev->device & 0xffc0) == 0x2340)
+ quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ quirk_nvidia_no_bus_reset);
+
+/*
* Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
* The device will throw a Link Down error on AER-capable systems and
* regardless of AER, config space of the device is never accessible again
@@ -3566,6 +3578,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working. Prevent bus reset for these devices. With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs. Reference
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
+
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
@@ -3901,6 +3923,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
return 0;
}
+#define PCI_DEVICE_ID_HINIC_VF 0x375E
+#define HINIC_VF_FLR_TYPE 0x1000
+#define HINIC_VF_FLR_CAP_BIT (1UL << 30)
+#define HINIC_VF_OP 0xE80
+#define HINIC_VF_FLR_PROC_BIT (1UL << 18)
+#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+ unsigned long timeout;
+ void __iomem *bar;
+ u32 val;
+
+ if (probe)
+ return 0;
+
+ bar = pci_iomap(pdev, 0, 0);
+ if (!bar)
+ return -ENOTTY;
+
+ /* Get and check firmware capabilities */
+ val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+ if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+ pci_iounmap(pdev, bar);
+ return -ENOTTY;
+ }
+
+ /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+ val = ioread32be(bar + HINIC_VF_OP);
+ val = val | HINIC_VF_FLR_PROC_BIT;
+ iowrite32be(val, bar + HINIC_VF_OP);
+
+ pcie_flr(pdev);
+
+ /*
+ * The device must recapture its Bus and Device Numbers after FLR
+ * in order to generate Completions. Issue a config write to let the
+ * device capture this information.
+ */
+ pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+ /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+ do {
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+ msleep(20);
+ } while (time_before(jiffies, timeout));
+
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+
+ pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+ pci_iounmap(pdev, bar);
+
+ return 0;
+}
+
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@@ -3913,6 +3998,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
+ { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+ reset_hinic_vf_dev },
{ 0 }
};
@@ -4753,6 +4840,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+ /* Broadcom multi-function device */
+ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -5154,7 +5243,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
- (pdev->device == 0x7340 && pdev->revision != 0xc5))
+ (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
+ (pdev->device == 0x7341 && pdev->revision != 0x00))
return;
if (pdev->device == 0x15d8) {
@@ -5181,6 +5271,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
/* AMD Raven platform iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index 899b9eb43fad..a39f30fa2e99 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
static inline void brcm_usb_writel(u32 val, void __iomem *addr)
{
/* See brcmnand_readl() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 5c68e31c5939..e93818e3991f 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -940,6 +940,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->nsubnodes = node;
if (sp->num_lanes > SIERRA_MAX_LANES) {
+ ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
goto put_child2;
}
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index cdbcc49f7115..731c483a04de 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
break;
default:
dev_err(tphy->dev, "incompatible PHY type\n");
+ clk_disable_unprepare(instance->ref_clk);
+ clk_disable_unprepare(instance->da_ref_clk);
return -EINVAL;
}
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
index c8a7d0927ced..4076580fc2cd 100644
--- a/drivers/phy/microchip/sparx5_serdes.c
+++ b/drivers/phy/microchip/sparx5_serdes.c
@@ -2470,6 +2470,10 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
priv->coreclock = clock;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ dev_err(priv->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
if (IS_ERR(iomem)) {
dev_err(priv->dev, "Unable to get serdes registers: %s\n",
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 753cb5bab930..2a9465f4bb3a 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -341,7 +341,7 @@ static struct platform_driver mt7621_pci_phy_driver = {
.probe = mt7621_pci_phy_probe,
.driver = {
.name = "mt7621-pci-phy",
- .of_match_table = of_match_ptr(mt7621_pci_phy_ids),
+ .of_match_table = mt7621_pci_phy_ids,
},
};
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 9eb6d37c907e..126f5b8735cc 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1212,6 +1212,7 @@ static int wiz_probe(struct platform_device *pdev)
if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+ ret = -EINVAL;
dev_err(dev, "Invalid typec-dir-debounce property\n");
goto err_addr_to_resource;
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 996ebcba4d38..4c0d26606b6c 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2702,8 +2702,8 @@ static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
}
/**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g5_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
*
* @ctx: The pinmux context
* @expr: The expression associated with the function whose signal is to be
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 5c1a109842a7..eeab093a7815 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -2611,8 +2611,8 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
};
/**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g6_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
*
* @ctx: The pinmux context
* @expr: The expression associated with the function whose signal is to be
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 9c65d560d48f..9bbfe5c14b36 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -108,7 +108,8 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
}
/**
- * Disable a signal on a pin by disabling all provided signal expressions.
+ * aspeed_disable_sig() - Disable a signal on a pin by disabling all provided
+ * signal expressions.
*
* @ctx: The pinmux context
* @exprs: The list of signal expressions (from a priority level on a pin)
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c
index 57305ca838a7..894e2efd3be7 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c
@@ -21,7 +21,8 @@ static inline void aspeed_sig_desc_print_val(
}
/**
- * Query the enabled or disabled state of a signal descriptor
+ * aspeed_sig_desc_eval() - Query the enabled or disabled state of a signal
+ * descriptor.
*
* @desc: The signal descriptor of interest
* @enabled: True to query the enabled state, false to query disabled state
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 25d2f7f7f3b6..11e967dbb44b 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -223,7 +223,7 @@ config PINCTRL_SC7280
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 5aaf57b40407..0bb4931cec59 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -410,15 +410,15 @@ static const char * const gpio_groups[] = {
"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
- "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
- "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
- "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
- "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
- "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
- "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
- "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
- "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
- "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107",
};
static const char * const qdss_stm_groups[] = {
diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
index 1f4bca854add..a9b511c7e850 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
if (p->groups[group].enabled) {
dev_err(p->dev, "%s is already enabled\n",
p->groups[group].name);
- return -EBUSY;
+ return 0;
}
p->groups[group].enabled = 1;
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index a9db2f32658f..b013445147dd 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, priv->irq,
mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
- | IRQF_SHARED | IRQF_NO_AUTOEN,
- "mlxreg-hotplug", priv);
+ | IRQF_SHARED, "mlxreg-hotplug", priv);
if (err) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
return err;
}
+ disable_irq(priv->irq);
spin_lock_init(&priv->lock);
INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
dev_set_drvdata(&pdev->dev, priv);
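
A note on the pattern adopted in the hunk above: request_threaded_irq() rejects IRQF_NO_AUTOEN when combined with IRQF_SHARED, so for a shared line the handler has to be requested the normal (auto-enabled) way and masked again right away with disable_irq() until the driver is ready. A minimal sketch of that sequence, with placeholder names (example_priv, example_irq_handler):

#include <linux/interrupt.h>

struct example_priv { int irq; };			/* placeholder */

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;				/* placeholder */
}

static int example_request_shared_irq(struct device *dev, struct example_priv *priv)
{
	int err;

	/* A shared line cannot be requested with IRQF_NO_AUTOEN... */
	err = devm_request_irq(dev, priv->irq, example_irq_handler,
			       IRQF_TRIGGER_FALLING | IRQF_SHARED,
			       "example-hotplug", priv);
	if (err)
		return err;

	/* ...so mask it here and re-enable it once setup has finished. */
	disable_irq(priv->irq);
	return 0;
}
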
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 8a70df60142c..a06964aa96e7 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -1907,7 +1907,7 @@ static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
{
int status;
- status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
if (status < 0 && status != -EINVAL) {
ssam_err(ctrl,
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index 685d37a7add1..ef83461fa536 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -156,7 +156,7 @@ static const struct software_node *ssam_node_group_sl2[] = {
NULL,
};
-/* Devices for Surface Laptop 3. */
+/* Devices for Surface Laptop 3 and 4. */
static const struct software_node *ssam_node_group_sl3[] = {
&ssam_node_root,
&ssam_node_bat_ac,
@@ -521,9 +521,12 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop 3 (13", Intel) */
{ "MSHW0114", (unsigned long)ssam_node_group_sl3 },
- /* Surface Laptop 3 (15", AMD) */
+ /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
{ "MSHW0110", (unsigned long)ssam_node_group_sl3 },
+ /* Surface Laptop 4 (13", Intel) */
+ { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 5d9b758a99bb..1203b9a82993 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -427,6 +427,7 @@ static int surface_dtx_open(struct inode *inode, struct file *file)
*/
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
up_write(&ddev->client_lock);
+ mutex_destroy(&client->read_lock);
sdtx_device_put(client->ddev);
kfree(client);
return -ENODEV;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd60c9397d35..edd71e744d27 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8853,6 +8853,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3rd gen) */

TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
+ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 03a246e60fd9..21c4c34c52d8 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
spin_unlock_irqrestore(&queue->lock, flags);
}
-s32 scaled_ppm_to_ppb(long ppm)
+long scaled_ppm_to_ppb(long ppm)
{
/*
* The 'freq' field in the 'struct timex' is in parts per
@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
s64 ppb = 1 + ppm;
ppb *= 125;
ppb >>= 13;
- return (s32) ppb;
+ return (long) ppb;
}
EXPORT_SYMBOL(scaled_ppm_to_ppb);
@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
- s32 ppb = scaled_ppm_to_ppb(tx->freq);
+ long ppb = scaled_ppm_to_ppb(tx->freq);
if (ppb > ops->max_adj || ppb < -ops->max_adj)
return -ERANGE;
if (ops->adjfine)
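
The conversion being widened above is plain fixed-point arithmetic: the timex freq value is ppm with a 16-bit binary fraction, so ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13, and returning long rather than s32 keeps a very large request from wrapping before the max_adj range check. A stand-alone user-space check of the same computation, mirroring the hunk above:

#include <stdio.h>

static long scaled_ppm_to_ppb(long ppm)
{
	long long ppb = 1 + ppm;	/* same computation as the hunk above */

	ppb *= 125;
	ppb >>= 13;
	return (long)ppb;
}

int main(void)
{
	printf("%ld\n", scaled_ppm_to_ppb(65536L));	/* 1 ppm (scaled)   -> 1000 ppb   */
	printf("%ld\n", scaled_ppm_to_ppb(32768000L));	/* 500 ppm (scaled) -> 500000 ppb */
	return 0;
}
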
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 9d84d9245490..3e7a38525cb3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1031,7 +1031,7 @@ config REGULATOR_RT5033
current source, LDO and Buck.
config REGULATOR_RTMV20
- tristate "RTMV20 Laser Diode Regulator"
+ tristate "Richtek RTMV20 Laser Diode Regulator"
depends on I2C
select REGMAP_I2C
help
diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
index d8b429955d33..05147d2c3842 100644
--- a/drivers/regulator/atc260x-regulator.c
+++ b/drivers/regulator/atc260x-regulator.c
@@ -28,16 +28,16 @@ static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
- REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 0, 12, 100000),
};
static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
- REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 0, 11, 100000),
};
static const unsigned int atc260x_ldo_voltage_range_sel[] = {
- 0x0, 0x1,
+ 0x0, 0x20,
};
static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -411,7 +411,7 @@ enum atc2609a_reg_ids {
.owner = THIS_MODULE, \
}
-#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
+#define atc2609a_reg_desc_ldo_range_pick(num, n_range, n_volt) { \
.name = "LDO"#num, \
.supply_name = "ldo"#num, \
.of_match = of_match_ptr("ldo"#num), \
@@ -421,6 +421,7 @@ enum atc2609a_reg_ids {
.type = REGULATOR_VOLTAGE, \
.linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
.n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
+ .n_voltages = n_volt, \
.vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
.vsel_mask = GENMASK(4, 1), \
.vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
@@ -458,12 +459,12 @@ static const struct regulator_desc atc2609a_reg[] = {
atc2609a_reg_desc_ldo_bypass(0),
atc2609a_reg_desc_ldo_bypass(1),
atc2609a_reg_desc_ldo_bypass(2),
- atc2609a_reg_desc_ldo_range_pick(3, 0),
- atc2609a_reg_desc_ldo_range_pick(4, 0),
+ atc2609a_reg_desc_ldo_range_pick(3, 0, 29),
+ atc2609a_reg_desc_ldo_range_pick(4, 0, 29),
atc2609a_reg_desc_ldo(5),
- atc2609a_reg_desc_ldo_range_pick(6, 1),
- atc2609a_reg_desc_ldo_range_pick(7, 0),
- atc2609a_reg_desc_ldo_range_pick(8, 0),
+ atc2609a_reg_desc_ldo_range_pick(6, 1, 28),
+ atc2609a_reg_desc_ldo_range_pick(7, 0, 29),
+ atc2609a_reg_desc_ldo_range_pick(8, 0, 29),
atc2609a_reg_desc_ldo_fixed(9),
};
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e61295b30503..b1eb46961993 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -334,7 +334,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
NULL);
BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
- regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
+ regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
NULL);
/*
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f192bf19492e..e20e77e4c159 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1425,6 +1425,12 @@ static int set_machine_constraints(struct regulator_dev *rdev)
* and we have control then make sure it is enabled.
*/
if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ /* If we want to enable this regulator, make sure that we know
+ * the supplying regulator.
+ */
+ if (rdev->supply_name && !rdev->supply)
+ return -EPROBE_DEFER;
+
if (rdev->supply) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index eb3fc1db4edc..c4754f3cf233 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
+ ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- return PTR_ERR(drvdata->dev);
+ return ret;
}
platform_set_drvdata(pdev, drvdata);
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 08cbf688e14d..e66925090258 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -280,7 +280,7 @@ static unsigned int da9121_map_mode(unsigned int mode)
case DA9121_BUCK_MODE_FORCE_PFM:
return REGULATOR_MODE_STANDBY;
default:
- return -EINVAL;
+ return REGULATOR_MODE_INVALID;
}
}
@@ -317,7 +317,7 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
{
struct da9121 *chip = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- unsigned int val;
+ unsigned int val, mode;
int ret = 0;
ret = regmap_read(chip->regmap, da9121_mode_field[id].reg, &val);
@@ -326,7 +326,11 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
return -EINVAL;
}
- return da9121_map_mode(val & da9121_mode_field[id].msk);
+ mode = da9121_map_mode(val & da9121_mode_field[id].msk);
+ if (mode == REGULATOR_MODE_INVALID)
+ return -EINVAL;
+
+ return mode;
}
static const struct regulator_ops da9121_buck_ops = {
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index f3918f03aaf3..26f06f685b1b 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -55,7 +55,6 @@
#define FAN53555_NVOLTAGES 64 /* Numbers of voltages */
#define FAN53526_NVOLTAGES 128
-#define TCS4525_NVOLTAGES 127 /* Numbers of voltages */
#define TCS_VSEL_NSEL_MASK 0x7f
#define TCS_VSEL0_MODE (1 << 7)
@@ -376,7 +375,7 @@ static int fan53555_voltages_setup_tcs(struct fan53555_device_info *di)
/* Init voltage range and step */
di->vsel_min = 600000;
di->vsel_step = 6250;
- di->vsel_count = TCS4525_NVOLTAGES;
+ di->vsel_count = FAN53526_NVOLTAGES;
return 0;
}
diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
index e83eb4fb1876..1684faf82ed2 100644
--- a/drivers/regulator/fan53880.c
+++ b/drivers/regulator/fan53880.c
@@ -51,6 +51,7 @@ static const struct regulator_ops fan53880_ops = {
REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
}, \
.n_linear_ranges = 2, \
+ .n_voltages = 0x74, \
.vsel_reg = FAN53880_LDO ## _num ## VOUT, \
.vsel_mask = 0x7f, \
.enable_reg = FAN53880_ENABLE, \
@@ -76,6 +77,7 @@ static const struct regulator_desc fan53880_regulators[] = {
REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
},
.n_linear_ranges = 2,
+ .n_voltages = 0xf8,
.vsel_reg = FAN53880_BUCKVOUT,
.vsel_mask = 0x7f,
.enable_reg = FAN53880_ENABLE,
@@ -95,6 +97,7 @@ static const struct regulator_desc fan53880_regulators[] = {
REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
},
.n_linear_ranges = 2,
+ .n_voltages = 0x71,
.vsel_reg = FAN53880_BOOSTVOUT,
.vsel_mask = 0x7f,
.enable_reg = FAN53880_ENABLE_BOOST,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 02ad83153e19..34e255c235d4 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -88,10 +88,15 @@ static int reg_domain_disable(struct regulator_dev *rdev)
{
struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
struct device *dev = rdev->dev.parent;
+ int ret;
+
+ ret = dev_pm_genpd_set_performance_state(dev, 0);
+ if (ret)
+ return ret;
priv->enable_counter--;
- return dev_pm_genpd_set_performance_state(dev, 0);
+ return 0;
}
static int reg_is_enabled(struct regulator_dev *rdev)
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 0e16e31c968f..ad2237a95572 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -948,7 +948,7 @@ int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
int ret;
unsigned int sel;
- if (!rdev->desc->n_ramp_values)
+ if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
return -EINVAL;
ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c
index f6a14e9c3cbf..d6340bb49296 100644
--- a/drivers/regulator/hi6421v600-regulator.c
+++ b/drivers/regulator/hi6421v600-regulator.c
@@ -3,7 +3,7 @@
// Device driver for regulators in Hisi IC
//
// Copyright (c) 2013 Linaro Ltd.
-// Copyright (c) 2011 Hisilicon.
+// Copyright (c) 2011 HiSilicon Ltd.
// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
//
// Guodong Xu <guodong.xu@linaro.org>
@@ -83,7 +83,7 @@ static const unsigned int ldo34_voltages[] = {
.owner = THIS_MODULE, \
.volt_table = vtable, \
.n_voltages = ARRAY_SIZE(vtable), \
- .vsel_mask = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
+ .vsel_mask = ARRAY_SIZE(vtable) - 1, \
.vsel_reg = vreg, \
.enable_reg = ereg, \
.enable_mask = emask, \
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index ac2ee2030211..68cdb173196d 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -2,7 +2,7 @@
//
// Device driver for regulators in Hi655x IC
//
-// Copyright (c) 2016 Hisilicon.
+// Copyright (c) 2016 HiSilicon Ltd.
//
// Authors:
// Chen Feng <puck.chen@hisilicon.com>
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 8d9731e4052b..3cf8f085170a 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -814,6 +814,13 @@ static int max77620_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = pmic;
+ /*
+ * Set of_node_reuse flag to prevent driver core from attempting to
+ * claim any pinmux resources already claimed by the parent device.
+ * Otherwise PMIC driver will fail to re-probe.
+ */
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
for (id = 0; id < MAX77620_NUM_REGS; id++) {
struct regulator_dev *rdev;
struct regulator_desc *rdesc;
@@ -839,12 +846,10 @@ static int max77620_regulator_probe(struct platform_device *pdev)
return ret;
rdev = devm_regulator_register(dev, rdesc, &config);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
- dev_err(dev, "Regulator registration %s failed: %d\n",
- rdesc->name, ret);
- return ret;
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Regulator registration %s failed\n",
+ rdesc->name);
}
return 0;
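
The dev_err_probe() helper adopted above returns the error value passed to it; it logs the message for real failures but stays quiet for -EPROBE_DEFER, recording the deferral reason (visible via debugfs devices_deferred) instead, which is why the open-coded log-and-return block collapses into a single statement. A minimal sketch of the idiom in an unrelated, hypothetical probe path (a clock lookup, purely for illustration):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* prints for real errors, silently defers for -EPROBE_DEFER */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}
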
diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
index 9edc34981ee0..6b8be52c3772 100644
--- a/drivers/regulator/mt6315-regulator.c
+++ b/drivers/regulator/mt6315-regulator.c
@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
};
-static unsigned int mt6315_map_mode(u32 mode)
+static unsigned int mt6315_map_mode(unsigned int mode)
{
switch (mode) {
case MT6315_BUCK_MODE_AUTO:
diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
index 2055a9cb13ba..7a87788d3f09 100644
--- a/drivers/regulator/rt4801-regulator.c
+++ b/drivers/regulator/rt4801-regulator.c
@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
struct gpio_descs *gpios = priv->enable_gpios;
int id = rdev_get_id(rdev), ret;
- if (gpios->ndescs <= id) {
+ if (!gpios || gpios->ndescs <= id) {
dev_warn(&rdev->dev, "no dedicated gpio can control\n");
goto bypass_gpio;
}
@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
struct gpio_descs *gpios = priv->enable_gpios;
int id = rdev_get_id(rdev);
- if (gpios->ndescs <= id) {
+ if (!gpios || gpios->ndescs <= id) {
dev_warn(&rdev->dev, "no dedicated gpio can control\n");
goto bypass_gpio;
}
diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
index 852fb2596ffd..4bca64de0f67 100644
--- a/drivers/regulator/rtmv20-regulator.c
+++ b/drivers/regulator/rtmv20-regulator.c
@@ -27,6 +27,7 @@
#define RTMV20_REG_LDIRQ 0x30
#define RTMV20_REG_LDSTAT 0x40
#define RTMV20_REG_LDMASK 0x50
+#define RTMV20_MAX_REGS (RTMV20_REG_LDMASK + 1)
#define RTMV20_VID_MASK GENMASK(7, 4)
#define RICHTEK_VID 0x80
@@ -103,9 +104,47 @@ static int rtmv20_lsw_disable(struct regulator_dev *rdev)
return 0;
}
+static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ int sel;
+
+ if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
+ return -EINVAL;
+
+ if (max_uA > RTMV20_LSW_MAXUA)
+ max_uA = RTMV20_LSW_MAXUA;
+
+ sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
+
+ /* Ensure the selected setting is still in range */
+ if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+ rdev->desc->csel_mask, sel);
+}
+
+static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+ if (ret)
+ return ret;
+
+ val &= rdev->desc->csel_mask;
+ val >>= ffs(rdev->desc->csel_mask) - 1;
+
+ return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
+}
+
static const struct regulator_ops rtmv20_regulator_ops = {
- .set_current_limit = regulator_set_current_limit_regmap,
- .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = rtmv20_lsw_set_current_limit,
+ .get_current_limit = rtmv20_lsw_get_current_limit,
.enable = rtmv20_lsw_enable,
.disable = rtmv20_lsw_disable,
.is_enabled = regulator_is_enabled_regmap,
@@ -275,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = RTMV20_REG_LDMASK,
+ .num_reg_defaults_raw = RTMV20_MAX_REGS,
.writeable_reg = rtmv20_is_accessible_reg,
.readable_reg = rtmv20_is_accessible_reg,
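
The new rtmv20_lsw_set_current_limit() above maps a requested [min_uA, max_uA] window onto a register selector: clamp the maximum to the hardware ceiling, divide by the step size (rounding down), then verify the rounded-down setting still meets the requested minimum. The RTMV20_LSW_MINUA/MAXUA/STEPUA constants are not visible in this diff, so the stand-alone check below uses made-up values and omits the csel_mask shifting:

#include <stdio.h>

#define LSW_MINUA	2000000		/* assumed value, for illustration */
#define LSW_MAXUA	6000000		/* assumed value, for illustration */
#define LSW_STEPUA	 200000		/* assumed value, for illustration */

static int pick_current_selector(int min_uA, int max_uA)
{
	int sel;

	if (min_uA > LSW_MAXUA || max_uA < LSW_MINUA)
		return -1;		/* request entirely out of range */

	if (max_uA > LSW_MAXUA)
		max_uA = LSW_MAXUA;

	sel = (max_uA - LSW_MINUA) / LSW_STEPUA;	/* rounds down */

	/* rounding down must not fall below the requested minimum */
	if (sel * LSW_STEPUA + LSW_MINUA < min_uA)
		return -1;

	return sel;
}

int main(void)
{
	int sel = pick_current_selector(2500000, 2700000);

	if (sel >= 0)	/* sel = 3 -> 2600000 uA with the values above */
		printf("sel=%d -> %d uA\n", sel, sel * LSW_STEPUA + LSW_MINUA);
	return 0;
}
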
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index bbadf72b94e8..1f02f60ad136 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -173,7 +173,7 @@ scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
sreg->desc.uV_step =
vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
sreg->desc.linear_min_sel = 0;
- sreg->desc.n_voltages = delta_uV / sreg->desc.uV_step;
+ sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
sreg->desc.ops = &scmi_reg_linear_ops;
}
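
The off-by-one fixed above is a fence-post count: a linear range includes both endpoints, so the number of selectable levels is (max - min) / step + 1, not (max - min) / step. A quick stand-alone check with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int min_uV = 1000000, max_uV = 1100000, step_uV = 25000;
	unsigned int n_voltages = (max_uV - min_uV) / step_uV + 1;

	/* 1.000, 1.025, 1.050, 1.075, 1.100 V -> prints 5 */
	printf("%u selectable voltages\n", n_voltages);
	return 0;
}
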
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ecefc25eff0c..337353c9655e 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
+ bool found = false;
status = ap_dqap(aq->qid, &aq->reply->psmid,
aq->reply->msg, aq->reply->len);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count--;
+ aq->queue_count = max_t(int, 0, aq->queue_count - 1);
if (aq->queue_count > 0)
mod_timer(&aq->timeout,
jiffies + aq->request_timeout);
@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->receive(aq, ap_msg, aq->reply);
+ found = true;
break;
}
+ if (!found) {
+ AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+ __func__, aq->reply->psmid,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ }
fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count++;
+ aq->queue_count = max_t(int, 1, aq->queue_count + 1);
if (aq->queue_count == 1)
mod_timer(&aq->timeout, jiffies + aq->request_timeout);
list_move_tail(&ap_msg->list, &aq->pendingq);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 697c09ef259b..cd52664920e1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -254,12 +254,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_enable_async_suspend(&shost->shost_dev);
+ get_device(&shost->shost_gendev);
error = device_add(&shost->shost_dev);
if (error)
goto out_del_gendev;
- get_device(&shost->shost_gendev);
-
if (shost->transportt->host_size) {
shost->shost_data = kzalloc(shost->transportt->host_size,
GFP_KERNEL);
@@ -278,33 +277,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (!shost->work_q) {
error = -EINVAL;
- goto out_free_shost_data;
+ goto out_del_dev;
}
}
error = scsi_sysfs_add_host(shost);
if (error)
- goto out_destroy_host;
+ goto out_del_dev;
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
- out_destroy_host:
- if (shost->work_q)
- destroy_workqueue(shost->work_q);
- out_free_shost_data:
- kfree(shost->shost_data);
+ /*
+ * Any host allocation in this function will be freed in
+ * scsi_host_dev_release().
+ */
out_del_dev:
device_del(&shost->shost_dev);
out_del_gendev:
+ /*
+ * Host state is SHOST_RUNNING so we have to explicitly release
+ * ->shost_dev.
+ */
+ put_device(&shost->shost_dev);
device_del(&shost->shost_gendev);
out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -345,7 +347,7 @@ static void scsi_host_dev_release(struct device *dev)
ida_simple_remove(&host_index_ida, shost->host_no);
- if (parent)
+ if (shost->shost_state != SHOST_CREATED)
put_device(parent);
kfree(shost);
}
@@ -388,8 +390,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
mutex_init(&shost->scan_mutex);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto fail_kfree;
+ if (index < 0) {
+ kfree(shost);
+ return NULL;
+ }
shost->host_no = index;
shost->dma_channel = 0xff;
@@ -481,7 +485,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
- goto fail_index_remove;
+ goto fail;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -490,17 +494,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
if (!shost->tmf_work_q) {
shost_printk(KERN_WARNING, shost,
"failed to create tmf workq\n");
- goto fail_kthread;
+ goto fail;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
+ fail:
+ /*
+ * Host state is still SHOST_CREATED and that is enough to release
+ * ->shost_gendev. scsi_host_dev_release() will free
+ * dev_name(&shost->shost_dev).
+ */
+ put_device(&shost->shost_gendev);
- fail_kthread:
- kthread_stop(shost->ehandler);
- fail_index_remove:
- ida_simple_remove(&host_index_ida, shost->host_no);
- fail_kfree:
- kfree(shost);
return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 8965fe61c8b4..fe40626e45aa 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -68,7 +68,7 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
-#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
+#define BCM2835_SPI_NUM_CS 24 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@@ -1195,6 +1195,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
struct gpio_chip *chip;
u32 cs;
+ if (spi->chip_select >= BCM2835_SPI_NUM_CS) {
+ dev_err(&spi->dev, "only %d chip-selects supported\n",
+ BCM2835_SPI_NUM_CS - 1);
+ return -EINVAL;
+ }
+
/*
* Precalculate SPI slave's CS register value for ->prepare_message():
* The driver always uses software-controlled GPIO chip select, hence
@@ -1288,7 +1294,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->use_gpio_descriptors = true;
ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
+ ctlr->num_chipselect = 3;
ctlr->setup = bcm2835_spi_setup;
ctlr->transfer_one = bcm2835_spi_transfer_one;
ctlr->handle_err = bcm2835_spi_handle_err;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 6a6af85aebfd..27d0087f8688 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -184,6 +184,8 @@ int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
+ bool initial_setup = false;
+ int retval;
bitbang = spi_master_get_devdata(spi->master);
@@ -192,22 +194,30 @@ int spi_bitbang_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
+ initial_setup = true;
}
/* per-word shift register access, in hardware or bitbanging */
cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
- if (!cs->txrx_word)
- return -EINVAL;
+ if (!cs->txrx_word) {
+ retval = -EINVAL;
+ goto err_free;
+ }
if (bitbang->setup_transfer) {
- int retval = bitbang->setup_transfer(spi, NULL);
+ retval = bitbang->setup_transfer(spi, NULL);
if (retval < 0)
- return retval;
+ goto err_free;
}
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
return 0;
+
+err_free:
+ if (initial_setup)
+ kfree(cs);
+ return retval;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index d0e5aa18b7ba..bdf94cc7be1a 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -440,6 +440,7 @@ static int fsl_spi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg __iomem *reg_base;
+ bool initial_setup = false;
int retval;
u32 hw_mode;
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -452,6 +453,7 @@ static int fsl_spi_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
spi_set_ctldata(spi, cs);
+ initial_setup = true;
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -475,6 +477,8 @@ static int fsl_spi_setup(struct spi_device *spi)
retval = fsl_spi_setup_transfer(spi, NULL);
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
+ if (initial_setup)
+ kfree(cs);
return retval;
}
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 71402f71ddd8..df28c6664aba 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -424,15 +424,22 @@ done:
static int uwire_setup(struct spi_device *spi)
{
struct uwire_state *ust = spi->controller_state;
+ bool initial_setup = false;
+ int status;
if (ust == NULL) {
ust = kzalloc(sizeof(*ust), GFP_KERNEL);
if (ust == NULL)
return -ENOMEM;
spi->controller_state = ust;
+ initial_setup = true;
}
- return uwire_setup_transfer(spi, NULL);
+ status = uwire_setup_transfer(spi, NULL);
+ if (status && initial_setup)
+ kfree(ust);
+
+ return status;
}
static void uwire_cleanup(struct spi_device *spi)
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 999c22736416..ede7f05e5ced 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1032,8 +1032,22 @@ static void omap2_mcspi_release_dma(struct spi_master *master)
}
}
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+ struct omap2_mcspi_cs *cs;
+
+ if (spi->controller_state) {
+ /* Unlink controller state from context save list */
+ cs = spi->controller_state;
+ list_del(&cs->node);
+
+ kfree(cs);
+ }
+}
+
static int omap2_mcspi_setup(struct spi_device *spi)
{
+ bool initial_setup = false;
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1051,35 +1065,28 @@ static int omap2_mcspi_setup(struct spi_device *spi)
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
+ initial_setup = true;
}
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(mcspi->dev);
+ if (initial_setup)
+ omap2_mcspi_cleanup(spi);
return ret;
}
ret = omap2_mcspi_setup_transfer(spi, NULL);
+ if (ret && initial_setup)
+ omap2_mcspi_cleanup(spi);
+
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
return ret;
}
-static void omap2_mcspi_cleanup(struct spi_device *spi)
-{
- struct omap2_mcspi_cs *cs;
-
- if (spi->controller_state) {
- /* Unlink controller state from context save list */
- cs = spi->controller_state;
- list_del(&cs->node);
-
- kfree(cs);
- }
-}
-
static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
{
struct omap2_mcspi *mcspi = data;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 5e59ba075bc7..8ee0cc071777 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1254,6 +1254,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
+ if (err)
+ gpiod_put(chip->gpiod_cs);
}
return err;
@@ -1267,6 +1269,7 @@ static int setup(struct spi_device *spi)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
uint tx_thres, tx_hi_thres, rx_thres;
+ int err;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@@ -1413,7 +1416,11 @@ static int setup(struct spi_device *spi)
if (drv_data->ssp_type == CE4100_SSP)
return 0;
- return setup_cs(spi, chip, chip_info);
+ err = setup_cs(spi, chip, chip_info);
+ if (err)
+ kfree(chip);
+
+ return err;
}
static void cleanup(struct spi_device *spi)
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 7e640ccc7e77..594f64136208 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -294,7 +294,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
int err = 0;
if (!op->data.nbytes)
- return stm32_qspi_wait_nobusy(qspi);
+ goto wait_nobusy;
if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
goto out;
@@ -315,6 +315,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
out:
/* clear flags */
writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+wait_nobusy:
+ if (!err)
+ err = stm32_qspi_wait_nobusy(qspi);
return err;
}
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 5a3d81c31d04..9262c6418463 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
- goto remove_master;
+ goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
0, pdev->name, xqspi);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
- goto remove_master;
+ goto clk_dis_all;
}
ret = of_property_read_u32(np, "num-cs",
@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
if (ret < 0) {
ctlr->num_chipselect = 1;
} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+ ret = -EINVAL;
dev_err(&pdev->dev, "only 2 chip selects are available\n");
- goto remove_master;
+ goto clk_dis_all;
} else {
ctlr->num_chipselect = num_cs;
}
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 33e28ccf4d85..b5229bc6eae5 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
* GDMA4740 DMAC support
*/
@@ -914,6 +913,5 @@ static struct platform_driver gdma_dma_driver = {
};
module_platform_driver(gdma_dma_driver);
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index a6d731e959a2..437859228371 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2091,7 +2091,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
struct net_device *ndev = padapter->pnetdev;
{
- struct station_info sinfo;
+ struct station_info sinfo = {};
u8 ie_offset;
if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
@@ -2284,7 +2284,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
mon_ndev->ieee80211_ptr = mon_wdev;
- ret = register_netdevice(mon_ndev);
+ ret = cfg80211_register_netdevice(mon_ndev);
if (ret) {
goto out;
}
@@ -2360,7 +2360,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
adapter = rtw_netdev_priv(ndev);
pwdev_priv = adapter_wdev_data(adapter);
- unregister_netdevice(ndev);
+ cfg80211_unregister_netdevice(ndev);
if (ndev == pwdev_priv->pmon_ndev) {
pwdev_priv->pmon_ndev = NULL;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 2f49c580139b..bd4e9f6ac29c 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -553,7 +553,11 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
{
struct exar8250 *priv = pci_get_drvdata(pcidev);
struct uart_8250_port *port = serial8250_get_port(priv->line[0]);
- struct platform_device *pdev = port->port.private_data;
+ struct platform_device *pdev;
+
+ pdev = port->port.private_data;
+ if (!pdev)
+ return;
device_remove_software_node(&pdev->dev);
platform_device_unregister(pdev);
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index a8b7b50abf64..5281f8d3fb3d 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2007,7 +2007,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
else
mask = BIT(priv_ep->num);
- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
cdns3_set_register_bit(&regs->tdl_from_trb, mask);
cdns3_set_register_bit(&regs->tdl_beh, mask);
cdns3_set_register_bit(&regs->tdl_beh2, mask);
@@ -2046,15 +2046,13 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
case USB_ENDPOINT_XFER_INT:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
case USB_ENDPOINT_XFER_BULK:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
default:
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 5f0513c96c04..68972746e363 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1517,13 +1517,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
{
struct cdnsp_device *pdev = (struct cdnsp_device *)data;
union cdnsp_trb *event_ring_deq;
+ unsigned long flags;
int counter = 0;
- spin_lock(&pdev->lock);
+ spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
cdnsp_died(pdev);
- spin_unlock(&pdev->lock);
+ spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
@@ -1539,7 +1540,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
- spin_unlock(&pdev->lock);
+ spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 4545b23bda3f..bac0f5458cab 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
int val;
unsigned long flags;
+ /* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
+ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ /* TVDMSRC_DIS */
+ msleep(20);
+
/* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
spin_unlock_irqrestore(&usbmisc->lock, flags);
- usleep_range(1000, 2000);
+ /* TVDMSRC_ON */
+ msleep(40);
/*
* Per BC 1.2, check voltage of D+:
@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
spin_unlock_irqrestore(&usbmisc->lock, flags);
- usleep_range(1000, 2000);
+ /* TVDPSRC_ON */
+ msleep(40);
/* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fc7d6cdacf16..df8e69e60aaf 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -41,6 +41,8 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
#define USB_PRODUCT_USB5534B 0x5534
+#define USB_VENDOR_CYPRESS 0x04b4
+#define USB_PRODUCT_CY7C65632 0x6570
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@@ -5698,6 +5700,11 @@ static const struct usb_device_id hub_id_table[] = {
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT,
+ .idVendor = USB_VENDOR_CYPRESS,
+ .idProduct = USB_PRODUCT_CY7C65632,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index b6e53d8212cd..4ac397e43e19 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1671,8 +1671,8 @@ static int dwc3_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
- dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
+ dwc3_debugfs_exit(dwc);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
@@ -1690,11 +1690,6 @@ static int dwc3_remove(struct platform_device *pdev)
return 0;
}
-static void dwc3_shutdown(struct platform_device *pdev)
-{
- dwc3_remove(pdev);
-}
-
#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
@@ -2012,7 +2007,6 @@ MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = dwc3_remove,
- .shutdown = dwc3_shutdown,
.driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index d0ac89c5b317..d223c54115f4 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
#ifdef CONFIG_DEBUG_FS
+extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
extern void dwc3_debugfs_init(struct dwc3 *d);
extern void dwc3_debugfs_exit(struct dwc3 *d);
#else
+static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{ }
static inline void dwc3_debugfs_init(struct dwc3 *d)
{ }
static inline void dwc3_debugfs_exit(struct dwc3 *d)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 7146ee2ac057..5dbbe53269d3 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -886,30 +886,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
}
}
-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
- struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{
struct dentry *dir;
- dir = debugfs_create_dir(dep->name, parent);
+ dir = debugfs_create_dir(dep->name, dep->dwc->root);
dwc3_debugfs_create_endpoint_files(dep, dir);
}
-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
- struct dentry *parent)
-{
- int i;
-
- for (i = 0; i < dwc->num_eps; i++) {
- struct dwc3_ep *dep = dwc->eps[i];
-
- if (!dep)
- continue;
-
- dwc3_debugfs_create_endpoint_dir(dep, parent);
- }
-}
-
void dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
@@ -940,7 +924,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
&dwc3_testmode_fops);
debugfs_create_file("link_state", 0644, root, dwc,
&dwc3_link_state_fops);
- dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
}
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index bdf1f98dfad8..ffe301d6ea35 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -651,7 +651,7 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
return PTR_ERR(priv->usb_glue_regmap);
/* Create a regmap for each USB2 PHY control register set */
- for (i = 0; i < priv->usb2_ports; i++) {
+ for (i = 0; i < priv->drvdata->num_phys; i++) {
struct regmap_config u2p_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
@@ -659,6 +659,9 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
.max_register = U2P_R1,
};
+ if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+ continue;
+
u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
"u2p-%d", i);
if (!u2p_regmap_config.name)
@@ -772,13 +775,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->usb_init(priv);
if (ret)
- goto err_disable_clks;
+ goto err_disable_regulator;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
- goto err_disable_clks;
+ goto err_disable_regulator;
}
/* Set PHY Power */
@@ -816,6 +819,10 @@ err_phys_exit:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_exit(priv->phys[i]);
+err_disable_regulator:
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 8b668ef46f7f..3cd294264372 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
epnum |= 1;
dep = dwc->eps[epnum];
+ if (dep == NULL)
+ return NULL;
+
if (dep->flags & DWC3_EP_ENABLED)
return dep;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 612825a39f82..f14c2aa83759 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2261,13 +2261,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
}
/*
- * Synchronize any pending event handling before executing the controller
- * halt routine.
+ * Synchronize and disable any further event handling while controller
+ * is being enabled/disabled.
*/
- if (!is_on) {
- dwc3_gadget_disable_irq(dwc);
- synchronize_irq(dwc->irq_gadget);
- }
+ disable_irq(dwc->irq_gadget);
spin_lock_irqsave(&dwc->lock, flags);
@@ -2305,6 +2302,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ enable_irq(dwc->irq_gadget);
+
pm_runtime_put(dwc->dev);
return ret;
@@ -2754,6 +2753,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
INIT_LIST_HEAD(&dep->started_list);
INIT_LIST_HEAD(&dep->cancelled_list);
+ dwc3_debugfs_create_endpoint_dir(dep);
+
return 0;
}
@@ -2797,6 +2798,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
list_del(&dep->endpoint.ep_list);
}
+ debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
kfree(dep);
}
}
@@ -4046,6 +4048,7 @@ err5:
dwc3_gadget_free_endpoints(dwc);
err4:
usb_put_gadget(dwc->gadget);
+ dwc->gadget = NULL;
err3:
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
@@ -4065,6 +4068,9 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
+ if (!dwc->gadget)
+ return;
+
usb_del_gadget(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
usb_put_gadget(dwc->gadget);
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 8bb25773b61e..05507606b2b4 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f,
{
struct usb_gadget *g = f->config->cdev->gadget;
+ /* super-speed-plus descriptor falls back to super-speed one,
+ * if such a descriptor was provided, thus avoiding a NULL
+ * pointer dereference if a 5gbps capable gadget is used with
+ * a 10gbps capable config (device port + cable + host port)
+ */
+ if (!ssp)
+ ssp = ss;
+
if (fs) {
f->fs_descriptors = usb_copy_descriptors(fs);
if (!f->fs_descriptors)
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 7f5cf488b2b1..ffe2486fce71 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -791,7 +791,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
fs_ecm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
- ecm_ss_function, NULL);
+ ecm_ss_function, ecm_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index cfcc4e81fb77..2cd9942707b4 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -302,7 +302,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
- eem_ss_function, NULL);
+ eem_ss_function, eem_ss_function);
if (status)
goto fail;
@@ -495,7 +495,7 @@ static int eem_unwrap(struct gether *port,
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2)) {
DBG(cdev, "unable to unframe EEM packet\n");
- continue;
+ goto next;
}
skb_trim(skb2, len - ETH_FCS_LEN);
@@ -505,7 +505,7 @@ static int eem_unwrap(struct gether *port,
GFP_ATOMIC);
if (unlikely(!skb3)) {
dev_kfree_skb_any(skb2);
- continue;
+ goto next;
}
dev_kfree_skb_any(skb2);
skb_queue_tail(list, skb3);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index bf109191659a..d4844afeaffc 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3567,6 +3567,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
ffs->func = NULL;
}
+ /* Drain any pending AIO completions */
+ drain_workqueue(ffs->io_completion_wq);
+
if (!--opts->refcnt)
functionfs_unbind(ffs);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 1125f4715830..e55699308117 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -802,7 +802,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_fs_out_ep_desc.bEndpointAddress;
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, hidg_ss_descriptors, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors,
+ hidg_ss_descriptors);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index b56ad7c3838b..ae41f556eb75 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -207,7 +207,7 @@ autoconf_fail:
ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
- ss_loopback_descs, NULL);
+ ss_loopback_descs, ss_loopback_descs);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 019bea8e09cc..855127249f24 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -583,7 +583,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
data[1] = data[0];
- DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+ DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
ncm->notify_state = NCM_NOTIFY_CONNECT;
break;
}
@@ -1101,11 +1101,11 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
ncm->ndp_dgram_count = 1;
/* Note: we skip opts->next_ndp_index */
- }
- /* Delay the timer. */
- hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL_SOFT);
+ /* Start the timer. */
+ hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+ HRTIMER_MODE_REL_SOFT);
+ }
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index f47fdc1fa7f1..59d382fe1bbf 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1101,7 +1101,8 @@ autoconf_fail:
ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_printer_function,
- hs_printer_function, ss_printer_function, NULL);
+ hs_printer_function, ss_printer_function,
+ ss_printer_function);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 0739b05a0ef7..ee95e8f5f9d4 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -789,7 +789,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
- eth_ss_function, NULL);
+ eth_ss_function, eth_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index e62713846350..1ed8ff0ac2d3 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
- gser_ss_function, NULL);
+ gser_ss_function, gser_ss_function);
if (status)
goto fail;
dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 5a201ba7b155..1abf08e5164a 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -431,7 +431,8 @@ no_iso:
ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_source_sink_descs,
- hs_source_sink_descs, ss_source_sink_descs, NULL);
+ hs_source_sink_descs, ss_source_sink_descs,
+ ss_source_sink_descs);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 4d945254905d..51c1cae162d9 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
fs_subset_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
- ss_eth_function, NULL);
+ ss_eth_function, ss_eth_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7acb507946e6..de161ee0b1f9 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -2057,7 +2057,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, uasp_fs_function_desc,
- uasp_hs_function_desc, uasp_ss_function_desc, NULL);
+ uasp_hs_function_desc, uasp_ss_function_desc,
+ uasp_ss_function_desc);
if (ret)
goto ep_fail;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7bc18cf8042c..18c2bbddf080 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
@@ -182,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
+ xhci->quirks |= XHCI_BROKEN_D3COLD;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
@@ -539,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* Systems with the TI redriver that loses port status change events
* need to have the registers polled during D3, so avoid D3cold.
*/
- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
pci_d3cold_disable(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2595a8f057c4..e417f5ce13d1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1892,6 +1892,7 @@ struct xhci_hcd {
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
+#define XHCI_BROKEN_D3COLD BIT_ULL(41)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
index b3cfe8666ea7..336653091e3b 100644
--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
+++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
@@ -263,6 +263,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
return -EINVAL;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
pdata = devm_kzalloc(&pdev->dev,
sizeof(*pdata) +
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 8f09a387b773..4c8f0112481f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2009,9 +2009,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
musb->quirk_retries--;
- break;
}
- fallthrough;
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ee595d1bea0a..fcb812bc832c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -252,9 +252,11 @@ struct cp210x_serial_private {
u8 gpio_input;
#endif
u8 partnum;
+ u32 fw_version;
speed_t min_speed;
speed_t max_speed;
bool use_actual_rate;
+ bool no_flow_control;
};
enum cp210x_event_state {
@@ -398,6 +400,7 @@ struct cp210x_special_chars {
/* CP210X_VENDOR_SPECIFIC values */
#define CP210X_READ_2NCONFIG 0x000E
+#define CP210X_GET_FW_VER_2N 0x0010
#define CP210X_READ_LATCH 0x00C2
#define CP210X_GET_PARTNUM 0x370B
#define CP210X_GET_PORTCONFIG 0x370C
@@ -537,6 +540,12 @@ struct cp210x_single_port_config {
#define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX 587
#define CP210X_2NCONFIG_GPIO_CONTROL_IDX 600
+/* CP2102N QFN20 port configuration values */
+#define CP2102N_QFN20_GPIO2_TXLED_MODE BIT(2)
+#define CP2102N_QFN20_GPIO3_RXLED_MODE BIT(3)
+#define CP2102N_QFN20_GPIO1_RS485_MODE BIT(4)
+#define CP2102N_QFN20_GPIO0_CLK_MODE BIT(6)
+
/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
struct cp210x_gpio_write {
u8 mask;
@@ -1122,6 +1131,7 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
static void cp210x_set_flow_control(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
struct cp210x_special_chars chars;
struct cp210x_flow_ctl flow_ctl;
@@ -1129,6 +1139,15 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
u32 ctl_hs;
int ret;
+ /*
+ * Some CP2102N interpret ulXonLimit as ulFlowReplace (erratum
+ * CP2102N_E104). Report back that flow control is not supported.
+ */
+ if (priv->no_flow_control) {
+ tty->termios.c_cflag &= ~CRTSCTS;
+ tty->termios.c_iflag &= ~(IXON | IXOFF);
+ }
+
if (old_termios &&
C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
I_IXON(tty) == (old_termios->c_iflag & IXON) &&
@@ -1185,19 +1204,20 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
port_priv->crtscts = false;
}
- if (I_IXOFF(tty))
+ if (I_IXOFF(tty)) {
flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
- else
+
+ flow_ctl.ulXonLimit = cpu_to_le32(128);
+ flow_ctl.ulXoffLimit = cpu_to_le32(128);
+ } else {
flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
+ }
if (I_IXON(tty))
flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
else
flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
- flow_ctl.ulXonLimit = cpu_to_le32(128);
- flow_ctl.ulXoffLimit = cpu_to_le32(128);
-
dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
ctl_hs, flow_repl);
@@ -1733,7 +1753,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
/* 0 indicates GPIO mode, 1 is alternate function */
- priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+ if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
+ /* QFN20 is special... */
+ if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE) /* GPIO 0 */
+ priv->gpio_altfunc |= BIT(0);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
+ priv->gpio_altfunc |= BIT(1);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
+ priv->gpio_altfunc |= BIT(2);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
+ priv->gpio_altfunc |= BIT(3);
+ } else {
+ priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+ }
if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
/*
@@ -1908,6 +1940,45 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
priv->use_actual_rate = use_actual_rate;
}
+static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ u8 ver[3];
+ int ret;
+
+ ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
+ ver, sizeof(ver));
+ if (ret)
+ return ret;
+
+ dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
+ ver[0], ver[1], ver[2]);
+
+ priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
+
+ return 0;
+}
+
+static void cp210x_determine_quirks(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ int ret;
+
+ switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2102N_QFN28:
+ case CP210X_PARTNUM_CP2102N_QFN24:
+ case CP210X_PARTNUM_CP2102N_QFN20:
+ ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
+ if (ret)
+ break;
+ if (priv->fw_version <= 0x10004)
+ priv->no_flow_control = true;
+ break;
+ default:
+ break;
+ }
+}
+
static int cp210x_attach(struct usb_serial *serial)
{
int result;
@@ -1928,6 +1999,7 @@ static int cp210x_attach(struct usb_serial *serial)
usb_set_serial_data(serial, priv);
+ cp210x_determine_quirks(serial);
cp210x_init_max_speed(serial);
result = cp210x_gpio_init(serial);
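/*
 * Illustrative sketch (plain userspace C, not kernel code): how a three-byte
 * major.minor.patch firmware version read back from the device can be packed
 * into one integer the way priv->fw_version is built above, and how the
 * 1.0.4 cut-off (0x10004) used for the flow-control quirk compares against
 * it. All names below are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_fw_version(uint8_t major, uint8_t minor, uint8_t patch)
{
	/* same layout as the driver uses: 0x00MMmmpp */
	return (uint32_t)major << 16 | (uint32_t)minor << 8 | patch;
}

int main(void)
{
	uint8_t ver[3] = { 1, 0, 4 };	/* e.g. the bytes returned by the device */
	uint32_t fw = pack_fw_version(ver[0], ver[1], ver[2]);

	printf("fw 0x%05x -> flow control %s\n", fw,
	       fw <= 0x10004 ? "broken (quirk applies)" : "usable");
	return 0;
}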
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 369ef140df78..4a1f3a95d017 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -611,6 +611,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index d854e04a4286..add602bebd82 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -581,6 +581,7 @@
#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
+#define FTDI_NT_ORIONMX_PID 0x7c93 /* OrionMX */
/*
* Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 83c62f920c50..41f1b872d277 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * USB ZyXEL omni.net LCD PLUS driver
+ * USB ZyXEL omni.net driver
*
* Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
*
@@ -22,10 +22,11 @@
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Alessandro Zummo"
-#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
+#define DRIVER_DESC "USB ZyXEL omni.net Driver"
#define ZYXEL_VENDOR_ID 0x0586
#define ZYXEL_OMNINET_ID 0x1000
+#define ZYXEL_OMNI_56K_PLUS_ID 0x1500
/* This one seems to be a re-branded ZyXEL device */
#define BT_IGNITIONPRO_ID 0x2000
@@ -40,6 +41,7 @@ static void omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
+ { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
};
@@ -50,7 +52,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
.owner = THIS_MODULE,
.name = "omninet",
},
- .description = "ZyXEL - omni.net lcd plus usb",
+ .description = "ZyXEL - omni.net usb",
.id_table = id_table,
.num_bulk_out = 2,
.calc_num_ports = omninet_calc_num_ports,
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 5f2e7f668e68..067690dac24c 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -416,7 +416,7 @@ static void qt2_close(struct usb_serial_port *port)
/* flush the port transmit buffer */
i = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
+ usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 1,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
@@ -426,7 +426,7 @@ static void qt2_close(struct usb_serial_port *port)
/* flush the port receive buffer */
i = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
+ usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 0,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
@@ -639,7 +639,7 @@ static int qt2_attach(struct usb_serial *serial)
int status;
/* power on unit */
- status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0xc2, 0x40, 0x8000, 0, NULL, 0,
QT2_USB_TIMEOUT);
if (status < 0) {
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 8514bec7e1b8..77dabd306ba8 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -239,7 +239,7 @@ find_mux:
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 46a25b8db72e..ffa8aa12d5f1 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -582,10 +582,15 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
acpi_dev_free_resource_list(&resource_list);
if (!pmc->iom_base) {
- put_device(&adev->dev);
+ acpi_dev_put(adev);
return -ENOMEM;
}
+ if (IS_ERR(pmc->iom_base)) {
+ acpi_dev_put(adev);
+ return PTR_ERR(pmc->iom_base);
+ }
+
pmc->iom_adev = adev;
return 0;
@@ -636,8 +641,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
break;
ret = pmc_usb_register_port(pmc, i, fwnode);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
goto err_remove_ports;
+ }
}
platform_set_drvdata(pdev, pmc);
@@ -651,7 +658,7 @@ err_remove_ports:
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return ret;
}
@@ -667,7 +674,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return 0;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 9ce8c9af4da5..63470cf7f4cd 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -401,6 +401,8 @@ struct tcpm_port {
unsigned int nr_src_pdo;
u32 snk_pdo[PDO_MAX_OBJECTS];
unsigned int nr_snk_pdo;
+ u32 snk_vdo_v1[VDO_MAX_OBJECTS];
+ unsigned int nr_snk_vdo_v1;
u32 snk_vdo[VDO_MAX_OBJECTS];
unsigned int nr_snk_vdo;
@@ -1547,42 +1549,45 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
if (PD_VDO_VID(p[0]) != USB_SID_PD)
break;
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
typec_partner_set_svdm_version(port->partner,
PD_VDO_SVDM_VER(p[0]));
+ svdm_version = PD_VDO_SVDM_VER(p[0]);
+ }
- tcpm_ams_start(port, DISCOVER_IDENTITY);
- /* 6.4.4.3.1: Only respond as UFP (device) */
- if (port->data_role == TYPEC_DEVICE &&
+ port->ams = DISCOVER_IDENTITY;
+ /*
+ * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
+ * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
+ * "wrong configuration" or "Unrecognized"
+ */
+ if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
port->nr_snk_vdo) {
- /*
- * Product Type DFP and Connector Type are not defined in SVDM
- * version 1.0 and shall be set to zero.
- */
- if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
- response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
- & ~IDH_CONN_MASK;
- else
- response[1] = port->snk_vdo[0];
- for (i = 1; i < port->nr_snk_vdo; i++)
- response[i + 1] = port->snk_vdo[i];
- rlen = port->nr_snk_vdo + 1;
+ if (svdm_version < SVDM_VER_2_0) {
+ for (i = 0; i < port->nr_snk_vdo_v1; i++)
+ response[i + 1] = port->snk_vdo_v1[i];
+ rlen = port->nr_snk_vdo_v1 + 1;
+
+ } else {
+ for (i = 0; i < port->nr_snk_vdo; i++)
+ response[i + 1] = port->snk_vdo[i];
+ rlen = port->nr_snk_vdo + 1;
+ }
}
break;
case CMD_DISCOVER_SVID:
- tcpm_ams_start(port, DISCOVER_SVIDS);
+ port->ams = DISCOVER_SVIDS;
break;
case CMD_DISCOVER_MODES:
- tcpm_ams_start(port, DISCOVER_MODES);
+ port->ams = DISCOVER_MODES;
break;
case CMD_ENTER_MODE:
- tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
+ port->ams = DFP_TO_UFP_ENTER_MODE;
break;
case CMD_EXIT_MODE:
- tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
+ port->ams = DFP_TO_UFP_EXIT_MODE;
break;
case CMD_ATTENTION:
- tcpm_ams_start(port, ATTENTION);
/* Attention command does not have response */
*adev_action = ADEV_ATTENTION;
return 0;
@@ -1937,6 +1942,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
+ tcpm_ams_finish(port);
+ } else {
tcpm_ams_finish(port);
}
break;
@@ -2183,20 +2191,25 @@ static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
if (!type) {
tcpm_log(port, "Alert message received with no type");
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
return;
}
/* Just handling non-battery alerts for now */
if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = GET_STATUS_SEND;
+ tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
+ } else {
+ /*
+ * Do not check SinkTxOk here in case the Source doesn't set its Rp to
+ * SinkTxOk in time.
+ */
+ port->ams = GETTING_SOURCE_SINK_STATUS;
tcpm_set_state(port, GET_STATUS_SEND, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
}
+ } else {
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
}
}
@@ -2440,7 +2453,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
break;
case PD_DATA_ALERT:
- tcpm_handle_alert(port, msg->payload, cnt);
+ if (port->state != SRC_READY && port->state != SNK_READY)
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ else
+ tcpm_handle_alert(port, msg->payload, cnt);
break;
case PD_DATA_BATT_STATUS:
case PD_DATA_GET_COUNTRY_INFO:
@@ -2764,24 +2782,16 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
switch (type) {
case PD_EXT_STATUS:
- /*
- * If PPS related events raised then get PPS status to clear
- * (see USB PD 3.0 Spec, 6.5.2.4)
- */
- if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
- USB_PD_EXT_SDB_PPS_EVENTS)
- tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
- GETTING_SOURCE_SINK_STATUS, 0);
-
- else
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
- break;
case PD_EXT_PPS_STATUS:
- /*
- * For now the PPS status message is used to clear events
- * and nothing more.
- */
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ if (port->ams == GETTING_SOURCE_SINK_STATUS) {
+ tcpm_ams_finish(port);
+ tcpm_set_state(port, ready_state(port), 0);
+ } else {
+ /* unexpected Status or PPS_Status Message */
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ }
break;
case PD_EXT_SOURCE_CAP_EXT:
case PD_EXT_GET_BATT_CAP:
@@ -5947,6 +5957,22 @@ sink:
return ret;
}
+ /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
+ if (port->nr_snk_vdo) {
+ ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return -ENODATA;
+
+ port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
+ port->snk_vdo_v1,
+ port->nr_snk_vdo_v1);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -6312,6 +6338,11 @@ void tcpm_unregister_port(struct tcpm_port *port)
{
int i;
+ hrtimer_cancel(&port->send_discover_timer);
+ hrtimer_cancel(&port->enable_frs_timer);
+ hrtimer_cancel(&port->vdm_state_machine_timer);
+ hrtimer_cancel(&port->state_machine_timer);
+
tcpm_reset_port(port);
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 79ae63950050..5d125339687a 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -378,7 +378,7 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
const u8 *data = (void *)msg;
int i;
- for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+ for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
data[i]);
if (ret)
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index b433169ef6fa..b7d104c80d85 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1253,6 +1253,7 @@ err_unregister:
}
err_reset:
+ memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
err:
return ret;
diff --git a/fs/afs/main.c b/fs/afs/main.c
index b2975256dadb..179004b15566 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -203,8 +203,8 @@ static int __init afs_init(void)
goto error_fs;
afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
- if (IS_ERR(afs_proc_symlink)) {
- ret = PTR_ERR(afs_proc_symlink);
+ if (!afs_proc_symlink) {
+ ret = -ENOMEM;
goto error_proc;
}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3edb6204b937..e9ccaa3baf2e 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -730,7 +730,7 @@ static int afs_writepages_region(struct address_space *mapping,
return ret;
}
- start += ret * PAGE_SIZE;
+ start += ret;
cond_resched();
} while (wbc->nr_to_write > 0);
@@ -837,6 +837,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
unsigned long priv;
+ vm_fault_t ret = VM_FAULT_RETRY;
_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -848,14 +849,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page) &&
wait_on_page_fscache_killable(page) < 0)
- return VM_FAULT_RETRY;
+ goto out;
#endif
if (wait_on_page_writeback_killable(page))
- return VM_FAULT_RETRY;
+ goto out;
if (lock_page_killable(page) < 0)
- return VM_FAULT_RETRY;
+ goto out;
/* We mustn't change page->private until writeback is complete as that
* details the portion of the page we need to write back and we might
@@ -863,7 +864,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
*/
if (wait_on_page_writeback_killable(page) < 0) {
unlock_page(page);
- return VM_FAULT_RETRY;
+ goto out;
}
priv = afs_page_dirty(page, 0, thp_size(page));
@@ -877,8 +878,10 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
}
file_update_time(file);
+ ret = VM_FAULT_LOCKED;
+out:
sb_end_pagefault(inode->i_sb);
- return VM_FAULT_LOCKED;
+ return ret;
}
/*
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index aa57bdc8fc89..6d5c4e45cfef 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2442,16 +2442,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
if (!--cache->ro) {
- num_bytes = cache->length - cache->reserved -
- cache->pinned - cache->bytes_super -
- cache->zone_unusable - cache->used;
- sinfo->bytes_readonly -= num_bytes;
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes back */
cache->zone_unusable = cache->alloc_offset - cache->used;
sinfo->bytes_zone_unusable += cache->zone_unusable;
sinfo->bytes_readonly -= cache->zone_unusable;
}
+ num_bytes = cache->length - cache->reserved -
+ cache->pinned - cache->bytes_super -
+ cache->zone_unusable - cache->used;
+ sinfo->bytes_readonly -= num_bytes;
list_del_init(&cache->ro_list);
}
spin_unlock(&cache->lock);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c9a3036c23bf..8d386a5587ee 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2648,6 +2648,24 @@ static int validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
+ if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+ BTRFS_FSID_SIZE)) {
+ btrfs_err(fs_info,
+ "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+ fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+ ret = -EINVAL;
+ }
+
+ if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+ memcmp(fs_info->fs_devices->metadata_uuid,
+ fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+ btrfs_err(fs_info,
+"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+ fs_info->super_copy->metadata_uuid,
+ fs_info->fs_devices->metadata_uuid);
+ ret = -EINVAL;
+ }
+
if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
@@ -3279,14 +3297,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
disk_super = fs_info->super_copy;
- ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
- BTRFS_FSID_SIZE));
-
- if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
- ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
- fs_info->super_copy->metadata_uuid,
- BTRFS_FSID_SIZE));
- }
features = btrfs_super_flags(disk_super);
if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3b10d98b4ebb..55f68422061d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1094,7 +1094,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
int del_nr = 0;
int del_slot = 0;
int recow;
- int ret;
+ int ret = 0;
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
@@ -1315,7 +1315,7 @@ again:
}
out:
btrfs_free_path(path);
- return 0;
+ return ret;
}
/*
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 362d14db1e38..dbcf8bb2f3b9 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3302,6 +3302,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* begins and releases it only after writing its superblock.
*/
mutex_lock(&fs_info->tree_log_mutex);
+
+ /*
+ * The previous transaction writeout phase could have failed, and thus
+ * marked the fs in an error state. We must not commit here, as we
+ * could have updated our generation in the super_for_commit and
+ * writing the super here would result in transid mismatches. If there
+ * is an error here just bail.
+ */
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+ ret = -EIO;
+ btrfs_set_log_full_commit(trans);
+ btrfs_abort_transaction(trans, ret);
+ mutex_unlock(&fs_info->tree_log_mutex);
+ goto out_wake_log_root;
+ }
+
btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
ret = write_all_supers(fs_info, 1);
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 1bb8ee97aae0..f1f3b10d1dbb 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -150,6 +150,18 @@ static inline u32 sb_zone_number(int shift, int mirror)
return (u32)zone;
}
+static inline sector_t zone_start_sector(u32 zone_number,
+ struct block_device *bdev)
+{
+ return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
+}
+
+static inline u64 zone_start_physical(u32 zone_number,
+ struct btrfs_zoned_device_info *zone_info)
+{
+ return (u64)zone_number << zone_info->zone_size_shift;
+}
+
/*
* Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
* device into static sized chunks and fake a conventional zone on each of
@@ -405,8 +417,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (sb_zone + 1 >= zone_info->nr_zones)
continue;
- sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
- ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
+ ret = btrfs_get_dev_zones(device,
+ zone_start_physical(sb_zone, zone_info),
&zone_info->sb_zones[sb_pos],
&nr_zones);
if (ret)
@@ -721,7 +733,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
if (sb_zone + 1 >= nr_zones)
return -ENOENT;
- ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
+ ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
zones);
if (ret < 0)
@@ -826,7 +838,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
return -ENOENT;
return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
- sb_zone << zone_sectors_shift,
+ zone_start_sector(sb_zone, bdev),
zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}
@@ -878,7 +890,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
if (!(end <= sb_zone ||
sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
have_sb = true;
- pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
+ pos = zone_start_physical(
+ sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
break;
}
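/*
 * Illustrative sketch (plain userspace C, not kernel code) of the shift-based
 * conversion performed by the zone_start_sector()/zone_start_physical()
 * helpers added above: with power-of-two zone sizes, a zone number becomes a
 * sector or byte offset by a single left shift. The zone geometry below is an
 * assumed example, not taken from any particular device.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t zone_sectors = 524288;		/* 256 MiB zones, 512-byte sectors */
	unsigned int zone_size_shift = 28;	/* log2(zone size in bytes)        */
	uint32_t sb_zone = 2;			/* e.g. a superblock log zone      */

	/* sector offset: zone number << log2(sectors per zone) */
	uint64_t sector = (uint64_t)sb_zone << __builtin_ctzll(zone_sectors);
	/* byte offset: zone number << log2(zone size in bytes) */
	uint64_t physical = (uint64_t)sb_zone << zone_size_shift;

	printf("zone %u starts at sector %llu, byte offset %llu\n",
	       sb_zone, (unsigned long long)sector, (unsigned long long)physical);
	return 0;
}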
diff --git a/fs/coredump.c b/fs/coredump.c
index 2868e3e171ae..c3d8fc14b993 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -519,7 +519,7 @@ static bool dump_interrupted(void)
* but then we need to teach dump_write() to restart and clear
* TIF_SIGPENDING.
*/
- return signal_pending(current);
+ return fatal_signal_pending(current) || freezing(current);
}
static void wait_for_dump_helpers(struct file *file)
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index e813acfaa6e8..ba7c01cd9a5d 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -893,7 +893,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
copy[copy_len] = '\n';
- ret = simple_read_from_buffer(user_buf, count, ppos, copy, copy_len);
+ ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
kfree(copy);
return ret;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 55efd3dd04f6..30dee68458c7 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -735,6 +735,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
__SetPageUptodate(page);
error = huge_add_to_page_cache(page, mapping, index);
if (unlikely(error)) {
+ restore_reserve_on_error(h, &pseudo_vma, addr, page);
put_page(page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 42380ed563c4..fa8794c61af7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -783,6 +783,11 @@ struct io_task_work {
task_work_func_t func;
};
+enum {
+ IORING_RSRC_FILE = 0,
+ IORING_RSRC_BUFFER = 1,
+};
+
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -9671,7 +9676,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
- IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
+ IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+ IORING_FEAT_RSRC_TAGS;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -9911,7 +9917,7 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
}
static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
- unsigned size)
+ unsigned size, unsigned type)
{
struct io_uring_rsrc_update2 up;
@@ -9919,13 +9925,13 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
if (copy_from_user(&up, arg, sizeof(up)))
return -EFAULT;
- if (!up.nr)
+ if (!up.nr || up.resv)
return -EINVAL;
- return __io_register_rsrc_update(ctx, up.type, &up, up.nr);
+ return __io_register_rsrc_update(ctx, type, &up, up.nr);
}
static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
- unsigned int size)
+ unsigned int size, unsigned int type)
{
struct io_uring_rsrc_register rr;
@@ -9936,10 +9942,10 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
memset(&rr, 0, sizeof(rr));
if (copy_from_user(&rr, arg, size))
return -EFAULT;
- if (!rr.nr)
+ if (!rr.nr || rr.resv || rr.resv2)
return -EINVAL;
- switch (rr.type) {
+ switch (type) {
case IORING_RSRC_FILE:
return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
rr.nr, u64_to_user_ptr(rr.tags));
@@ -9961,8 +9967,10 @@ static bool io_register_op_must_quiesce(int op)
case IORING_REGISTER_PROBE:
case IORING_REGISTER_PERSONALITY:
case IORING_UNREGISTER_PERSONALITY:
- case IORING_REGISTER_RSRC:
- case IORING_REGISTER_RSRC_UPDATE:
+ case IORING_REGISTER_FILES2:
+ case IORING_REGISTER_FILES_UPDATE2:
+ case IORING_REGISTER_BUFFERS2:
+ case IORING_REGISTER_BUFFERS_UPDATE:
return false;
default:
return true;
@@ -10088,11 +10096,19 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
case IORING_REGISTER_RESTRICTIONS:
ret = io_register_restrictions(ctx, arg, nr_args);
break;
- case IORING_REGISTER_RSRC:
- ret = io_register_rsrc(ctx, arg, nr_args);
+ case IORING_REGISTER_FILES2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_FILES_UPDATE2:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_BUFFERS2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
break;
- case IORING_REGISTER_RSRC_UPDATE:
- ret = io_register_rsrc_update(ctx, arg, nr_args);
+ case IORING_REGISTER_BUFFERS_UPDATE:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_BUFFER);
break;
default:
ret = -EINVAL;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index cfeaadf56bf0..330f65727c45 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -406,7 +406,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
if (cl_init->hostname == NULL) {
WARN_ON(1);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/* see if the client already exists */
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 065cb04222a1..543d916f79ab 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -205,6 +205,7 @@ struct nfs4_exception {
struct inode *inode;
nfs4_stateid *stateid;
long timeout;
+ unsigned char task_is_privileged : 1;
unsigned char delay : 1,
recovering : 1,
retry : 1;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 889a9f4c0310..42719384e25f 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -435,8 +435,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
*/
nfs_mark_client_ready(clp, -EPERM);
}
- nfs_put_client(clp);
clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
+ nfs_put_client(clp);
return old;
error:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0cd965882232..e653654c10bc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -589,6 +589,8 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
goto out_retry;
}
if (exception->recovering) {
+ if (exception->task_is_privileged)
+ return -EDEADLOCK;
ret = nfs4_wait_clnt_recover(clp);
if (test_bit(NFS_MIG_FAILED, &server->mig_status))
return -EIO;
@@ -614,6 +616,8 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
goto out_retry;
}
if (exception->recovering) {
+ if (exception->task_is_privileged)
+ return -EDEADLOCK;
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
@@ -3878,6 +3882,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
+ server->caps |= NFS_CAP_SECURITY_LABEL;
+#endif
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3898,10 +3906,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
- if (!(res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL))
- server->fattr_valid &= ~NFS_ATTR_FATTR_V4_SECURITY_LABEL;
-#endif
memcpy(server->attr_bitmask_nl, res.attr_bitmask,
sizeof(server->attr_bitmask));
server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
@@ -5968,6 +5972,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
do {
err = __nfs4_proc_set_acl(inode, buf, buflen);
trace_nfs4_set_acl(inode, err);
+ if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
+ /*
+ * no need to retry since the kernel
+ * isn't involved in encoding the ACEs.
+ */
+ err = -EINVAL;
+ break;
+ }
err = nfs4_handle_exception(NFS_SERVER(inode), err,
&exception);
} while (exception.retry);
@@ -6409,6 +6421,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
struct nfs4_exception exception = {
.inode = data->inode,
.stateid = &data->stateid,
+ .task_is_privileged = data->args.seq_args.sa_privileged,
};
if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6532,7 +6545,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
data = kzalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
- nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
nfs4_state_protect(server->nfs_client,
NFS_SP4_MACH_CRED_CLEANUP,
@@ -6563,6 +6575,12 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
}
}
+ if (!data->inode)
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+ 1);
+ else
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+ 0);
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
@@ -9640,15 +9658,20 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
&task_setup_data.rpc_client, &msg);
dprintk("--> %s\n", __func__);
+ lrp->inode = nfs_igrab_and_active(lrp->args.inode);
if (!sync) {
- lrp->inode = nfs_igrab_and_active(lrp->args.inode);
if (!lrp->inode) {
nfs4_layoutreturn_release(lrp);
return -EAGAIN;
}
task_setup_data.flags |= RPC_TASK_ASYNC;
}
- nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
+ if (!lrp->inode)
+ nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+ 1);
+ else
+ nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+ 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index eb1ef3462e84..ccef43e02b48 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -430,10 +430,6 @@ TRACE_DEFINE_ENUM(O_CLOEXEC);
{ O_NOATIME, "O_NOATIME" }, \
{ O_CLOEXEC, "O_CLOEXEC" })
-TRACE_DEFINE_ENUM(FMODE_READ);
-TRACE_DEFINE_ENUM(FMODE_WRITE);
-TRACE_DEFINE_ENUM(FMODE_EXEC);
-
#define show_fmode_flags(mode) \
__print_flags(mode, "|", \
{ ((__force unsigned long)FMODE_READ), "READ" }, \
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index be5b6d2c01e7..64864fb40b40 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -471,7 +471,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
info_type, fanotify_info_name(info),
info->name_len, buf, count);
if (ret < 0)
- return ret;
+ goto out_close_fd;
buf += ret;
count -= ret;
@@ -519,7 +519,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
fanotify_event_object_fh(event),
info_type, dot, dot_len, buf, count);
if (ret < 0)
- return ret;
+ goto out_close_fd;
buf += ret;
count -= ret;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 58bbf334265b..9cbd915025ad 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2674,6 +2674,13 @@ out:
}
#ifdef CONFIG_SECURITY
+static int proc_pid_attr_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
+ return 0;
+}
+
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
@@ -2704,7 +2711,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
int rv;
/* A task may only write when it was the opener. */
- if (file->f_cred != current_real_cred())
+ if (file->private_data != current->mm)
return -EPERM;
rcu_read_lock();
@@ -2754,9 +2761,11 @@ out:
}
static const struct file_operations proc_pid_attr_operations = {
+ .open = proc_pid_attr_open,
.read = proc_pid_attr_read,
.write = proc_pid_attr_write,
.llseek = generic_file_llseek,
+ .release = mem_release,
};
#define LSM_DIR_OPS(LSM) \
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 40a9c101565e..17325416e2de 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -960,6 +960,7 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
+ *(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#else
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index fef3ef65967f..e6526b138174 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -106,6 +106,10 @@
* <20:16> :: Reserved, Shall be set to zero
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF 0
+
/* SOP Product Type (UFP) */
#define IDH_PTYPE_NOT_UFP 0
#define IDH_PTYPE_HUB 1
@@ -163,10 +167,10 @@
#define UFP_VDO_VER1_2 2
/* Device Capability */
-#define DEV_USB2_CAPABLE BIT(0)
-#define DEV_USB2_BILLBOARD BIT(1)
-#define DEV_USB3_CAPABLE BIT(2)
-#define DEV_USB4_CAPABLE BIT(3)
+#define DEV_USB2_CAPABLE (1 << 0)
+#define DEV_USB2_BILLBOARD (1 << 1)
+#define DEV_USB3_CAPABLE (1 << 2)
+#define DEV_USB4_CAPABLE (1 << 3)
/* Connector Type */
#define UFP_RECEPTACLE 2
@@ -191,9 +195,9 @@
/* Alternate Modes */
#define UFP_ALTMODE_NOT_SUPP 0
-#define UFP_ALTMODE_TBT3 BIT(0)
-#define UFP_ALTMODE_RECFG BIT(1)
-#define UFP_ALTMODE_NO_RECFG BIT(2)
+#define UFP_ALTMODE_TBT3 (1 << 0)
+#define UFP_ALTMODE_RECFG (1 << 1)
+#define UFP_ALTMODE_NO_RECFG (1 << 2)
/* USB Highest Speed */
#define UFP_USB2_ONLY 0
@@ -217,9 +221,9 @@
* <4:0> :: Port number
*/
#define DFP_VDO_VER1_1 1
-#define HOST_USB2_CAPABLE BIT(0)
-#define HOST_USB3_CAPABLE BIT(1)
-#define HOST_USB4_CAPABLE BIT(2)
+#define HOST_USB2_CAPABLE (1 << 0)
+#define HOST_USB3_CAPABLE (1 << 1)
+#define HOST_USB4_CAPABLE (1 << 2)
#define DFP_RECEPTACLE 2
#define DFP_CAPTIVE 3
@@ -228,7 +232,25 @@
| ((pnum) & 0x1f))
/*
- * Passive Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-A/B/C/Captive (00b == A, 01b == B, 10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -244,7 +266,7 @@
* <4:3> :: Reserved, Shall be set to zero
* <2:0> :: USB highest speed
*
- * Active Cable VDO 1
+ * Active Cable VDO 1 (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -266,7 +288,9 @@
#define CABLE_VDO_VER1_0 0
#define CABLE_VDO_VER1_3 3
-/* Connector Type */
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
#define CABLE_CAPTIVE 3
@@ -303,12 +327,22 @@
#define CABLE_CURR_3A 1
#define CABLE_CURR_5A 2
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+
/* USB Highest Speed */
#define CABLE_USB2_ONLY 0
#define CABLE_USB32_GEN1 1
#define CABLE_USB32_4_GEN2 2
#define CABLE_USB4_GEN3 3
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
| ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
@@ -374,6 +408,35 @@
| (iso) << 2 | (gen))
/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
* VPD VDO
* ---------
* <31:28> :: HW version
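/*
 * Illustrative sketch (plain userspace C) of how the PD Rev2.0 Cable VDO
 * layout documented above maps onto the VDO_CABLE() packing macro added in
 * this hunk. The macro body and the CABLE_* constants are copied from the
 * hunk; the chosen field values and the inline decoding are made up for
 * demonstration only.
 */
#include <stdio.h>

#define CABLE_CTYPE		2
#define CABLE_CURR_3A		1
#define CABLE_USBSS_U31_GEN1	1

#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
	(((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18	\
	 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10	\
	 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5	\
	 | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))

int main(void)
{
	/* Type-C plug, <10ns latency, 3A Vbus, Vbus through cable, USB 3.1 Gen1 */
	unsigned int vdo = VDO_CABLE(1, 0, CABLE_CTYPE, 1, 0, 0, 0, 0, 0,
				     CABLE_CURR_3A, 1, 0, CABLE_USBSS_U31_GEN1);

	printf("cable VDO     = 0x%08x\n", vdo);
	printf("plug type     = %u\n", (vdo >> 18) & 0x3);	/* bits <19:18> */
	printf("vbus current  = %u\n", (vdo >> 5) & 0x3);	/* bits <6:5>   */
	printf("usb signaling = %u\n", vdo & 0x7);		/* bits <2:0>   */
	return 0;
}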
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index f180240dc95f..11e555cfaecb 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -37,7 +37,6 @@ bool topology_scale_freq_invariant(void);
enum scale_freq_source {
SCALE_FREQ_SOURCE_CPUFREQ = 0,
SCALE_FREQ_SOURCE_ARCH,
- SCALE_FREQ_SOURCE_CPPC,
};
struct scale_freq_data {
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index c043b8d2b17b..183ddd5fd072 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -199,6 +199,7 @@
* must end with any of these keywords:
* break;
* fallthrough;
+ * continue;
* goto <label>;
* return [expression];
*
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 8b2b1d68b954..136b8d97d8c0 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -3,6 +3,7 @@
#define __LINUX_ENTRYKVM_H
#include <linux/entry-common.h>
+#include <linux/tick.h>
/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
static inline void xfer_to_guest_mode_prepare(void)
{
lockdep_assert_irqs_disabled();
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
}
/**
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9626fda5efce..2a8ebe6c222e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_page(struct page *page)
{
@@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}
static inline bool is_huge_zero_pud(pud_t pud)
@@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pud(pud_t pud)
{
return false;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b92f25ccef58..6504346a1947 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -149,6 +149,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
@@ -339,6 +340,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
+static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+ return 0;
+}
+
static inline void putback_active_hugepage(struct page *page)
{
}
@@ -604,6 +610,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct page *page);
/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 76102efbf079..8583ed3ff344 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1185,7 +1185,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+ * The index was checked originally in search_memslots. To keep a
+ * malicious guest from building a Spectre gadget out of e.g. page
+ * table walks, do not let the processor speculate loads outside
+ * the guest's registered memslots.
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
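/*
 * Illustrative sketch (plain userspace C) of the index-clamping idea behind
 * array_index_nospec() mentioned in the comment above: derive a branchless
 * mask that is all-ones when the index is in range and zero otherwise, so a
 * mispredicted bounds check cannot steer a speculative load outside the
 * array. This is a simplified rendering of the concept; the kernel's real
 * helpers are per-architecture and differ in detail.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t index_mask(uint64_t index, uint64_t size)
{
	/* top bit of (index | (size - 1 - index)) is set iff index >= size */
	return ~(uint64_t)((int64_t)(index | (size - 1 - index)) >> 63);
}

int main(void)
{
	uint64_t npages = 512;	/* e.g. the number of pages in a memslot */
	uint64_t offsets[] = { 0, 511, 512, 1 << 20 };

	for (int i = 0; i < 4; i++) {
		uint64_t off = offsets[i] & index_mask(offsets[i], npages);

		printf("offset %8llu clamps to %llu\n",
		       (unsigned long long)offsets[i],
		       (unsigned long long)off);
	}
	return 0;
}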
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
index a57af878fd0c..4a5966475a35 100644
--- a/include/linux/mfd/rohm-bd70528.h
+++ b/include/linux/mfd/rohm-bd70528.h
@@ -26,9 +26,7 @@ struct bd70528_data {
struct mutex rtc_timer_lock;
};
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
+#define BD70528_BUCK_VOLTS 0x10
#define BD70528_LDO_VOLTS 0x20
#define BD70528_REG_BUCK1_EN 0x0F
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
index c7ab69c87ee8..3b5f3a7db4bd 100644
--- a/include/linux/mfd/rohm-bd71828.h
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -26,11 +26,11 @@ enum {
BD71828_REGULATOR_AMOUNT,
};
-#define BD71828_BUCK1267_VOLTS 0xEF
-#define BD71828_BUCK3_VOLTS 0x10
-#define BD71828_BUCK4_VOLTS 0x20
-#define BD71828_BUCK5_VOLTS 0x10
-#define BD71828_LDO_VOLTS 0x32
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
/* LDO6 is fixed 1.8V voltage */
#define BD71828_LDO_6_VOLTAGE 1800000
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 236a7d04f891..30bb59fe970c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -630,6 +630,7 @@ struct mlx4_caps {
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 020a8f7fdbdd..f8902bcd91e2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -542,6 +542,10 @@ struct mlx5_core_roce {
enum {
MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+ /* Set during device detach to block any further device
+ * creation/deletion on driver rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
};
struct mlx5_adev {
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 028f442530cf..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_hairpin_params *params);
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c274f75efcf9..8ae31622deef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1719,6 +1719,7 @@ struct zap_details {
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
+ struct page *single_page; /* Locked page to be unmapped */
};
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1766,6 +1767,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
+void unmap_mapping_page(struct page *page);
void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
@@ -1786,6 +1788,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
BUG();
return -EFAULT;
}
+static inline void unmap_mapping_page(struct page *page) { }
static inline void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5aacc1c10a45..8f0fb62e8975 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -445,13 +445,6 @@ struct mm_struct {
*/
atomic_t has_pinned;
- /**
- * @write_protect_seq: Locked when any thread is write
- * protecting pages mapped by this mm to enforce a later COW,
- * for instance during page table copying for fork().
- */
- seqcount_t write_protect_seq;
-
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
@@ -460,6 +453,18 @@ struct mm_struct {
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
+ /*
+ * With some kernel configs, mmap_lock currently sits at offset
+ * 0x120 inside 'mm_struct', which is close to optimal: its two
+ * hot fields, 'count' and 'owner', land in two different
+ * cachelines. When mmap_lock is highly contended, both of
+ * these fields are accessed frequently, so the current layout
+ * helps to reduce cache bouncing.
+ *
+ * So please be careful when adding new fields before
+ * mmap_lock, as that can easily push the two fields into one
+ * cacheline.
+ */
struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -480,7 +485,15 @@ struct mm_struct {
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
+
spinlock_t arg_lock; /* protect the below fields */
+
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
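/*
 * Illustrative sketch (plain userspace C) of the concern raised in the layout
 * comment added to mm_struct above: the byte offset at which a lock lands
 * decides whether its hot fields share a cache line. The structure below is
 * invented for the example (it is not the kernel's mm_struct or rw_semaphore);
 * only the offsetof/cache-line arithmetic is the point.
 */
#include <stdio.h>
#include <stddef.h>

#define CACHELINE	64

struct demo_mm {
	char	early_fields[0x120];	/* stand-in for everything before the lock */
	struct {
		long	count;		/* hot field #1                           */
		char	other[56];	/* stand-in for the fields between them   */
		void	*owner;		/* hot field #2                           */
	} lock;				/* stand-in for the mmap_lock             */
};

int main(void)
{
	size_t count_off = offsetof(struct demo_mm, lock.count);
	size_t owner_off = offsetof(struct demo_mm, lock.owner);

	printf("count at offset %zu -> cache line %zu\n", count_off, count_off / CACHELINE);
	printf("owner at offset %zu -> cache line %zu\n", owner_off, owner_off / CACHELINE);
	return 0;
}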
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d47fd33b228..51d7f1b8b32a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -235,7 +235,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
* @ppm: Parts per million, but with a 16 bit binary fractional field
*/
-extern s32 scaled_ppm_to_ppb(long ppm);
+extern long scaled_ppm_to_ppb(long ppm);
/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
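/*
 * Illustrative sketch (plain userspace C) of the arithmetic behind the
 * scaled_ppm_to_ppb() prototype changed above: the input is parts per million
 * with a 16-bit binary fractional part, so ppb = scaled_ppm * 1000 / 2^16,
 * which reduces to scaled_ppm * 125 / 2^13. This demonstrates the conversion
 * only and is not the kernel's implementation.
 */
#include <stdio.h>
#include <stdint.h>

static long scaled_ppm_to_ppb_demo(long scaled_ppm)
{
	/* widen first so large adjustments do not overflow the product */
	int64_t ppb = (int64_t)scaled_ppm * 125;

	return (long)(ppb >> 13);
}

int main(void)
{
	/* 1 ppm is 1 << 16 in scaled-ppm units and should give 1000 ppb */
	printf("%ld ppb\n", scaled_ppm_to_ppb_demo(1L << 16));
	/* half a ppm */
	printf("%ld ppb\n", scaled_ppm_to_ppb_demo(1L << 15));
	return 0;
}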
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..8d04e7deedc6 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,6 +91,7 @@ enum ttu_flags {
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
+ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 6f155f99aa16..4ab7bfc675f1 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1109,6 +1109,7 @@ struct pcr_ops {
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG};
#define ASPM_L1_1_EN BIT(0)
#define ASPM_L1_2_EN BIT(1)
@@ -1234,6 +1235,7 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ enum ASPM_MODE aspm_mode;
bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2c881384517..28a98fc4ded4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -350,11 +350,19 @@ struct load_weight {
* Only for tasks we track a moving average of the past instantaneous
* estimated utilization. This allows to absorb sporadic drops in utilization
* of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB of util_est.enqueued at dequeue
+ * time. Since the max value of util_est.enqueued for a task is 1024 (the PELT
+ * util_avg of a task), it is safe to use the MSB.
*/
struct util_est {
unsigned int enqueued;
unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
} __attribute__((__aligned__(sizeof(u64))));
/*
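The flag scheme described above boils down to simple bit masking: values never exceed 1024, so bit 31 is free to carry the marker and must be stripped before the field is used as a utilization number. A minimal userspace sketch with illustrative values:

#include <stdio.h>

#define UTIL_AVG_UNCHANGED 0x80000000u

static unsigned int enqueued_value(unsigned int enqueued)
{
	return enqueued & ~UTIL_AVG_UNCHANGED;	/* strip the flag */
}

int main(void)
{
	unsigned int enqueued = 768;		/* plain utilization */

	enqueued |= UTIL_AVG_UNCHANGED;		/* mark "not updated since dequeue" */
	printf("flagged=%#x value=%u\n", enqueued, enqueued_value(enqueued));
	return 0;
}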
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b8fc5c53ba6f..0d8e3dcb7f88 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index d9b7c9132c2f..6430a94c6981 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -23,6 +23,16 @@
#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
+/* Clear all flags but only keep swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ if (pte_swp_uffd_wp(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ return pte;
+}
+
/*
* Store a type+offset into a swp_entry_t in an arch-independent format
*/
@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
- if (pte_swp_soft_dirty(pte))
- pte = pte_swp_clear_soft_dirty(pte);
- if (pte_swp_uffd_wp(pte))
- pte = pte_swp_clear_uffd_wp(pte);
+ pte = pte_swp_clear_flags(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
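The new pte_swp_clear_flags() helper just consolidates the masking of software-only PTE bits before the conversion to a swp_entry_t. A simplified userspace model of that pattern, with made-up bit positions since the real encodings are arch-specific:

#include <stdint.h>
#include <stdio.h>

#define SWP_SOFT_DIRTY	(1u << 30)	/* illustrative positions only */
#define SWP_UFFD_WP	(1u << 31)

static uint32_t swp_pte_clear_flags(uint32_t pte)
{
	/* keep only the bits that encode the swap type + offset */
	return pte & ~(SWP_SOFT_DIRTY | SWP_UFFD_WP);
}

int main(void)
{
	uint32_t pte = 0x1234 | SWP_SOFT_DIRTY | SWP_UFFD_WP;

	printf("raw=%#x entry-bits=%#x\n", pte, swp_pte_clear_flags(pte));
	return 0;
}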
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7340613c7eff..1a0ff88fa107 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,7 @@
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
__tick_nohz_task_switch();
}
+static inline void tick_nohz_user_enter_prepare(void)
+{
+ if (tick_nohz_full_cpu(smp_processor_id()))
+ rcu_nocb_flush_deferred_wakeup();
+}
+
#endif
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index bf00259493e0..96b7ff66f074 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_RECEIVER_RESPONSE 15 /* 15ms max */
#define PD_T_SOURCE_ACTIVITY 45
#define PD_T_SINK_ACTIVITY 135
-#define PD_T_SINK_WAIT_CAP 240
+#define PD_T_SINK_WAIT_CAP 310 /* 310 - 620 ms */
#define PD_T_PS_TRANSITION 500
#define PD_T_SRC_TRANSITION 35
#define PD_T_DRP_SNK 40
diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
index 0eb83ce19597..b517ebc8f0ff 100644
--- a/include/linux/usb/pd_ext_sdb.h
+++ b/include/linux/usb/pd_ext_sdb.h
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
#define USB_PD_EXT_SDB_EVENT_OVP BIT(3)
#define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4)
-#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \
- USB_PD_EXT_SDB_EVENT_OTP | \
- USB_PD_EXT_SDB_EVENT_OVP)
-
#endif /* __LINUX_USB_PD_EXT_SDB_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 445b66c6eb7e..e89530d0d9c6 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5537,7 +5537,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
*
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
*
* @hw: the hardware struct of which the interfaces should be iterated over
* @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -6392,7 +6392,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
/**
* ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
- * of injected frames
+ * of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
* @skb: packet injected by userspace
* @dev: the &struct device of this 802.11 device
*/
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fa5887143f0d..bdc0459a595e 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -184,6 +184,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
@@ -203,13 +206,22 @@ static inline void net_ns_get_ownership(const struct net *net,
}
static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_NET_NS */
extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
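The !CONFIG_NET_NS stubs above return ERR_PTR(-EINVAL) rather than NULL so that callers keep a single error path. A hedged userspace sketch of the ERR_PTR/IS_ERR convention they rely on, re-implemented locally rather than taken from the kernel's linux/err.h:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)   { return (void *)error; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *ns = err_ptr(-EINVAL);	/* what the stub hands back */

	if (is_err(ns))
		printf("get_net_ns stub returned %ld\n", ptr_err(ns));
	return 0;
}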
diff --git a/include/net/sock.h b/include/net/sock.h
index 0e962d8bc73b..7a7058f4f265 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1934,7 +1934,8 @@ static inline u32 net_tx_rndhash(void)
static inline void sk_set_txhash(struct sock *sk)
{
- sk->sk_txhash = net_tx_rndhash();
+ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
}
static inline bool sk_rethink_txhash(struct sock *sk)
@@ -2206,9 +2207,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
- if (sk->sk_txhash) {
+ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+ u32 txhash = READ_ONCE(sk->sk_txhash);
+
+ if (txhash) {
skb->l4_hash = 1;
- skb->hash = sk->sk_txhash;
+ skb->hash = txhash;
}
}
@@ -2266,8 +2270,13 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
static inline int sock_error(struct sock *sk)
{
int err;
- if (likely(!sk->sk_err))
+
+ /* Avoid an atomic operation for the common case.
+ * This is racy since another cpu/thread can change sk_err under us.
+ */
+ if (likely(data_race(!sk->sk_err)))
return 0;
+
err = xchg(&sk->sk_err, 0);
return -err;
}
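The sk_txhash hunks pair a WRITE_ONCE() in the writer with a READ_ONCE() in the reader so the value is fetched exactly once and cannot be torn or re-read between the test and the use. A rough userspace analogue using C11 relaxed atomics; this illustrates the pattern, not the kernel macros themselves:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int txhash;

static void set_txhash(unsigned int v)
{
	atomic_store_explicit(&txhash, v, memory_order_relaxed);
}

static void set_hash_from_sk(unsigned int *skb_hash, int *l4_hash)
{
	unsigned int h = atomic_load_explicit(&txhash, memory_order_relaxed);

	if (h) {		/* read once, then test and use the same value */
		*l4_hash = 1;
		*skb_hash = h;
	}
}

int main(void)
{
	unsigned int hash = 0;
	int l4 = 0;

	set_txhash(0xdeadbeef);
	set_hash_from_sk(&hash, &l4);
	printf("hash=%#x l4=%d\n", hash, l4);
	return 0;
}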
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6de5a7fc066b..d2a942086fcb 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 7d6687618d80..d1b327036ae4 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -289,6 +289,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
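The new INADDR_DUMMY constant is the host-order value 0xc0000008, i.e. 192.0.0.8, the dummy address the comment cites from RFC 7600. A one-liner to confirm the dotted-quad form:

#include <stdio.h>

int main(void)
{
	unsigned long dummy = 0xc0000008UL;

	printf("%lu.%lu.%lu.%lu\n",
	       (dummy >> 24) & 0xff, (dummy >> 16) & 0xff,
	       (dummy >> 8) & 0xff, dummy & 0xff);
	return 0;
}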
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index e1ae46683301..162ff99ed2cb 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -280,6 +280,7 @@ struct io_uring_params {
#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
#define IORING_FEAT_EXT_ARG (1U << 8)
#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
+#define IORING_FEAT_RSRC_TAGS (1U << 10)
/*
* io_uring_register(2) opcodes and arguments
@@ -298,8 +299,12 @@ enum {
IORING_UNREGISTER_PERSONALITY = 10,
IORING_REGISTER_RESTRICTIONS = 11,
IORING_REGISTER_ENABLE_RINGS = 12,
- IORING_REGISTER_RSRC = 13,
- IORING_REGISTER_RSRC_UPDATE = 14,
+
+ /* extended with tagging */
+ IORING_REGISTER_FILES2 = 13,
+ IORING_REGISTER_FILES_UPDATE2 = 14,
+ IORING_REGISTER_BUFFERS2 = 15,
+ IORING_REGISTER_BUFFERS_UPDATE = 16,
/* this goes last */
IORING_REGISTER_LAST
@@ -312,14 +317,10 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};
-enum {
- IORING_RSRC_FILE = 0,
- IORING_RSRC_BUFFER = 1,
-};
-
struct io_uring_rsrc_register {
- __u32 type;
__u32 nr;
+ __u32 resv;
+ __u64 resv2;
__aligned_u64 data;
__aligned_u64 tags;
};
@@ -335,8 +336,8 @@ struct io_uring_rsrc_update2 {
__u32 resv;
__aligned_u64 data;
__aligned_u64 tags;
- __u32 type;
__u32 nr;
+ __u32 resv2;
};
/* Skip updating fd indexes set to this value in the fd table */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 94ba5163d4c5..c6a27574242d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6483,6 +6483,27 @@ struct bpf_sanitize_info {
bool mask_to_left;
};
+static struct bpf_verifier_state *
+sanitize_speculative_path(struct bpf_verifier_env *env,
+ const struct bpf_insn *insn,
+ u32 next_idx, u32 curr_idx)
+{
+ struct bpf_verifier_state *branch;
+ struct bpf_reg_state *regs;
+
+ branch = push_stack(env, next_idx, curr_idx, true);
+ if (branch && insn) {
+ regs = branch->frame[branch->curframe]->regs;
+ if (BPF_SRC(insn->code) == BPF_K) {
+ mark_reg_unknown(env, regs, insn->dst_reg);
+ } else if (BPF_SRC(insn->code) == BPF_X) {
+ mark_reg_unknown(env, regs, insn->dst_reg);
+ mark_reg_unknown(env, regs, insn->src_reg);
+ }
+ }
+ return branch;
+}
+
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
@@ -6566,12 +6587,26 @@ do_sim:
tmp = *dst_reg;
*dst_reg = *ptr_reg;
}
- ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+ ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+ env->insn_idx);
if (!ptr_is_dst_reg && ret)
*dst_reg = tmp;
return !ret ? REASON_STACK : 0;
}
+static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+
+ /* If we simulate paths under speculation, we don't update the
+ * insn as 'seen' such that when we verify unreachable paths in
+ * the non-speculative domain, sanitize_dead_code() can still
+ * rewrite/sanitize them.
+ */
+ if (!vstate->speculative)
+ env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+}
+
static int sanitize_err(struct bpf_verifier_env *env,
const struct bpf_insn *insn, int reason,
const struct bpf_reg_state *off_reg,
@@ -8750,14 +8785,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (err)
return err;
}
+
if (pred == 1) {
- /* only follow the goto, ignore fall-through */
+ /* Only follow the goto, ignore fall-through. If needed, push
+ * the fall-through branch for simulation under speculative
+ * execution.
+ */
+ if (!env->bypass_spec_v1 &&
+ !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ *insn_idx))
+ return -EFAULT;
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
- /* only follow fall-through branch, since
- * that's where the program will go
+ /* Only follow the fall-through branch, since that's where the
+ * program will go. If needed, push the goto branch for
+ * simulation under speculative execution.
*/
+ if (!env->bypass_spec_v1 &&
+ !sanitize_speculative_path(env, insn,
+ *insn_idx + insn->off + 1,
+ *insn_idx))
+ return -EFAULT;
return 0;
}
@@ -10630,7 +10679,7 @@ static int do_check(struct bpf_verifier_env *env)
}
regs = cur_regs(env);
- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+ sanitize_mark_insn_seen(env);
prev_insn_idx = env->insn_idx;
if (class == BPF_ALU || class == BPF_ALU64) {
@@ -10857,7 +10906,7 @@ process_bpf_exit:
return err;
env->insn_idx++;
- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+ sanitize_mark_insn_seen(env);
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
@@ -11366,6 +11415,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
struct bpf_insn *insn = new_prog->insnsi;
+ u32 old_seen = old_data[off].seen;
u32 prog_len;
int i;
@@ -11386,7 +11436,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++) {
- new_data[i].seen = env->pass_cnt;
+ /* Expand insni[off]'s seen count to the patched range. */
+ new_data[i].seen = old_seen;
new_data[i].zext_dst = insn_has_def32(env, insn + i);
}
env->insn_aux_data = new_data;
@@ -12710,6 +12761,9 @@ static void free_states(struct bpf_verifier_env *env)
* insn_aux_data was touched. These variables are compared to clear temporary
* data from failed pass. For testing and experiments do_check_common() can be
* run multiple times even when prior attempt to verify is unsuccessful.
+ *
+ * Note that special handling is needed on !env->bypass_spec_v1 if this is
+ * ever called outside of error path with subsequent program rejection.
*/
static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
{
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 8190b6bfc978..1f274d7fc934 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
struct cgroup *cgrp = kn->priv;
int ret;
+ /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
+ if (strchr(new_name_str, '\n'))
+ return -EINVAL;
+
if (kernfs_type(kn) != KERNFS_DIR)
return -ENOTDIR;
if (kn->parent != new_parent)
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 825284baaf46..684a6061a13a 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
VMCOREINFO_STRUCT_SIZE(mem_section);
VMCOREINFO_OFFSET(mem_section, section_mem_map);
+ VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
#endif
VMCOREINFO_STRUCT_SIZE(page);
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index a0b3b04fb596..bf16395b9e13 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -5,6 +5,7 @@
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
+#include <linux/tick.h>
#include "common.h"
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
local_irq_disable_exit_to_user();
/* Check if any of the above work has queued a deferred wakeup */
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
ti_work = READ_ONCE(current_thread_info()->flags);
}
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
lockdep_assert_irqs_disabled();
/* Flush pending rcuog wakeup before the last need_resched() check */
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
ti_work = exit_to_user_mode_loop(regs, ti_work);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6fee4a7e88d7..fe88d6eea3c2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4609,7 +4609,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
++ctx->pin_count;
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
return ctx;
}
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 23a7a0ba1388..db8c248ebc8c 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work)
if (!irq_work_claim(work))
return false;
- /*record irq_work call stack in order to print it in KASAN reports*/
- kasan_record_aux_stack(work);
-
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
__irq_work_queue_local(work);
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 7a1414622051..94232186fccb 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -391,6 +391,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
/* No obstacles. */
return vprintk_default(fmt, args);
}
+EXPORT_SYMBOL(vprintk);
void __init printk_safe_init(void)
{
@@ -411,4 +412,3 @@ void __init printk_safe_init(void)
/* Flush pending messages that did not have scheduled IRQ works. */
printk_safe_flush();
}
-EXPORT_SYMBOL(vprintk);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5226cc26a095..4ca80df205ce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6389,7 +6389,6 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
return __sched_setscheduler(p, attr, false, true);
}
-EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 9c882f20803e..c5aacbd492a1 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define P(F) __PS(#F, p->F)
+#define PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.util_avg);
P(se.avg.last_update_time);
P(se.avg.util_est.ewma);
- P(se.avg.util_est.enqueued);
+ PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3248e24a90b0..2c8a9352590d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3499,10 +3499,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long load_avg;
u64 load_sum = 0;
- s64 delta_sum;
u32 divider;
if (!runnable_sum)
@@ -3549,13 +3548,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
load_sum = (s64)se_weight(se) * runnable_sum;
load_avg = div_s64(load_sum, divider);
- delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
- delta_avg = load_avg - se->avg.load_avg;
+ delta = load_avg - se->avg.load_avg;
se->avg.load_sum = runnable_sum;
se->avg.load_avg = load_avg;
- add_positive(&cfs_rq->avg.load_avg, delta_avg);
- add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+ add_positive(&cfs_rq->avg.load_avg, delta);
+ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3766,11 +3765,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ /*
+ * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+ * See ___update_load_avg() for details.
+ */
+ u32 divider = get_pelt_divider(&cfs_rq->avg);
+
dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
- sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+ cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
- sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+ cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
@@ -3902,7 +3907,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
- return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+ return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
}
static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4007,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
* Reset EWMA on utilization increases, the moving average is used only
* to smooth utilization decreases.
*/
- ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+ ue.enqueued = task_util(p);
if (sched_feat(UTIL_EST_FASTUP)) {
if (ue.ewma < ue.enqueued) {
ue.ewma = ue.enqueued;
@@ -4051,6 +4056,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
+ ue.enqueued |= UTIL_AVG_UNCHANGED;
WRITE_ONCE(p->se.avg.util_est, ue);
trace_sched_util_est_se_tp(&p->se);
@@ -8030,7 +8036,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
- update_load_avg(cfs_rq_of(se), se, 0);
+ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 1462846d244e..cfe94ffd2b38 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
static inline void cfs_se_util_change(struct sched_avg *avg)
{
unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
if (!sched_feat(UTIL_EST))
return;
- /* Avoid store if the flag has been already set */
+ /* Avoid store if the flag has already been reset */
enqueued = avg->util_est.enqueued;
if (!(enqueued & UTIL_AVG_UNCHANGED))
return;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 828b091501ca..6784f27a3099 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2e8a3fde7104..72ef4dccbcc4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
static void print_ip_ins(const char *fmt, const unsigned char *p)
{
+ char ins[MCOUNT_INSN_SIZE];
int i;
+ if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+ printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+ return;
+ }
+
printk(KERN_CONT "%s", fmt);
for (i = 0; i < MCOUNT_INSN_SIZE; i++)
- printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+ printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
}
enum ftrace_bug_type ftrace_bug_type;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a21ef9cd2aae..d23a09d3eb37 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
};
static struct saved_cmdlines_buffer *savedcmd;
-/* temporary disable recording */
-static atomic_t trace_record_taskinfo_disabled __read_mostly;
-
static inline char *get_saved_cmdlines(int idx)
{
return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
{
if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
return true;
- if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
- return true;
if (!__this_cpu_read(trace_taskinfo_save))
return true;
return false;
@@ -2736,7 +2731,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
+ if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
trace_event_setup(entry, type, trace_ctx);
entry->array[0] = len;
return entry;
@@ -3998,9 +3993,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return ERR_PTR(-EBUSY);
#endif
- if (!iter->snapshot)
- atomic_inc(&trace_record_taskinfo_disabled);
-
if (*pos != iter->pos) {
iter->ent = NULL;
iter->cpu = 0;
@@ -4043,9 +4035,6 @@ static void s_stop(struct seq_file *m, void *p)
return;
#endif
- if (!iter->snapshot)
- atomic_dec(&trace_record_taskinfo_disabled);
-
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index c1637f90c8a3..4702efb00ff2 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
prev_time = READ_ONCE(trace_clock_struct.prev_time);
now = sched_clock_cpu(this_cpu);
- /* Make sure that now is always greater than prev_time */
+ /* Make sure that now is always greater than or equal to prev_time */
if ((s64)(now - prev_time) < 0)
- now = prev_time + 1;
+ now = prev_time;
/*
* If in an NMI context then dont risk lockups and simply return
@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
/* Reread prev_time in case it was already updated */
prev_time = READ_ONCE(trace_clock_struct.prev_time);
if ((s64)(now - prev_time) < 0)
- now = prev_time + 1;
+ now = prev_time;
trace_clock_struct.prev_time = now;
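The trace_clock_global() change means the clock now clamps to prev_time instead of inventing prev_time + 1 when a CPU's clock reads behind, so the global clock can no longer run ahead of any underlying clock. A minimal sketch of that wrap-safe clamp, as an illustration only (the real code also runs under a sequence lock):

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_to_prev(uint64_t now, uint64_t prev)
{
	if ((int64_t)(now - prev) < 0)	/* wrap-safe "now < prev" check */
		now = prev;
	return now;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)clamp_to_prev(95, 100));	/* -> 100 */
	printf("%llu\n", (unsigned long long)clamp_to_prev(105, 100));	/* -> 105 */
	return 0;
}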
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 63ed6b25deaa..6d2a0119fc58 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
@@ -98,6 +99,7 @@ retry:
__free_pages(zero_page, compound_order(zero_page));
goto retry;
}
+ WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
@@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
struct page *zero_page = xchg(&huge_zero_page, NULL);
BUG_ON(zero_page == NULL);
+ WRITE_ONCE(huge_zero_pfn, ~0UL);
__free_pages(zero_page, compound_order(zero_page));
return HPAGE_PMD_NR;
}
@@ -2044,7 +2047,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
count_vm_event(THP_SPLIT_PMD);
if (!vma_is_anonymous(vma)) {
- _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+ old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
/*
* We are going to unmap this huge page. So
* just go ahead and zap it
@@ -2053,16 +2056,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
zap_deposited_table(mm, pmd);
if (vma_is_special_huge(vma))
return;
- page = pmd_page(_pmd);
- if (!PageDirty(page) && pmd_dirty(_pmd))
- set_page_dirty(page);
- if (!PageReferenced(page) && pmd_young(_pmd))
- SetPageReferenced(page);
- page_remove_rmap(page, true);
- put_page(page);
+ if (unlikely(is_pmd_migration_entry(old_pmd))) {
+ swp_entry_t entry;
+
+ entry = pmd_to_swp_entry(old_pmd);
+ page = migration_entry_to_page(entry);
+ } else {
+ page = pmd_page(old_pmd);
+ if (!PageDirty(page) && pmd_dirty(old_pmd))
+ set_page_dirty(page);
+ if (!PageReferenced(page) && pmd_young(old_pmd))
+ SetPageReferenced(page);
+ page_remove_rmap(page, true);
+ put_page(page);
+ }
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ }
+
+ if (is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2338,17 +2350,17 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_page(struct page *page)
{
- enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
+ enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
- bool unmap_success;
VM_BUG_ON_PAGE(!PageHead(page), page);
if (PageAnon(page))
ttu_flags |= TTU_SPLIT_FREEZE;
- unmap_success = try_to_unmap(page, ttu_flags);
- VM_BUG_ON_PAGE(!unmap_success, page);
+ try_to_unmap(page, ttu_flags);
+
+ VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}
static void remap_page(struct page *page, unsigned int nr)
@@ -2659,7 +2671,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
struct deferred_split *ds_queue = get_deferred_split_queue(head);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
- int count, mapcount, extra_pins, ret;
+ int extra_pins, ret;
pgoff_t end;
VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
@@ -2718,7 +2730,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
}
unmap_page(head);
- VM_BUG_ON_PAGE(compound_mapcount(head), head);
/* block interrupt reentry in xa_lock and spinlock */
local_irq_disable();
@@ -2736,9 +2747,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
- count = page_count(head);
- mapcount = total_mapcount(head);
- if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+ if (page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
@@ -2758,16 +2767,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
__split_huge_page(page, list, end);
ret = 0;
} else {
- if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
- pr_alert("total_mapcount: %u, page_count(): %u\n",
- mapcount, count);
- if (PageTail(page))
- dump_page(head, NULL);
- dump_page(page, "total_mapcount(head) > 0");
- BUG();
- }
spin_unlock(&ds_queue->split_queue_lock);
-fail: if (mapping)
+fail:
+ if (mapping)
xa_unlock(&mapping->i_pages);
local_irq_enable();
remap_page(head, thp_nr_pages(head));
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5560b50876fb..e0a5f9cbbece 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2121,12 +2121,18 @@ out:
* be restored when a newly allocated huge page must be freed. It is
* to be called after calling vma_needs_reservation to determine if a
* reservation exists.
+ *
+ * vma_del_reservation is used in error paths where an entry in the reserve
+ * map was created during huge page allocation and must be removed. It is to
+ * be called after calling vma_needs_reservation to determine if a reservation
+ * exists.
*/
enum vma_resv_mode {
VMA_NEEDS_RESV,
VMA_COMMIT_RESV,
VMA_END_RESV,
VMA_ADD_RESV,
+ VMA_DEL_RESV,
};
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
@@ -2170,11 +2176,21 @@ static long __vma_reservation_common(struct hstate *h,
ret = region_del(resv, idx, idx + 1);
}
break;
+ case VMA_DEL_RESV:
+ if (vma->vm_flags & VM_MAYSHARE) {
+ region_abort(resv, idx, idx + 1, 1);
+ ret = region_del(resv, idx, idx + 1);
+ } else {
+ ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
+ /* region_add calls of range 1 should never fail. */
+ VM_BUG_ON(ret < 0);
+ }
+ break;
default:
BUG();
}
- if (vma->vm_flags & VM_MAYSHARE)
+ if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
return ret;
/*
* We know private mapping must have HPAGE_RESV_OWNER set.
@@ -2222,25 +2238,39 @@ static long vma_add_reservation(struct hstate *h,
return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}
+static long vma_del_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
+}
+
/*
- * This routine is called to restore a reservation on error paths. In the
- * specific error paths, a huge page was allocated (via alloc_huge_page)
- * and is about to be freed. If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set
- * HPageRestoreReserve in the newly allocated page. When the page is freed
- * via free_huge_page, the global reservation count will be incremented if
- * HPageRestoreReserve is set. However, free_huge_page can not adjust the
- * reserve map. Adjust the reserve map here to be consistent with global
- * reserve count adjustments to be made by free_huge_page.
+ * This routine is called to restore reservation information on error paths.
+ * It should ONLY be called for pages allocated via alloc_huge_page(), and
+ * the hugetlb mutex should remain held when calling this routine.
+ *
+ * It handles two specific cases:
+ * 1) A reservation was in place and the page consumed the reservation.
+ * HPageRestoreReserve is set in the page.
+ * 2) No reservation was in place for the page, so HPageRestoreReserve is
+ * not set. However, alloc_huge_page always updates the reserve map.
+ *
+ * In case 1, free_huge_page later in the error path will increment the
+ * global reserve count. But, free_huge_page does not have enough context
+ * to adjust the reservation map. This case deals primarily with private
+ * mappings. Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page. Make sure the
+ * reserve map indicates there is a reservation present.
+ *
+ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
*/
-static void restore_reserve_on_error(struct hstate *h,
- struct vm_area_struct *vma, unsigned long address,
- struct page *page)
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct page *page)
{
- if (unlikely(HPageRestoreReserve(page))) {
- long rc = vma_needs_reservation(h, vma, address);
+ long rc = vma_needs_reservation(h, vma, address);
- if (unlikely(rc < 0)) {
+ if (HPageRestoreReserve(page)) {
+ if (unlikely(rc < 0))
/*
* Rare out of memory condition in reserve map
* manipulation. Clear HPageRestoreReserve so that
@@ -2253,16 +2283,57 @@ static void restore_reserve_on_error(struct hstate *h,
* accounting of reserve counts.
*/
ClearHPageRestoreReserve(page);
- } else if (rc) {
- rc = vma_add_reservation(h, vma, address);
- if (unlikely(rc < 0))
+ else if (rc)
+ (void)vma_add_reservation(h, vma, address);
+ else
+ vma_end_reservation(h, vma, address);
+ } else {
+ if (!rc) {
+ /*
+ * This indicates there is an entry in the reserve map
+ * added by alloc_huge_page. We know it was added
+ * before the alloc_huge_page call, otherwise
+ * HPageRestoreReserve would be set on the page.
+ * Remove the entry so that a subsequent allocation
+ * does not consume a reservation.
+ */
+ rc = vma_del_reservation(h, vma, address);
+ if (rc < 0)
+ /*
+ * VERY rare out of memory condition. Since
+ * we can not delete the entry, set
+ * HPageRestoreReserve so that the reserve
+ * count will be incremented when the page
+ * is freed. This reserve will be consumed
+ * on a subsequent allocation.
+ */
+ SetHPageRestoreReserve(page);
+ } else if (rc < 0) {
+ /*
+ * Rare out of memory condition from
+ * vma_needs_reservation call. Memory allocation is
+ * only attempted if a new entry is needed. Therefore,
+ * this implies there is not an entry in the
+ * reserve map.
+ *
+ * For shared mappings, no entry in the map indicates
+ * no reservation. We are done.
+ */
+ if (!(vma->vm_flags & VM_MAYSHARE))
/*
- * See above comment about rare out of
- * memory condition.
+ * For private mappings, no entry indicates
+ * a reservation is present. Since we cannot
+ * add an entry, set HPageRestoreReserve
+ * on the page so the reserve count will be
+ * incremented when it is freed. This reserve
+ * will be consumed on a subsequent allocation.
*/
- ClearHPageRestoreReserve(page);
+ SetHPageRestoreReserve(page);
} else
- vma_end_reservation(h, vma, address);
+ /*
+ * No reservation present, do nothing
+ */
+ vma_end_reservation(h, vma, address);
}
}
@@ -4037,6 +4108,8 @@ again:
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
entry = huge_ptep_get(src_pte);
if (!pte_same(src_pte_old, entry)) {
+ restore_reserve_on_error(h, vma, addr,
+ new);
put_page(new);
/* dst_entry won't change as in child */
goto again;
@@ -5006,6 +5079,7 @@ out_release_unlock:
if (vm_shared || is_continue)
unlock_page(page);
out_release_nounlock:
+ restore_reserve_on_error(h, dst_vma, dst_addr, page);
put_page(page);
goto out;
}
@@ -5857,6 +5931,21 @@ unlock:
return ret;
}
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+ int ret = 0;
+
+ *hugetlb = false;
+ spin_lock_irq(&hugetlb_lock);
+ if (PageHeadHuge(page)) {
+ *hugetlb = true;
+ if (HPageFreed(page) || HPageMigratable(page))
+ ret = get_page_unless_zero(page);
+ }
+ spin_unlock_irq(&hugetlb_lock);
+ return ret;
+}
+
void putback_active_hugepage(struct page *page)
{
spin_lock_irq(&hugetlb_lock);
diff --git a/mm/internal.h b/mm/internal.h
index 2f1182948aa6..e8fdb531f887 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -384,27 +384,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
/*
- * At what user virtual address is page expected in @vma?
+ * At what user virtual address is page expected in vma?
+ * Returns -EFAULT if all of the page is outside the range of vma.
+ * If page is a compound head, the entire compound page is considered.
*/
static inline unsigned long
-__vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address(struct page *page, struct vm_area_struct *vma)
{
- pgoff_t pgoff = page_to_pgoff(page);
- return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ pgoff_t pgoff;
+ unsigned long address;
+
+ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
+ pgoff = page_to_pgoff(page);
+ if (pgoff >= vma->vm_pgoff) {
+ address = vma->vm_start +
+ ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ /* Check for address beyond vma (or wrapped through 0?) */
+ if (address < vma->vm_start || address >= vma->vm_end)
+ address = -EFAULT;
+ } else if (PageHead(page) &&
+ pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+ /* Test above avoids possibility of wrap to 0 on 32-bit */
+ address = vma->vm_start;
+ } else {
+ address = -EFAULT;
+ }
+ return address;
}
+/*
+ * Then at what user virtual address will none of the page be found in vma?
+ * Assumes that vma_address() already returned a good starting address.
+ * If page is a compound head, the entire compound page is considered.
+ */
static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address_end(struct page *page, struct vm_area_struct *vma)
{
- unsigned long start, end;
-
- start = __vma_address(page, vma);
- end = start + thp_size(page) - PAGE_SIZE;
-
- /* page should be within @vma mapping range */
- VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
-
- return max(start, vma->vm_start);
+ pgoff_t pgoff;
+ unsigned long address;
+
+ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
+ pgoff = page_to_pgoff(page) + compound_nr(page);
+ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ /* Check for address beyond vma (or wrapped through 0?) */
+ if (address < vma->vm_start || address > vma->vm_end)
+ address = vma->vm_end;
+ return address;
}
static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
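The reworked vma_address() reports an out-of-range page as -EFAULT instead of relying on a VM_BUG_ON. A simplified userspace model of the file-offset-to-address math and the bounds check; compound-page handling is omitted and the types are stand-ins for the kernel structures:

#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_vma {
	unsigned long vm_start, vm_end;	/* [start, end) in bytes */
	unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
};

static unsigned long fake_vma_address(unsigned long pgoff, const struct fake_vma *vma)
{
	unsigned long address;

	if (pgoff < vma->vm_pgoff)
		return (unsigned long)-14;	/* -EFAULT: page before the vma */
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (address >= vma->vm_end)
		return (unsigned long)-14;	/* -EFAULT: page beyond the vma */
	return address;
}

int main(void)
{
	struct fake_vma vma = { 0x400000, 0x600000, 0x10 };

	printf("%#lx\n", fake_vma_address(0x11, &vma));	/* 0x401000 */
	printf("%#lx\n", fake_vma_address(0x05, &vma));	/* -EFAULT */
	return 0;
}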
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 85ad98c00fd9..0143d32bc666 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -949,6 +949,17 @@ static int page_action(struct page_state *ps, struct page *p,
return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}
+/*
+ * Return true if the page type of a given page is supported by the hwpoison
+ * mechanism (handling may still fail), otherwise false. This function does
+ * not return true for hugetlb or device memory pages, so it's assumed to be
+ * called only in contexts where such pages can never appear.
+ */
+static inline bool HWPoisonHandlable(struct page *page)
+{
+ return PageLRU(page) || __PageMovable(page);
+}
+
/**
* __get_hwpoison_page() - Get refcount for memory error handling:
* @page: raw error page (hit by memory error)
@@ -959,8 +970,22 @@ static int page_action(struct page_state *ps, struct page *p,
static int __get_hwpoison_page(struct page *page)
{
struct page *head = compound_head(page);
+ int ret = 0;
+ bool hugetlb = false;
+
+ ret = get_hwpoison_huge_page(head, &hugetlb);
+ if (hugetlb)
+ return ret;
- if (!PageHuge(head) && PageTransHuge(head)) {
+ /*
+ * This check prevents calling get_hwpoison_unless_zero()
+ * for any unsupported type of page in order to reduce the risk of
+ * unexpected races caused by taking a page refcount.
+ */
+ if (!HWPoisonHandlable(head))
+ return 0;
+
+ if (PageTransHuge(head)) {
/*
* Non anonymous thp exists only in allocation/free time. We
* can't handle such a case correctly, so let's give it up.
@@ -1017,7 +1042,7 @@ try_again:
ret = -EIO;
}
} else {
- if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
+ if (PageHuge(p) || HWPoisonHandlable(p)) {
ret = 1;
} else {
/*
@@ -1527,7 +1552,12 @@ try_again:
return 0;
}
- if (!PageTransTail(p) && !PageLRU(p))
+ /*
+ * __munlock_pagevec may clear a writeback page's LRU flag without
+ * page_lock. We need to wait for writeback completion on this page
+ * or it may trigger a vfs BUG while evicting the inode.
+ */
+ if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
goto identify_page_state;
/*
diff --git a/mm/memory.c b/mm/memory.c
index f3ffab9b9e39..486f4a2874e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1361,7 +1361,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
+ } else if (details && details->single_page &&
+ PageTransCompound(details->single_page) &&
+ next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
+ spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+ /*
+ * Take and drop THP pmd lock so that we cannot return
+ * prematurely, while zap_huge_pmd() has cleared *pmd,
+ * but not yet decremented compound_mapcount().
+ */
+ spin_unlock(ptl);
}
+
/*
* Here there can be other concurrent MADV_DONTNEED or
* trans huge page faults running, and if the pmd is
@@ -3237,6 +3248,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
}
/**
+ * unmap_mapping_page() - Unmap single page from processes.
+ * @page: The locked page to be unmapped.
+ *
+ * Unmap this page from any userspace process which still has it mmaped.
+ * Typically, for efficiency, the range of nearby pages has already been
+ * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
+ * truncation or invalidation holds the lock on a page, it may find that
+ * the page has been remapped again, and then uses unmap_mapping_page()
+ * to finally unmap it.
+ */
+void unmap_mapping_page(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct zap_details details = { };
+
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(PageTail(page));
+
+ details.check_mapping = mapping;
+ details.first_index = page->index;
+ details.last_index = page->index + thp_nr_pages(page) - 1;
+ details.single_page = page;
+
+ i_mmap_lock_write(mapping);
+ if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+ unmap_mapping_range_tree(&mapping->i_mmap, &details);
+ i_mmap_unlock_write(mapping);
+}
+
+/**
* unmap_mapping_pages() - Unmap pages from processes.
* @mapping: The address space containing pages to be unmapped.
* @start: Index of first page to be unmapped.
diff --git a/mm/migrate.c b/mm/migrate.c
index b234c3f3acb7..41ff2c9896c4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -295,6 +295,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
goto out;
page = migration_entry_to_page(entry);
+ page = compound_head(page);
/*
* Once page cache replacement of page migration started, page_count
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 2cf01d933f13..e37bd43904af 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -212,23 +212,34 @@ restart:
pvmw->ptl = NULL;
}
} else if (!pmd_present(pmde)) {
+ /*
+ * If PVMW_SYNC, take and drop THP pmd lock so that we
+ * cannot return prematurely, while zap_huge_pmd() has
+ * cleared *pmd but not decremented compound_mapcount().
+ */
+ if ((pvmw->flags & PVMW_SYNC) &&
+ PageTransCompound(pvmw->page)) {
+ spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+ spin_unlock(ptl);
+ }
return false;
}
if (!map_pte(pvmw))
goto next_pte;
while (1) {
+ unsigned long end;
+
if (check_pte(pvmw))
return true;
next_pte:
/* Seek to next pte only makes sense for THP */
if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
return not_found(pvmw);
+ end = vma_address_end(pvmw->page, pvmw->vma);
do {
pvmw->address += PAGE_SIZE;
- if (pvmw->address >= pvmw->vma->vm_end ||
- pvmw->address >=
- __vma_address(pvmw->page, pvmw->vma) +
- thp_size(pvmw->page))
+ if (pvmw->address >= end)
return not_found(pvmw);
/* Did we cross page table boundary? */
if (pvmw->address % PMD_SIZE == 0) {
@@ -266,14 +277,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
.vma = vma,
.flags = PVMW_SYNC,
};
- unsigned long start, end;
-
- start = __vma_address(page, vma);
- end = start + thp_size(page) - PAGE_SIZE;
- if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+ pvmw.address = vma_address(page, vma);
+ if (pvmw.address == -EFAULT)
return 0;
- pvmw.address = max(start, vma->vm_start);
if (!page_vma_mapped_walk(&pvmw))
return 0;
page_vma_mapped_walk_done(&pvmw);
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index c2210e1cdb51..4e640baf9794 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -135,9 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
{
pmd_t pmd;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(!pmd_present(*pmdp));
- /* Below assumes pmd_present() is true */
- VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+ VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+ !pmd_devmap(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
diff --git a/mm/rmap.c b/mm/rmap.c
index 693a610e181d..e05c300048e6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -707,7 +707,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
- unsigned long address;
if (PageAnon(page)) {
struct anon_vma *page__anon_vma = page_anon_vma(page);
/*
@@ -717,15 +716,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
if (!vma->anon_vma || !page__anon_vma ||
vma->anon_vma->root != page__anon_vma->root)
return -EFAULT;
- } else if (page->mapping) {
- if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
- return -EFAULT;
- } else
+ } else if (!vma->vm_file) {
return -EFAULT;
- address = __vma_address(page, vma);
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+ } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
return -EFAULT;
- return address;
+ }
+
+ return vma_address(page, vma);
}
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
@@ -919,7 +916,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, vma, vma->vm_mm, address,
- min(vma->vm_end, address + page_size(page)));
+ vma_address_end(page, vma));
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
@@ -1405,6 +1402,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ /*
+ * When racing against e.g. zap_pte_range() on another cpu,
+ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * try_to_unmap() may return false when it is about to become true,
+ * if page table locking is skipped: use TTU_SYNC to wait for that.
+ */
+ if (flags & TTU_SYNC)
+ pvmw.flags = PVMW_SYNC;
+
/* munlock has nothing to gain from examining un-locked vmas */
if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
return true;
@@ -1426,9 +1432,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Note that the page can not be free in this function as call of
* try_to_unmap() must hold a reference on the page.
*/
+ range.end = PageKsm(page) ?
+ address + PAGE_SIZE : vma_address_end(page, vma);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
- address,
- min(vma->vm_end, address + page_size(page)));
+ address, range.end);
if (PageHuge(page)) {
/*
* If sharing is possible, start and end will be adjusted
@@ -1777,7 +1784,13 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
else
rmap_walk(page, &rwc);
- return !page_mapcount(page) ? true : false;
+ /*
+ * When racing against e.g. zap_pte_range() on another cpu,
+ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * try_to_unmap() may return false when it is about to become true,
+ * if page table locking is skipped: use TTU_SYNC to wait for that.
+ */
+ return !page_mapcount(page);
}
/**
@@ -1874,6 +1887,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
+ VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
@@ -1928,6 +1942,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
pgoff_start, pgoff_end) {
unsigned long address = vma_address(page, vma);
+ VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a4a571428c51..7cab77655f11 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -97,8 +97,7 @@ EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
- if (!name || in_interrupt() || size < sizeof(void *) ||
- size > KMALLOC_MAX_SIZE) {
+ if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
pr_err("kmem_cache_create(%s) integrity check failed\n", name);
return -EINVAL;
}
diff --git a/mm/slub.c b/mm/slub.c
index 3f96e099817a..61bd40e3eb9a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
+#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
@@ -712,15 +713,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
- print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
s->red_left_pad);
else if (p > addr + 16)
print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
- print_section(KERN_ERR, "Object ", p,
+ print_section(KERN_ERR, "Object ", p,
min_t(unsigned int, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
- print_section(KERN_ERR, "Redzone ", p + s->object_size,
+ print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
off = get_info_end(s);
@@ -732,7 +733,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section(KERN_ERR, "Padding ", p + off,
+ print_section(KERN_ERR, "Padding ", p + off,
size_from_object(s) - off);
dump_stack();
@@ -909,11 +910,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
u8 *endobject = object + s->object_size;
if (s->flags & SLAB_RED_ZONE) {
- if (!check_bytes_and_report(s, page, object, "Redzone",
+ if (!check_bytes_and_report(s, page, object, "Left Redzone",
object - s->red_left_pad, val, s->red_left_pad))
return 0;
- if (!check_bytes_and_report(s, page, object, "Redzone",
+ if (!check_bytes_and_report(s, page, object, "Right Redzone",
endobject, val, s->inuse - s->object_size))
return 0;
} else {
@@ -928,7 +929,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->object_size - 1) ||
- !check_bytes_and_report(s, page, p, "Poison",
+ !check_bytes_and_report(s, page, p, "End Poison",
p + s->object_size - 1, POISON_END, 1)))
return 0;
/*
@@ -3689,7 +3690,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
- unsigned int freepointer_area;
unsigned int order;
/*
@@ -3698,13 +3698,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* the possible location of the free pointer.
*/
size = ALIGN(size, sizeof(void *));
- /*
- * This is the area of the object where a freepointer can be
- * safely written. If redzoning adds more to the inuse size, we
- * can't use that portion for writing the freepointer, so
- * s->offset must be limited within this for the general case.
- */
- freepointer_area = size;
#ifdef CONFIG_SLUB_DEBUG
/*
@@ -3730,19 +3723,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
/*
* With that we have determined the number of bytes in actual use
- * by the object. This is the potential offset to the free pointer.
+ * by the object and redzoning.
*/
s->inuse = size;
- if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
- s->ctor)) {
+ if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+ ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
+ s->ctor) {
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
* kmem_cache_free.
*
* This is the case if we do RCU, have a constructor or
- * destructor or are poisoning the objects.
+ * destructor, are poisoning the objects, or are
+ * redzoning an object smaller than sizeof(void *).
*
* The assumption that s->offset >= s->inuse means free
* pointer is outside of the object is used in the
@@ -3751,13 +3746,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
s->offset = size;
size += sizeof(void *);
- } else if (freepointer_area > sizeof(void *)) {
+ } else {
/*
* Store freelist pointer near middle of object to keep
* it away from the edges of the object to avoid small
* sized over/underflows from neighboring allocations.
*/
- s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
+ s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
}
#ifdef CONFIG_SLUB_DEBUG
diff --git a/mm/sparse.c b/mm/sparse.c
index b2ada9dc00cb..55c18aff3e42 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -344,6 +344,15 @@ size_t mem_section_usage_size(void)
return sizeof(struct mem_section_usage) + usemap_size();
}
+static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
+{
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+ return __pa_symbol(pgdat);
+#else
+ return __pa(pgdat);
+#endif
+}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
@@ -362,7 +371,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
* from the same section as the pgdat where possible to avoid
* this problem.
*/
- goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+ goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
limit = goal + (1UL << PA_SECTION_SHIFT);
nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
@@ -390,7 +399,7 @@ static void __init check_usemap_section_nr(int nid,
}
usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
- pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+ pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
if (usemap_snr == pgdat_snr)
return;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 149e77454e3c..996afa8131c8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1900,7 +1900,7 @@ unsigned int count_swap_pages(int type, int free)
static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
- return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
+ return pte_same(pte_swp_clear_flags(pte), swp_pte);
}
/*
diff --git a/mm/truncate.c b/mm/truncate.c
index 95af244b112a..234ddd879caa 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -167,13 +167,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
* its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
*/
-static void
-truncate_cleanup_page(struct address_space *mapping, struct page *page)
+static void truncate_cleanup_page(struct page *page)
{
- if (page_mapped(page)) {
- unsigned int nr = thp_nr_pages(page);
- unmap_mapping_pages(mapping, page->index, nr, false);
- }
+ if (page_mapped(page))
+ unmap_mapping_page(page);
if (page_has_private(page))
do_invalidatepage(page, 0, thp_size(page));
@@ -218,7 +215,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return -EIO;
- truncate_cleanup_page(mapping, page);
+ truncate_cleanup_page(page);
delete_from_page_cache(page);
return 0;
}
@@ -325,7 +322,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
index = indices[pagevec_count(&pvec) - 1] + 1;
truncate_exceptional_pvec_entries(mapping, &pvec, indices);
for (i = 0; i < pagevec_count(&pvec); i++)
- truncate_cleanup_page(mapping, pvec.pages[i]);
+ truncate_cleanup_page(pvec.pages[i]);
delete_from_page_cache_batch(mapping, &pvec);
for (i = 0; i < pagevec_count(&pvec); i++)
unlock_page(pvec.pages[i]);
@@ -639,6 +636,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
continue;
}
+ if (!did_range_unmap && page_mapped(page)) {
+ /*
+ * If page is mapped, before taking its lock,
+ * zap the rest of the file in one hit.
+ */
+ unmap_mapping_pages(mapping, index,
+ (1 + end - index), false);
+ did_range_unmap = 1;
+ }
+
lock_page(page);
WARN_ON(page_to_index(page) != index);
if (page->mapping != mapping) {
@@ -646,23 +653,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
continue;
}
wait_on_page_writeback(page);
- if (page_mapped(page)) {
- if (!did_range_unmap) {
- /*
- * Zap the rest of the file in one hit.
- */
- unmap_mapping_pages(mapping, index,
- (1 + end - index), false);
- did_range_unmap = 1;
- } else {
- /*
- * Just zap this page
- */
- unmap_mapping_pages(mapping, index,
- 1, false);
- }
- }
+
+ if (page_mapped(page))
+ unmap_mapping_page(page);
BUG_ON(page_mapped(page));
+
ret2 = do_launder_page(mapping, page);
if (ret2 == 0) {
if (!invalidate_complete_page2(mapping, page))
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index be18af481d7d..c7236daa2415 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -768,7 +768,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
if (a && a->status & ATIF_PROBE) {
a->status |= ATIF_PROBE_FAIL;
/*
- * we do not respond to probe or request packets for
+ * we do not respond to probe or request packets of
* this address while we are probing this address
*/
goto unlock;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 789f257be24f..fc8be49010b9 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
if (WARN_ON(!forw_packet->if_outgoing))
return;
- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
+ if (forw_packet->if_outgoing->soft_iface != soft_iface) {
+ pr_warn("%s: soft interface switch for queued OGM\n", __func__);
return;
+ }
if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
return;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 372e3b25aaa4..7dd51da73845 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3229,7 +3229,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
{
struct l2cap_chan *chan;
- bt_dev_dbg(pchan->conn->hcon->hdev, "pchan %p", pchan);
+ BT_DBG("pchan %p", pchan);
chan = l2cap_chan_create();
if (!chan)
@@ -3250,7 +3250,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
*/
atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
- bt_dev_dbg(pchan->conn->hcon->hdev, "created chan %p", chan);
+ BT_DBG("created chan %p", chan);
return chan;
}
@@ -3354,7 +3354,7 @@ static void smp_del_chan(struct l2cap_chan *chan)
{
struct smp_dev *smp;
- bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan);
+ BT_DBG("chan %p", chan);
smp = chan->data;
if (smp) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 7ce8a77cc6b6..e013d33f1c7c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
#endif
struct br_tunnel_info {
- __be64 tunnel_id;
- struct metadata_dst *tunnel_dst;
+ __be64 tunnel_id;
+ struct metadata_dst __rcu *tunnel_dst;
};
/* private vlan flags */
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index 0d3a8c01552e..01017448ebde 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
br_vlan_tunnel_rht_params);
}
+static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
+{
+ struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
+
+ WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
+ RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
+ dst_release(&tdst->dst);
+}
+
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan)
{
- if (!vlan->tinfo.tunnel_dst)
+ if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
return;
rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
- vlan->tinfo.tunnel_id = 0;
- dst_release(&vlan->tinfo.tunnel_dst->dst);
- vlan->tinfo.tunnel_dst = NULL;
+ vlan_tunnel_info_release(vlan);
}
static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan, u32 tun_id)
{
- struct metadata_dst *metadata = NULL;
+ struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
int err;
- if (vlan->tinfo.tunnel_dst)
+ if (metadata)
return -EEXIST;
metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
return -EINVAL;
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
- vlan->tinfo.tunnel_dst = metadata;
- vlan->tinfo.tunnel_id = key;
+ rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
+ WRITE_ONCE(vlan->tinfo.tunnel_id, key);
err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
return 0;
out:
- dst_release(&vlan->tinfo.tunnel_dst->dst);
- vlan->tinfo.tunnel_dst = NULL;
- vlan->tinfo.tunnel_id = 0;
+ vlan_tunnel_info_release(vlan);
return err;
}
@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan)
{
+ struct metadata_dst *tunnel_dst;
+ __be64 tunnel_id;
int err;
- if (!vlan || !vlan->tinfo.tunnel_id)
+ if (!vlan)
return 0;
- if (unlikely(!skb_vlan_tag_present(skb)))
+ tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
+ if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
return 0;
skb_dst_drop(skb);
@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
if (err)
return err;
- skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+ tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
+ if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
+ skb_dst_set(skb, &tunnel_dst->dst);
return 0;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 909b9e684e04..f3e4d9528fa3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
struct sock sk;
int bound;
int ifindex;
- struct notifier_block notifier;
+ struct list_head notifier;
struct list_head rx_ops;
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
@@ -133,6 +133,10 @@ struct bcm_sock {
char procname [32]; /* inode number in decimal with \0 */
};
+static LIST_HEAD(bcm_notifier_list);
+static DEFINE_SPINLOCK(bcm_notifier_lock);
+static struct bcm_sock *bcm_busy_notifier;
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
if (!op->count && (op->flags & TX_COUNTEVT)) {
/* create notification to user */
+ memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = TX_EXPIRED;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
/* this element is not throttled anymore */
data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
head.flags = op->flags;
head.count = op->count;
@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
}
/* create notification to user */
+ memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = RX_TIMEOUT;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/*
* notification handler for netdevice status changes
*/
-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
+static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
struct sock *sk = &bo->sk;
struct bcm_op *op;
int notify_enodev = 0;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
switch (msg) {
@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
sk->sk_error_report(sk);
}
}
+}
+static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+
+ spin_lock(&bcm_notifier_lock);
+ list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
+ spin_unlock(&bcm_notifier_lock);
+ bcm_notify(bcm_busy_notifier, msg, dev);
+ spin_lock(&bcm_notifier_lock);
+ }
+ bcm_busy_notifier = NULL;
+ spin_unlock(&bcm_notifier_lock);
return NOTIFY_DONE;
}
@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
INIT_LIST_HEAD(&bo->rx_ops);
/* set notifier */
- bo->notifier.notifier_call = bcm_notifier;
-
- register_netdevice_notifier(&bo->notifier);
+ spin_lock(&bcm_notifier_lock);
+ list_add_tail(&bo->notifier, &bcm_notifier_list);
+ spin_unlock(&bcm_notifier_lock);
return 0;
}
@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
/* remove bcm_ops, timer, rx_unregister(), etc. */
- unregister_netdevice_notifier(&bo->notifier);
+ spin_lock(&bcm_notifier_lock);
+ while (bcm_busy_notifier == bo) {
+ spin_unlock(&bcm_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&bcm_notifier_lock);
+ }
+ list_del(&bo->notifier);
+ spin_unlock(&bcm_notifier_lock);
lock_sock(sk);
@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
.exit = canbcm_pernet_exit,
};
+static struct notifier_block canbcm_notifier = {
+ .notifier_call = bcm_notifier
+};
+
static int __init bcm_module_init(void)
{
int err;
@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
}
register_pernet_subsys(&canbcm_pernet_ops);
+ register_netdevice_notifier(&canbcm_notifier);
return 0;
}
static void __exit bcm_module_exit(void)
{
can_proto_unregister(&bcm_can_proto);
+ unregister_netdevice_notifier(&canbcm_notifier);
unregister_pernet_subsys(&canbcm_pernet_ops);
}
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 253b24417c8e..be6183f8ca11 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -143,10 +143,14 @@ struct isotp_sock {
u32 force_tx_stmin;
u32 force_rx_stmin;
struct tpcon rx, tx;
- struct notifier_block notifier;
+ struct list_head notifier;
wait_queue_head_t wait;
};
+static LIST_HEAD(isotp_notifier_list);
+static DEFINE_SPINLOCK(isotp_notifier_lock);
+static struct isotp_sock *isotp_busy_notifier;
+
static inline struct isotp_sock *isotp_sk(const struct sock *sk)
{
return (struct isotp_sock *)sk;
@@ -1013,7 +1017,14 @@ static int isotp_release(struct socket *sock)
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
- unregister_netdevice_notifier(&so->notifier);
+ spin_lock(&isotp_notifier_lock);
+ while (isotp_busy_notifier == so) {
+ spin_unlock(&isotp_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&isotp_notifier_lock);
+ }
+ list_del(&so->notifier);
+ spin_unlock(&isotp_notifier_lock);
lock_sock(sk);
@@ -1317,21 +1328,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
+static void isotp_notify(struct isotp_sock *so, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
struct sock *sk = &so->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
if (so->ifindex != dev->ifindex)
- return NOTIFY_DONE;
+ return;
switch (msg) {
case NETDEV_UNREGISTER:
@@ -1357,7 +1363,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
sk->sk_error_report(sk);
break;
}
+}
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+
+ spin_lock(&isotp_notifier_lock);
+ list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
+ spin_unlock(&isotp_notifier_lock);
+ isotp_notify(isotp_busy_notifier, msg, dev);
+ spin_lock(&isotp_notifier_lock);
+ }
+ isotp_busy_notifier = NULL;
+ spin_unlock(&isotp_notifier_lock);
return NOTIFY_DONE;
}
@@ -1394,8 +1421,9 @@ static int isotp_init(struct sock *sk)
init_waitqueue_head(&so->wait);
- so->notifier.notifier_call = isotp_notifier;
- register_netdevice_notifier(&so->notifier);
+ spin_lock(&isotp_notifier_lock);
+ list_add_tail(&so->notifier, &isotp_notifier_list);
+ spin_unlock(&isotp_notifier_lock);
return 0;
}
@@ -1442,6 +1470,10 @@ static const struct can_proto isotp_can_proto = {
.prot = &isotp_proto,
};
+static struct notifier_block canisotp_notifier = {
+ .notifier_call = isotp_notifier
+};
+
static __init int isotp_module_init(void)
{
int err;
@@ -1451,6 +1483,8 @@ static __init int isotp_module_init(void)
err = can_proto_register(&isotp_can_proto);
if (err < 0)
pr_err("can: registration of isotp protocol failed\n");
+ else
+ register_netdevice_notifier(&canisotp_notifier);
return err;
}
@@ -1458,6 +1492,7 @@ static __init int isotp_module_init(void)
static __exit void isotp_module_exit(void)
{
can_proto_unregister(&isotp_can_proto);
+ unregister_netdevice_notifier(&canisotp_notifier);
}
module_init(isotp_module_init);
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index e09d087ba240..c3946c355882 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
if ((do_skcb->offset + do_skb->len) < offset_start) {
__skb_unlink(do_skb, &session->skb_queue);
+ /* drop ref taken in j1939_session_skb_queue() */
+ skb_unref(do_skb);
+
kfree_skb(do_skb);
}
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
skcb->flags |= J1939_ECU_LOCAL_SRC;
+ skb_get(skb);
skb_queue_tail(&session->skb_queue, skb);
}
static struct
-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
- unsigned int offset_start)
+sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+ unsigned int offset_start)
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *do_skcb;
@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
skb = do_skb;
}
}
+
+ if (skb)
+ skb_get(skb);
+
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
if (!skb)
@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
return skb;
}
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
{
unsigned int offset_start;
offset_start = session->pkt.dpo * 7;
- return j1939_session_skb_find_by_offset(session, offset_start);
+ return j1939_session_skb_get_by_offset(session, offset_start);
}
/* see if we are receiver
@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
int ret = 0;
u8 dat[8];
- se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
+ se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
if (!se_skb)
return -ENOBUFS;
@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
netdev_err_once(priv->ndev,
"%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
__func__, session, skcb->offset, se_skb->len , session->pkt.tx);
- return -EOVERFLOW;
+ ret = -EOVERFLOW;
+ goto out_free;
}
if (!len) {
@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
if (pkt_done)
j1939_tp_set_rxtimeout(session, 250);
+ out_free:
+ if (ret)
+ kfree_skb(se_skb);
+ else
+ consume_skb(se_skb);
+
return ret;
}
@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
static int j1939_simple_txnext(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
- struct sk_buff *se_skb = j1939_session_skb_find(session);
+ struct sk_buff *se_skb = j1939_session_skb_get(session);
struct sk_buff *skb;
int ret;
@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
return 0;
skb = skb_clone(se_skb, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
can_skb_set_owner(skb, se_skb->sk);
@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
ret = j1939_send_one(priv, skb);
if (ret)
- return ret;
+ goto out_free;
j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
j1939_sk_queue_activate_next(session);
- return 0;
+ out_free:
+ if (ret)
+ kfree_skb(se_skb);
+ else
+ consume_skb(se_skb);
+
+ return ret;
}
static bool j1939_session_deactivate_locked(struct j1939_session *session)
@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
struct sk_buff *skb;
if (!session->transmission) {
- skb = j1939_session_skb_find(session);
+ skb = j1939_session_skb_get(session);
/* distribute among j1939 receivers */
j1939_sk_recv(session->priv, skb);
+ consume_skb(skb);
}
j1939_session_deactivate_activate_next(session);
@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb;
- struct sk_buff *se_skb;
+ struct sk_buff *se_skb = NULL;
const u8 *dat;
u8 *tpdat;
int offset;
@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
goto out_session_cancel;
}
- se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
+ se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
if (!se_skb) {
netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
session);
@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
j1939_tp_set_rxtimeout(session, 250);
}
session->last_cmd = 0xff;
+ consume_skb(se_skb);
j1939_session_put(session);
return;
out_session_cancel:
+ kfree_skb(se_skb);
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_put(session);
diff --git a/net/can/raw.c b/net/can/raw.c
index 139d9471ddcf..ac96fc210025 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -83,7 +83,7 @@ struct raw_sock {
struct sock sk;
int bound;
int ifindex;
- struct notifier_block notifier;
+ struct list_head notifier;
int loopback;
int recv_own_msgs;
int fd_frames;
@@ -95,6 +95,10 @@ struct raw_sock {
struct uniqframe __percpu *uniq;
};
+static LIST_HEAD(raw_notifier_list);
+static DEFINE_SPINLOCK(raw_notifier_lock);
+static struct raw_sock *raw_busy_notifier;
+
/* Return pointer to store the extra msg flags for raw_recvmsg().
* We use the space of one unsigned int beyond the 'struct sockaddr_can'
* in skb->cb.
@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
return err;
}
-static int raw_notifier(struct notifier_block *nb,
- unsigned long msg, void *ptr)
+static void raw_notify(struct raw_sock *ro, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
struct sock *sk = &ro->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
if (ro->ifindex != dev->ifindex)
- return NOTIFY_DONE;
+ return;
switch (msg) {
case NETDEV_UNREGISTER:
@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
sk->sk_error_report(sk);
break;
}
+}
+
+static int raw_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+ spin_lock(&raw_notifier_lock);
+ list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
+ spin_unlock(&raw_notifier_lock);
+ raw_notify(raw_busy_notifier, msg, dev);
+ spin_lock(&raw_notifier_lock);
+ }
+ raw_busy_notifier = NULL;
+ spin_unlock(&raw_notifier_lock);
return NOTIFY_DONE;
}
@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
return -ENOMEM;
/* set notifier */
- ro->notifier.notifier_call = raw_notifier;
-
- register_netdevice_notifier(&ro->notifier);
+ spin_lock(&raw_notifier_lock);
+ list_add_tail(&ro->notifier, &raw_notifier_list);
+ spin_unlock(&raw_notifier_lock);
return 0;
}
@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
ro = raw_sk(sk);
- unregister_netdevice_notifier(&ro->notifier);
+ spin_lock(&raw_notifier_lock);
+ while (raw_busy_notifier == ro) {
+ spin_unlock(&raw_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&raw_notifier_lock);
+ }
+ list_del(&ro->notifier);
+ spin_unlock(&raw_notifier_lock);
lock_sock(sk);
@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
.prot = &raw_proto,
};
+static struct notifier_block canraw_notifier = {
+ .notifier_call = raw_notifier
+};
+
static __init int raw_module_init(void)
{
int err;
@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
err = can_proto_register(&raw_can_proto);
if (err < 0)
pr_err("can: registration of raw protocol failed\n");
+ else
+ register_netdevice_notifier(&canraw_notifier);
return err;
}
@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
static __exit void raw_module_exit(void)
{
can_proto_unregister(&raw_can_proto);
+ unregister_netdevice_notifier(&canraw_notifier);
}
module_init(raw_module_init);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 98f20efbfadf..bf774575ad71 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
write_lock(&n->lock);
if ((n->nud_state == NUD_FAILED) ||
+ (n->nud_state == NUD_NOARP) ||
(tbl->is_multicast &&
tbl->is_multicast(n->primary_key)) ||
time_after(tref, n->updated))
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 43b6ac4c4439..9b5a767eddd5 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -641,6 +641,18 @@ void __put_net(struct net *net)
}
EXPORT_SYMBOL_GPL(__put_net);
+/**
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+ * Returns the net's common namespace.
+ */
+struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return &get_net(container_of(ns, struct net, ns))->ns;
+}
+EXPORT_SYMBOL_GPL(get_net_ns);
+
struct net *get_net_ns_by_fd(int fd)
{
struct file *file;
@@ -660,14 +672,8 @@ struct net *get_net_ns_by_fd(int fd)
fput(file);
return net;
}
-
-#else
-struct net *get_net_ns_by_fd(int fd)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
+#endif
struct net *get_net_ns_by_pid(pid_t pid)
{
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3e84279c4123..ec931b080156 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -4842,10 +4842,12 @@ static int rtnl_bridge_notify(struct net_device *dev)
if (err < 0)
goto errout;
- if (!skb->len) {
- err = -EINVAL;
+ /* Notification info is only filled for bridge ports, not the bridge
+ * device itself. Therefore, a zero notification length is valid and
+ * should not result in an error.
+ */
+ if (!skb->len)
goto errout;
- }
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ad22870298c..bbc3b4b62032 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1253,6 +1253,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
struct sock *sk = skb->sk;
struct sk_buff_head *q;
unsigned long flags;
+ bool is_zerocopy;
u32 lo, hi;
u16 len;
@@ -1267,6 +1268,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
len = uarg->len;
lo = uarg->id;
hi = uarg->id + len - 1;
+ is_zerocopy = uarg->zerocopy;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
@@ -1274,7 +1276,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
serr->ee.ee_data = hi;
serr->ee.ee_info = lo;
- if (!uarg->zerocopy)
+ if (!is_zerocopy)
serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
q = &sk->sk_error_queue;
diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
index 2a6733a6449a..5d38e90895ac 100644
--- a/net/ethtool/eeprom.c
+++ b/net/ethtool/eeprom.c
@@ -95,7 +95,7 @@ static int get_module_eeprom_by_page(struct net_device *dev,
if (dev->sfp_bus)
return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
- if (ops->get_module_info)
+ if (ops->get_module_eeprom_by_page)
return ops->get_module_eeprom_by_page(dev, page_data, extack);
return -EOPNOTSUPP;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 3fa7a394eabf..baa5d10043cb 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1421,7 +1421,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
if (eeprom.offset + eeprom.len > total_len)
return -EINVAL;
- data = kmalloc(PAGE_SIZE, GFP_USER);
+ data = kzalloc(PAGE_SIZE, GFP_USER);
if (!data)
return -ENOMEM;
@@ -1486,7 +1486,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
return -EINVAL;
- data = kmalloc(PAGE_SIZE, GFP_USER);
+ data = kzalloc(PAGE_SIZE, GFP_USER);
if (!data)
return -ENOMEM;
@@ -1765,7 +1765,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
return -EFAULT;
test.len = test_len;
- data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
+ data = kcalloc(test_len, sizeof(u64), GFP_USER);
if (!data)
return -ENOMEM;
@@ -2293,7 +2293,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
ret = ethtool_tunable_valid(&tuna);
if (ret)
return ret;
- data = kmalloc(tuna.len, GFP_USER);
+ data = kzalloc(tuna.len, GFP_USER);
if (!data)
return -ENOMEM;
ret = ops->get_tunable(dev, &tuna, data);
@@ -2485,7 +2485,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
ret = ethtool_phy_tunable_valid(&tuna);
if (ret)
return ret;
- data = kmalloc(tuna.len, GFP_USER);
+ data = kzalloc(tuna.len, GFP_USER);
if (!data)
return -ENOMEM;
if (phy_drv_tunable) {
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index b3029fff715d..2d51b7ab4dc5 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -353,6 +353,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
int len = 0;
int ret;
+ len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
+
for (i = 0; i < ETH_SS_COUNT; i++) {
const struct strset_info *set_info = &data->sets[i];
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f17870ee558b..2f94d221c00e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -575,7 +575,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
return err;
}
- if (!inet_sk(sk)->inet_num && inet_autobind(sk))
+ if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
return -EAGAIN;
return sk->sk_prot->connect(sk, uaddr, addr_len);
}
@@ -803,7 +803,7 @@ int inet_send_prepare(struct sock *sk)
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
- if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
+ if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index bfaf327e9d12..e0480c6cebaa 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
kfree(doi_def->map.std->lvl.local);
kfree(doi_def->map.std->cat.cipso);
kfree(doi_def->map.std->cat.local);
+ kfree(doi_def->map.std);
break;
}
kfree(doi_def);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2e35f68da40a..1c6429c353a9 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
- BUG();
+ return -EINVAL;
if (tb[IFLA_INET_CONF]) {
nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 7b6931a4d775..752e392083e6 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr);
+ /* if we don't have a source address at this point, fall back to the
+ * dummy address instead of sending out a packet with a source address
+ * of 0.0.0.0
+ */
+ if (!fl4.saddr)
+ fl4.saddr = htonl(INADDR_DUMMY);
+
icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
ende:
ip_rt_put(rt);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 7b272bbed2b4..6b3c558a4f23 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
+ ip_mc_clear_src(i);
ip_ma_put(i);
}
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1c9f71a37258..95a718397fd1 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -954,6 +954,7 @@ bool ping_rcv(struct sk_buff *skb)
struct sock *sk;
struct net *net = dev_net(skb->dev);
struct icmphdr *icmph = icmp_hdr(skb);
+ bool rc = false;
/* We assume the packet has already been checked by icmp_rcv */
@@ -968,14 +969,15 @@ bool ping_rcv(struct sk_buff *skb)
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
pr_debug("rcv on socket %p\n", sk);
- if (skb2)
- ping_queue_rcv_skb(sk, skb2);
+ if (skb2 && !ping_queue_rcv_skb(sk, skb2))
+ rc = true;
sock_put(sk);
- return true;
}
- pr_debug("no socket, dropping\n");
- return false;
+ if (!rc)
+ pr_debug("no socket, dropping\n");
+
+ return rc;
}
EXPORT_SYMBOL_GPL(ping_rcv);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f6787c55f6ab..6a36ac98476f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2056,6 +2056,19 @@ martian_source:
return err;
}
+/* get device for dst_alloc with local routes */
+static struct net_device *ip_rt_get_dev(struct net *net,
+ const struct fib_result *res)
+{
+ struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
+ struct net_device *dev = NULL;
+
+ if (nhc)
+ dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
+
+ return dev ? : net->loopback_dev;
+}
+
/*
* NOTE. We drop all the packets that has local source
* addresses, because every properly looped back packet
@@ -2212,7 +2225,7 @@ local_input:
}
}
- rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+ rth = rt_dst_alloc(ip_rt_get_dev(net, res),
flags | RTCF_LOCAL, res->type,
IN_DEV_ORCONF(in_dev, NOPOLICY), false);
if (!rth)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 15f5504adf5b..1307ad0d3b9e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2607,6 +2607,9 @@ void udp_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
bool slow = lock_sock_fast(sk);
+
+ /* protects from races with udp_abort() */
+ sock_set_flag(sk, SOCK_DEAD);
udp_flush_pending_frames(sk);
unlock_sock_fast(sk, slow);
if (static_branch_unlikely(&udp_encap_needed_key)) {
@@ -2857,10 +2860,17 @@ int udp_abort(struct sock *sk, int err)
{
lock_sock(sk);
+ /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
+ * with close()
+ */
+ if (sock_flag(sk, SOCK_DEAD))
+ goto out;
+
sk->sk_err = err;
sk->sk_error_report(sk);
__udp_disconnect(sk, 0);
+out:
release_sock(sk);
return 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b0ef65eb9bd2..701eb82acd1c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5827,7 +5827,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
- BUG();
+ return -EINVAL;
if (tb[IFLA_INET6_TOKEN]) {
err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index e204163c7036..92f3235fa287 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
}
EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
+static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
+{
+ if (likely(next != IPPROTO_ICMPV6))
+ return false;
+
+ if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
+ return false;
+
+ return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
+}
+
void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
- if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
- nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
- nft_fib_store_result(dest, priv, nft_in(pkt));
- return;
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
+ nft_hook(pkt) == NF_INET_INGRESS) {
+ if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
+ nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
+ nft_fib_store_result(dest, priv, nft_in(pkt));
+ return;
+ }
}
*dest = 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 199b080d418a..3fcd86f4dfdc 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1598,6 +1598,9 @@ void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
+
+ /* protects from races with udp_abort() */
+ sock_set_flag(sk, SOCK_DEAD);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 1c572c8daced..6201965bd822 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1066,11 +1066,6 @@ out_error:
goto partial_message;
}
- if (skb_has_frag_list(head)) {
- kfree_skb_list(skb_shinfo(head)->frag_list);
- skb_shinfo(head)->frag_list = NULL;
- }
-
if (head != kcm->seq_skb)
kfree_skb(head);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 9245c0421bda..fc34ae2b604c 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -4,7 +4,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -387,10 +387,17 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
+ int ret;
rtnl_lock();
+ wiphy_lock(local->hw.wiphy);
__ieee80211_suspend(&local->hw, NULL);
- __ieee80211_resume(&local->hw);
+ ret = __ieee80211_resume(&local->hw);
+ wiphy_unlock(local->hw.wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
rtnl_unlock();
return count;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 214404a558fb..648696b49f89 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1442,7 +1442,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON_ONCE(!chanctx_conf)) {
+ if (!chanctx_conf) {
rcu_read_unlock();
return NULL;
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 2e2f73a4aa73..137fa4c50e07 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -476,14 +476,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
GFP_KERNEL);
}
- /* APs need special treatment */
if (sdata->vif.type == NL80211_IFTYPE_AP) {
- struct ieee80211_sub_if_data *vlan, *tmpsdata;
-
- /* down all dependent devices, that is VLANs */
- list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
- u.vlan.list)
- dev_close(vlan->dev);
WARN_ON(!list_empty(&sdata->u.ap.vlans));
} else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
/* remove all packets in parent bc_buf pointing to this dev */
@@ -641,6 +634,15 @@ static int ieee80211_stop(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ /* close all dependent VLAN interfaces before locking wiphy */
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ struct ieee80211_sub_if_data *vlan, *tmpsdata;
+
+ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
+ u.vlan.list)
+ dev_close(vlan->dev);
+ }
+
wiphy_lock(sdata->local->hw.wiphy);
ieee80211_do_stop(sdata, true);
wiphy_unlock(sdata->local->hw.wiphy);
@@ -1591,6 +1593,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
+ if (!list_empty(&sdata->u.ap.vlans))
+ return -EBUSY;
+ break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_OCB:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 62145e5f9628..f33a3acd7f96 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -252,6 +252,7 @@ static void ieee80211_restart_work(struct work_struct *work)
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, restart_work);
struct ieee80211_sub_if_data *sdata;
+ int ret;
/* wait for scan work complete */
flush_workqueue(local->workqueue);
@@ -301,8 +302,12 @@ static void ieee80211_restart_work(struct work_struct *work)
/* wait for all packet processing to be done */
synchronize_net();
- ieee80211_reconfig(local);
+ ret = ieee80211_reconfig(local);
wiphy_unlock(local->hw.wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
rtnl_unlock();
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2480bd0577bb..3f2aad2e7436 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4062,10 +4062,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (elems.mbssid_config_ie)
bss_conf->profile_periodicity =
elems.mbssid_config_ie->profile_periodicity;
+ else
+ bss_conf->profile_periodicity = 0;
if (elems.ext_capab_len >= 11 &&
(elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
bss_conf->ema_ap = true;
+ else
+ bss_conf->ema_ap = false;
/* continue assoc process */
ifmgd->assoc_data->timeout = jiffies;
@@ -5802,12 +5806,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 3)
sdata->vif.bss_conf.profile_periodicity = elem->data[2];
+ else
+ sdata->vif.bss_conf.profile_periodicity = 0;
elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 11 &&
(elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
sdata->vif.bss_conf.ema_ap = true;
+ else
+ sdata->vif.bss_conf.ema_ap = false;
} else {
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 6487b05da6fa..a6f3fb4a9197 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1514,7 +1514,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
return;
- if (time_is_before_jiffies(mi->sample_time))
+ if (time_is_after_jiffies(mi->sample_time))
return;
mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1bb43edd47b6..af0ef456eb0f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2240,17 +2240,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
- if (is_multicast_ether_addr(hdr->addr1)) {
- I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
- goto out_no_led;
- }
-
if (rx->sta)
cache = &rx->sta->frags;
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
goto out;
+ if (is_multicast_ether_addr(hdr->addr1))
+ return RX_DROP_MONITOR;
+
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@@ -2376,7 +2374,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
out:
ieee80211_led_rx(rx->local);
- out_no_led:
if (rx->sta)
rx->sta->rx_stats.packets++;
return RX_CONTINUE;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d4cc9ac2d703..6b50cb5e0e3c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
+ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+
+ if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+ !ieee80211_is_beacon(mgmt->frame_control) &&
+ !ieee80211_is_s1g_beacon(mgmt->frame_control))
+ return;
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
- if (skb->len < 15)
- return;
- } else if (skb->len < 24 ||
- (!ieee80211_is_probe_resp(mgmt->frame_control) &&
- !ieee80211_is_beacon(mgmt->frame_control)))
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_short_beacon.variable);
+ else
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_beacon);
+ }
+
+ if (skb->len < min_hdr_len)
return;
sdata1 = rcu_dereference(local->scan_sdata);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0b719f3d2dec..2651498d05e8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2014,6 +2014,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
ieee80211_tx(sdata, sta, skb, false);
}
+static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
+{
+ struct ieee80211_radiotap_header *rthdr =
+ (struct ieee80211_radiotap_header *)skb->data;
+
+ /* check for not even having the fixed radiotap header part */
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ return false; /* too short to be possibly valid */
+
+ /* is it a header version we can trust to find length from? */
+ if (unlikely(rthdr->it_version))
+ return false; /* only version 0 is supported */
+
+ /* does the skb contain enough to deliver on the alleged length? */
+ if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
+ return false; /* skb too short for claimed rt header extent */
+
+ return true;
+}
+
bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2022,8 +2042,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[info->band];
int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
NULL);
u16 txflags;
@@ -2036,17 +2054,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
u8 vht_mcs = 0, vht_nss = 0;
int i;
- /* check for not even having the fixed radiotap header part */
- if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
- return false; /* too short to be possibly valid */
-
- /* is it a header version we can trust to find length from? */
- if (unlikely(rthdr->it_version))
- return false; /* only version 0 is supported */
-
- /* does the skb contain enough to deliver on the alleged length? */
- if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
- return false; /* skb too short for claimed rt header extent */
+ if (!ieee80211_validate_radiotap_len(skb))
+ return false;
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_DONTFRAG;
@@ -2186,6 +2195,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
return false;
if (rate_found) {
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[info->band];
+
info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -2199,7 +2211,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(info->control.rates, vht_mcs,
vht_nss);
- } else {
+ } else if (sband) {
for (i = 0; i < sband->n_bitrates; i++) {
if (rate * 5 != sband->bitrates[i].bitrate)
continue;
@@ -2236,8 +2248,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_INJECTED;
- /* Sanity-check and process the injection radiotap header */
- if (!ieee80211_parse_tx_radiotap(skb, dev))
+ /* Sanity-check the length of the radiotap header */
+ if (!ieee80211_validate_radiotap_len(skb))
goto fail;
/* we now know there is a radiotap header with a length we can use */
@@ -2351,6 +2363,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
ieee80211_select_queue_80211(sdata, skb, hdr);
skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
+ /*
+ * Process the radiotap header. This will now take into account the
+ * selected chandef above to accurately set injection rates and
+ * retransmissions.
+ */
+ if (!ieee80211_parse_tx_radiotap(skb, dev))
+ goto fail_rcu;
+
/* remove the injection radiotap header */
skb_pull(skb, len_rthdr);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0a0481f5af48..060059ef9668 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -947,7 +947,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
switch (elem->data[0]) {
case WLAN_EID_EXT_HE_MU_EDCA:
- if (len == sizeof(*elems->mu_edca_param_set)) {
+ if (len >= sizeof(*elems->mu_edca_param_set)) {
elems->mu_edca_param_set = data;
if (crc)
*crc = crc32_be(*crc, (void *)elem,
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
}
break;
case WLAN_EID_EXT_UORA:
- if (len == 1)
+ if (len >= 1)
elems->uora_element = data;
break;
case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
@@ -976,7 +976,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
elems->max_channel_switch_time = data;
break;
case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
- if (len == sizeof(*elems->mbssid_config_ie))
+ if (len >= sizeof(*elems->mbssid_config_ie))
elems->mbssid_config_ie = data;
break;
case WLAN_EID_EXT_HE_SPR:
@@ -985,7 +985,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
elems->he_spr = data;
break;
case WLAN_EID_EXT_HE_6GHZ_CAPA:
- if (len == sizeof(*elems->he_6ghz_capa))
+ if (len >= sizeof(*elems->he_6ghz_capa))
elems->he_6ghz_capa = data;
break;
}
@@ -1074,14 +1074,14 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
switch (id) {
case WLAN_EID_LINK_ID:
- if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) {
+ if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
elem_parse_failed = true;
break;
}
elems->lnk_id = (void *)(pos - 2);
break;
case WLAN_EID_CHAN_SWITCH_TIMING:
- if (elen != sizeof(struct ieee80211_ch_switch_timing)) {
+ if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
elem_parse_failed = true;
break;
}
@@ -1244,7 +1244,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->sec_chan_offs = (void *)pos;
break;
case WLAN_EID_CHAN_SWITCH_PARAM:
- if (elen !=
+ if (elen <
sizeof(*elems->mesh_chansw_params_ie)) {
elem_parse_failed = true;
break;
@@ -1253,7 +1253,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
break;
case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
if (!action ||
- elen != sizeof(*elems->wide_bw_chansw_ie)) {
+ elen < sizeof(*elems->wide_bw_chansw_ie)) {
elem_parse_failed = true;
break;
}
@@ -1272,7 +1272,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
pos, elen);
if (ie) {
- if (ie[1] == sizeof(*elems->wide_bw_chansw_ie))
+ if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie))
elems->wide_bw_chansw_ie =
(void *)(ie + 2);
else
@@ -1316,7 +1316,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->cisco_dtpc_elem = pos;
break;
case WLAN_EID_ADDBA_EXT:
- if (elen != sizeof(struct ieee80211_addba_ext_ie)) {
+ if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
elem_parse_failed = true;
break;
}
@@ -1342,7 +1342,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elem, elems);
break;
case WLAN_EID_S1G_CAPABILITIES:
- if (elen == sizeof(*elems->s1g_capab))
+ if (elen >= sizeof(*elems->s1g_capab))
elems->s1g_capab = (void *)pos;
else
elem_parse_failed = true;
@@ -2178,8 +2178,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
list_for_each_entry(ctx, &local->chanctx_list, list)
ctx->driver_present = false;
mutex_unlock(&local->chanctx_mtx);
-
- cfg80211_shutdown_all_interfaces(local->hw.wiphy);
}
static void ieee80211_assign_chanctx(struct ieee80211_local *local,
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 6b825fb3fa83..9b263f27ce9b 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -356,6 +356,8 @@ void mptcp_get_options(const struct sk_buff *skb,
length--;
continue;
default:
+ if (length < 2)
+ return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 5edc686faff1..632350018fb6 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -280,11 +280,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
/* try to fetch required memory from subflow */
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
- if (ssk->sk_forward_alloc < skb->truesize)
- goto drop;
- __sk_mem_reclaim(ssk, skb->truesize);
- if (!sk_rmem_schedule(sk, skb, skb->truesize))
+ int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
+
+ if (ssk->sk_forward_alloc < amount)
goto drop;
+
+ ssk->sk_forward_alloc -= amount;
+ sk->sk_forward_alloc += amount;
}
/* the skb map_seq accounts for the skb offset:
@@ -668,18 +670,22 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
/* In most cases we will be able to lock the mptcp socket. If its already
* owned, we need to defer to the work queue to avoid ABBA deadlock.
*/
-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
struct sock *sk = (struct sock *)msk;
unsigned int moved = 0;
if (inet_sk_state_load(sk) == TCP_CLOSE)
- return;
-
- mptcp_data_lock(sk);
+ return false;
__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
__mptcp_ofo_queue(msk);
+ if (unlikely(ssk->sk_err)) {
+ if (!sock_owned_by_user(sk))
+ __mptcp_error_report(sk);
+ else
+ set_bit(MPTCP_ERROR_REPORT, &msk->flags);
+ }
/* If the moves have caught up with the DATA_FIN sequence number
* it's time to ack the DATA_FIN and change socket state, but
@@ -688,7 +694,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
*/
if (mptcp_pending_data_fin(sk, NULL))
mptcp_schedule_work(sk);
- mptcp_data_unlock(sk);
+ return moved > 0;
}
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
@@ -696,7 +702,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
int sk_rbuf, ssk_rbuf;
- bool wake;
/* The peer can send data while we are shutting down this
* subflow at msk destruction time, but we must avoid enqueuing
@@ -705,28 +710,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
if (unlikely(subflow->disposable))
return;
- /* move_skbs_to_msk below can legitly clear the data_avail flag,
- * but we will need later to properly woke the reader, cache its
- * value
- */
- wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
- if (wake)
- set_bit(MPTCP_DATA_READY, &msk->flags);
-
ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
if (unlikely(ssk_rbuf > sk_rbuf))
sk_rbuf = ssk_rbuf;
- /* over limit? can't append more skbs to msk */
+ /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
- goto wake;
-
- move_skbs_to_msk(msk, ssk);
+ return;
-wake:
- if (wake)
+ /* Wake-up the reader only for in-sequence data */
+ mptcp_data_lock(sk);
+ if (move_skbs_to_msk(msk, ssk)) {
+ set_bit(MPTCP_DATA_READY, &msk->flags);
sk->sk_data_ready(sk);
+ }
+ mptcp_data_unlock(sk);
}
static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
@@ -858,7 +857,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
sock_owned_by_me(sk);
mptcp_for_each_subflow(msk, subflow) {
- if (subflow->data_avail)
+ if (READ_ONCE(subflow->data_avail))
return mptcp_subflow_tcp_sock(subflow);
}
@@ -1955,6 +1954,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
mptcp_data_unlock(sk);
tcp_cleanup_rbuf(ssk, moved);
+
+ if (unlikely(ssk->sk_err))
+ __mptcp_error_report(sk);
unlock_sock_fast(ssk, slowpath);
} while (!done);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 0c6f99c67345..385796f0ef19 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -362,7 +362,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
enum mptcp_data_avail {
MPTCP_SUBFLOW_NODATA,
MPTCP_SUBFLOW_DATA_AVAIL,
- MPTCP_SUBFLOW_OOO_DATA
};
struct mptcp_delegated_action {
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index ef3d037f984a..be1de4084196 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -784,10 +784,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}
-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
- WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
- ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+ ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
@@ -812,13 +812,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
/* Mapping covers data later in the subflow stream,
* currently unsupported.
*/
- warn_bad_map(subflow, ssn);
+ dbg_bad_map(subflow, ssn);
return false;
}
if (unlikely(!before(ssn, subflow->map_subflow_seq +
subflow->map_data_len))) {
/* Mapping covers past subflow data, invalid */
- warn_bad_map(subflow, ssn + skb->len);
+ dbg_bad_map(subflow, ssn);
return false;
}
return true;
@@ -1000,7 +1000,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
struct sk_buff *skb;
if (!skb_peek(&ssk->sk_receive_queue))
- subflow->data_avail = 0;
+ WRITE_ONCE(subflow->data_avail, 0);
if (subflow->data_avail)
return true;
@@ -1039,18 +1039,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
ack_seq);
- if (ack_seq == old_ack) {
- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
- break;
- } else if (after64(ack_seq, old_ack)) {
- subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
- break;
+ if (unlikely(before64(ack_seq, old_ack))) {
+ mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+ continue;
}
- /* only accept in-sequence mapping. Old values are spurious
- * retransmission
- */
- mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ break;
}
return true;
@@ -1065,12 +1060,11 @@ fallback:
* subflow_error_report() will introduce the appropriate barriers
*/
ssk->sk_err = EBADMSG;
- ssk->sk_error_report(ssk);
tcp_set_state(ssk, TCP_CLOSE);
subflow->reset_transient = 0;
subflow->reset_reason = MPTCP_RST_EMPTCP;
tcp_send_active_reset(ssk, GFP_ATOMIC);
- subflow->data_avail = 0;
+ WRITE_ONCE(subflow->data_avail, 0);
return false;
}
@@ -1080,7 +1074,7 @@ fallback:
subflow->map_seq = READ_ONCE(msk->ack_seq);
subflow->map_data_len = skb->len;
subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
return true;
}
@@ -1092,7 +1086,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
if (subflow->map_valid &&
mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
subflow->map_valid = 0;
- subflow->data_avail = 0;
+ WRITE_ONCE(subflow->data_avail, 0);
pr_debug("Done with mapping: seq=%u data_len=%u",
subflow->map_subflow_seq,
@@ -1120,41 +1114,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
*full_space = tcp_full_space(sk);
}
-static void subflow_data_ready(struct sock *sk)
-{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- u16 state = 1 << inet_sk_state_load(sk);
- struct sock *parent = subflow->conn;
- struct mptcp_sock *msk;
-
- msk = mptcp_sk(parent);
- if (state & TCPF_LISTEN) {
- /* MPJ subflow are removed from accept queue before reaching here,
- * avoid stray wakeups
- */
- if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
- return;
-
- set_bit(MPTCP_DATA_READY, &msk->flags);
- parent->sk_data_ready(parent);
- return;
- }
-
- WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
- !subflow->mp_join && !(state & TCPF_CLOSE));
-
- if (mptcp_subflow_data_available(sk))
- mptcp_data_ready(parent, sk);
-}
-
-static void subflow_write_space(struct sock *ssk)
-{
- struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
-
- mptcp_propagate_sndbuf(sk, ssk);
- mptcp_write_space(sk);
-}
-
void __mptcp_error_report(struct sock *sk)
{
struct mptcp_subflow_context *subflow;
@@ -1195,6 +1154,43 @@ static void subflow_error_report(struct sock *ssk)
mptcp_data_unlock(sk);
}
+static void subflow_data_ready(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ u16 state = 1 << inet_sk_state_load(sk);
+ struct sock *parent = subflow->conn;
+ struct mptcp_sock *msk;
+
+ msk = mptcp_sk(parent);
+ if (state & TCPF_LISTEN) {
+ /* MPJ subflow are removed from accept queue before reaching here,
+ * avoid stray wakeups
+ */
+ if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+ return;
+
+ set_bit(MPTCP_DATA_READY, &msk->flags);
+ parent->sk_data_ready(parent);
+ return;
+ }
+
+ WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+ !subflow->mp_join && !(state & TCPF_CLOSE));
+
+ if (mptcp_subflow_data_available(sk))
+ mptcp_data_ready(parent, sk);
+ else if (unlikely(sk->sk_err))
+ subflow_error_report(sk);
+}
+
+static void subflow_write_space(struct sock *ssk)
+{
+ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+ mptcp_propagate_sndbuf(sk, ssk);
+ mptcp_write_space(sk);
+}
+
static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
@@ -1505,6 +1501,8 @@ static void subflow_state_change(struct sock *sk)
*/
if (mptcp_subflow_data_available(sk))
mptcp_data_ready(parent, sk);
+ else if (unlikely(sk->sk_err))
+ subflow_error_report(sk);
subflow_sched_work_if_closed(mptcp_sk(parent), sk);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b100c04a0e43..3d6d49420db8 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
int length = (th->doff * 4) - sizeof(*th);
u8 buf[40], *ptr;
+ if (unlikely(length < 0))
+ return false;
+
ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
if (ptr == NULL)
return false;
@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
length--;
continue;
default:
+ if (length < 2)
+ return true;
opsize = *ptr++;
if (opsize < 2)
return true;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 72bc759179ef..bf4d6ec9fc55 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4364,13 +4364,45 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
err = nf_tables_set_alloc_name(&ctx, set, name);
kfree(name);
if (err < 0)
- goto err_set_alloc_name;
+ goto err_set_name;
+
+ udata = NULL;
+ if (udlen) {
+ udata = set->data + size;
+ nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
+ }
+
+ INIT_LIST_HEAD(&set->bindings);
+ INIT_LIST_HEAD(&set->catchall_list);
+ set->table = table;
+ write_pnet(&set->net, net);
+ set->ops = ops;
+ set->ktype = ktype;
+ set->klen = desc.klen;
+ set->dtype = dtype;
+ set->objtype = objtype;
+ set->dlen = desc.dlen;
+ set->flags = flags;
+ set->size = desc.size;
+ set->policy = policy;
+ set->udlen = udlen;
+ set->udata = udata;
+ set->timeout = timeout;
+ set->gc_int = gc_int;
+
+ set->field_count = desc.field_count;
+ for (i = 0; i < desc.field_count; i++)
+ set->field_len[i] = desc.field_len[i];
+
+ err = ops->init(set, &desc, nla);
+ if (err < 0)
+ goto err_set_init;
if (nla[NFTA_SET_EXPR]) {
expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
- goto err_set_alloc_name;
+ goto err_set_expr_alloc;
}
set->exprs[0] = expr;
set->num_exprs++;
@@ -4381,75 +4413,44 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(flags & NFT_SET_EXPR)) {
err = -EINVAL;
- goto err_set_alloc_name;
+ goto err_set_expr_alloc;
}
i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX) {
err = -E2BIG;
- goto err_set_init;
+ goto err_set_expr_alloc;
}
if (nla_type(tmp) != NFTA_LIST_ELEM) {
err = -EINVAL;
- goto err_set_init;
+ goto err_set_expr_alloc;
}
expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
- goto err_set_init;
+ goto err_set_expr_alloc;
}
set->exprs[i++] = expr;
set->num_exprs++;
}
}
- udata = NULL;
- if (udlen) {
- udata = set->data + size;
- nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
- }
-
- INIT_LIST_HEAD(&set->bindings);
- INIT_LIST_HEAD(&set->catchall_list);
- set->table = table;
- write_pnet(&set->net, net);
- set->ops = ops;
- set->ktype = ktype;
- set->klen = desc.klen;
- set->dtype = dtype;
- set->objtype = objtype;
- set->dlen = desc.dlen;
- set->flags = flags;
- set->size = desc.size;
- set->policy = policy;
- set->udlen = udlen;
- set->udata = udata;
- set->timeout = timeout;
- set->gc_int = gc_int;
set->handle = nf_tables_alloc_handle(table);
- set->field_count = desc.field_count;
- for (i = 0; i < desc.field_count; i++)
- set->field_len[i] = desc.field_len[i];
-
- err = ops->init(set, &desc, nla);
- if (err < 0)
- goto err_set_init;
-
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
if (err < 0)
- goto err_set_trans;
+ goto err_set_expr_alloc;
list_add_tail_rcu(&set->list, &table->sets);
table->use++;
return 0;
-err_set_trans:
- ops->destroy(set);
-err_set_init:
+err_set_expr_alloc:
for (i = 0; i < set->num_exprs; i++)
nft_expr_destroy(&ctx, set->exprs[i]);
-err_set_alloc_name:
+
+ ops->destroy(set);
+err_set_init:
kfree(set->name);
err_set_name:
kvfree(set);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ae906eb4b269..330ba68828e7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2683,7 +2683,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
- proto = po->num;
+ proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2896,7 +2896,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
- proto = po->num;
+ proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -3034,10 +3034,13 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- if (po->tx_ring.pg_vec)
+ /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
+ * tpacket_snd() will redo the check safely.
+ */
+ if (data_race(po->tx_ring.pg_vec))
return tpacket_snd(po, msg);
- else
- return packet_snd(sock, msg, len);
+
+ return packet_snd(sock, msg, len);
}
/*
@@ -3168,7 +3171,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
/* prevents packet_notifier() from calling
* register_prot_hook()
*/
- po->num = 0;
+ WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, true);
rcu_read_lock();
dev_curr = po->prot_hook.dev;
@@ -3178,17 +3181,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
}
BUG_ON(po->running);
- po->num = proto;
+ WRITE_ONCE(po->num, proto);
po->prot_hook.type = proto;
if (unlikely(unlisted)) {
dev_put(dev);
po->prot_hook.dev = NULL;
- po->ifindex = -1;
+ WRITE_ONCE(po->ifindex, -1);
packet_cached_dev_reset(po);
} else {
po->prot_hook.dev = dev;
- po->ifindex = dev ? dev->ifindex : 0;
+ WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
packet_cached_dev_assign(po, dev);
}
}
@@ -3502,7 +3505,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
uaddr->sa_family = AF_PACKET;
memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+ dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
if (dev)
strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
rcu_read_unlock();
@@ -3517,16 +3520,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
+ int ifindex;
if (peer)
return -EOPNOTSUPP;
+ ifindex = READ_ONCE(po->ifindex);
sll->sll_family = AF_PACKET;
- sll->sll_ifindex = po->ifindex;
- sll->sll_protocol = po->num;
+ sll->sll_ifindex = ifindex;
+ sll->sll_protocol = READ_ONCE(po->num);
sll->sll_pkttype = 0;
rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
+ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
@@ -4105,7 +4110,7 @@ static int packet_notifier(struct notifier_block *this,
}
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
- po->ifindex = -1;
+ WRITE_ONCE(po->ifindex, -1);
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
@@ -4411,7 +4416,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
was_running = po->running;
num = po->num;
if (was_running) {
- po->num = 0;
+ WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);
@@ -4446,7 +4451,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
spin_lock(&po->bind_lock);
if (was_running) {
- po->num = num;
+ WRITE_ONCE(po->num, num);
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
@@ -4616,8 +4621,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
s,
refcount_read(&s->sk_refcnt),
s->sk_type,
- ntohs(po->num),
- po->ifindex,
+ ntohs(READ_ONCE(po->num)),
+ READ_ONCE(po->ifindex),
po->running,
atomic_read(&s->sk_rmem_alloc),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
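The af_packet changes above annotate every lockless read of po->num and po->ifindex with READ_ONCE() and the corresponding locked writes with WRITE_ONCE(). A minimal sketch of that pairing, using a hypothetical structure rather than packet_sock, might look like this:

#include <linux/compiler.h>
#include <linux/spinlock.h>

/* Hypothetical example of the marked-access pattern: writers hold the lock,
 * readers may peek without it, and the ONCE accessors document the data race
 * for KCSAN and prevent store/load tearing.
 */
struct counter {
	spinlock_t lock;
	int value;		/* written under lock, also read locklessly */
};

static void counter_set(struct counter *c, int v)
{
	spin_lock(&c->lock);
	WRITE_ONCE(c->value, v);	/* marked store */
	spin_unlock(&c->lock);
}

static int counter_peek(struct counter *c)
{
	return READ_ONCE(c->value);	/* marked lockless load */
}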
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c0477bec09bd..f2efaa4225f9 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -436,7 +436,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
struct qrtr_sock *ipc;
struct sk_buff *skb;
struct qrtr_cb *cb;
- unsigned int size;
+ size_t size;
unsigned int ver;
size_t hdrlen;
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 4db109fb6ec2..5b426dc3634d 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (rds_cmsg_recv(inc, msg, rs)) {
ret = -EFAULT;
- goto out;
+ break;
}
rds_recvmsg_zcookie(rs, msg);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 18edd9ad1410..a656baa321fe 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
}
err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
- if (err == NF_ACCEPT &&
- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
- if (maniptype == NF_NAT_MANIP_SRC)
- maniptype = NF_NAT_MANIP_DST;
- else
- maniptype = NF_NAT_MANIP_SRC;
-
- err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+ if (ct->status & IPS_SRC_NAT) {
+ if (maniptype == NF_NAT_MANIP_SRC)
+ maniptype = NF_NAT_MANIP_DST;
+ else
+ maniptype = NF_NAT_MANIP_SRC;
+
+ err = ct_nat_execute(skb, ct, ctinfo, range,
+ maniptype);
+ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ err = ct_nat_execute(skb, ct, ctinfo, NULL,
+ NF_NAT_MANIP_SRC);
+ }
}
return err;
#else
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7d37638ee1c7..951542843cab 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
}
tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
- if (!tcph)
+ if (!tcph || tcph->doff < 5)
return NULL;
return skb_header_pointer(skb, offset,
@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
length--;
continue;
}
+ if (length < 2)
+ break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
length--;
continue;
}
+ if (length < 2)
+ break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
@@ -2338,7 +2342,7 @@ static int cake_config_precedence(struct Qdisc *sch)
/* List of known Diffserv codepoints:
*
- * Least Effort (CS1)
+ * Least Effort (CS1, LE)
* Best Effort (CS0)
* Max Reliability & LLT "Lo" (TOS1)
* Max Throughput (TOS2)
@@ -2360,7 +2364,7 @@ static int cake_config_precedence(struct Qdisc *sch)
* Total 25 codepoints.
*/
-/* List of traffic classes in RFC 4594:
+/* List of traffic classes in RFC 4594, updated by RFC 8622:
* (roughly descending order of contended priority)
* (roughly ascending order of uncontended throughput)
*
@@ -2375,7 +2379,7 @@ static int cake_config_precedence(struct Qdisc *sch)
* Ops, Admin, Management (CS2,TOS1) - eg. ssh
* Standard Service (CS0 & unrecognised codepoints)
* High Throughput Data (AF1x,TOS2) - eg. web traffic
- * Low Priority Data (CS1) - eg. BitTorrent
+ * Low Priority Data (CS1,LE) - eg. BitTorrent
* Total 12 traffic classes.
*/
@@ -2391,7 +2395,7 @@ static int cake_config_diffserv8(struct Qdisc *sch)
* Video Streaming (AF4x, AF3x, CS3)
* Bog Standard (CS0 etc.)
* High Throughput (AF1x, TOS2)
- * Background Traffic (CS1)
+ * Background Traffic (CS1, LE)
*
* Total 8 traffic classes.
*/
@@ -2435,7 +2439,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
* Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
* Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
* Best Effort (CS0, AF1x, TOS2, and those not specified)
- * Background Traffic (CS1)
+ * Background Traffic (CS1, LE)
*
* Total 4 traffic classes.
*/
@@ -2473,7 +2477,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
static int cake_config_diffserv3(struct Qdisc *sch)
{
/* Simplified Diffserv structure with 3 tins.
- * Low Priority (CS1)
+ * Low Priority (CS1, LE)
* Best Effort
* Latency Sensitive (TOS4, VA, EF, CS6, CS7)
*/
diff --git a/net/socket.c b/net/socket.c
index 27e3e7d53f8e..4f2c6d2795d0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1072,19 +1072,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
* what to do with it - that's up to the protocol still.
*/
-/**
- * get_net_ns - increment the refcount of the network namespace
- * @ns: common namespace (net)
- *
- * Returns the net's common namespace.
- */
-
-struct ns_common *get_net_ns(struct ns_common *ns)
-{
- return &get_net(container_of(ns, struct net, ns))->ns;
-}
-EXPORT_SYMBOL_GPL(get_net_ns);
-
static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct socket *sock;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5a31307ceb76..5d1192ceb139 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -535,12 +535,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.mnt = NULL;
state = sk->sk_state;
sk->sk_state = TCP_CLOSE;
+
+ skpair = unix_peer(sk);
+ unix_peer(sk) = NULL;
+
unix_state_unlock(sk);
wake_up_interruptible_all(&u->peer_wait);
- skpair = unix_peer(sk);
-
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
unix_state_lock(skpair);
@@ -555,7 +557,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
- unix_peer(sk) = NULL;
}
/* Try to flush out this socket. Throw out buffers at least */
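The af_unix hunk above fetches and clears the peer pointer inside the same unix_state_lock() section, so no other path can observe or reuse the peer once release has claimed it. A rough sketch of that detach-then-act pattern, with hypothetical names and a hypothetical conn_put() refcount helper:

#include <linux/spinlock.h>

struct conn {
	spinlock_t lock;
	struct conn *peer;	/* protected by lock */
};

static void conn_put(struct conn *c);	/* hypothetical reference drop */

static void conn_release(struct conn *c)
{
	struct conn *peer;

	spin_lock(&c->lock);
	peer = c->peer;		/* take ownership of the reference */
	c->peer = NULL;		/* concurrent paths now see no peer */
	spin_unlock(&c->lock);

	if (peer)
		conn_put(peer);	/* drop the reference outside the lock */
}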
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 2eee93985ab0..af590ae606b6 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
@$(kecho) " GEN $@"
@(echo '#include "reg.h"'; \
echo 'const u8 shipped_regdb_certs[] = {'; \
- cat $^ ; \
+ echo | cat - $^ ; \
echo '};'; \
echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
) > $@
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6fbf7537faf5..8d0883e81093 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1340,6 +1340,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
rdev->devlist_generation++;
wdev->registered = true;
+ if (wdev->netdev &&
+ sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
+ "phy80211"))
+ pr_err("failed to add phy80211 symlink to netdev!\n");
+
nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
}
@@ -1365,14 +1370,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
if (ret)
goto out;
- if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
- "phy80211")) {
- pr_err("failed to add phy80211 symlink to netdev!\n");
- unregister_netdevice(dev);
- ret = -EINVAL;
- goto out;
- }
-
cfg80211_register_wdev(rdev, wdev);
ret = 0;
out:
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index 6bdd96408022..d245968b74cb 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -334,6 +334,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
gfp_t gfp)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
struct sk_buff *msg;
void *hdr;
@@ -364,9 +365,20 @@ free_msg:
nlmsg_free(msg);
free_request:
spin_lock_bh(&wdev->pmsr_lock);
- list_del(&req->list);
+ /*
+ * cfg80211_pmsr_process_abort() may have already moved this request
+ * to the free list, and will free it later. In this case, don't free
+ * it here.
+ */
+ list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
+ if (tmp == req) {
+ list_del(&req->list);
+ to_free = req;
+ break;
+ }
+ }
spin_unlock_bh(&wdev->pmsr_lock);
- kfree(req);
+ kfree(to_free);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
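The cfg80211_pmsr_complete() change frees the request only if it is still on wdev->pmsr_list, so a concurrent cfg80211_pmsr_process_abort() that already unlinked it keeps sole ownership. A hedged, generic sketch of that check-before-free idiom (the types and names below are made up for illustration):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_req {
	struct list_head list;
};

struct my_ctx {
	spinlock_t lock;
	struct list_head pending;
};

static void my_complete(struct my_ctx *ctx, struct my_req *req)
{
	struct my_req *tmp, *next, *to_free = NULL;

	spin_lock_bh(&ctx->lock);
	list_for_each_entry_safe(tmp, next, &ctx->pending, list) {
		if (tmp == req) {
			list_del(&req->list);	/* still queued: we own it now */
			to_free = req;
			break;
		}
	}
	spin_unlock_bh(&ctx->lock);

	kfree(to_free);		/* kfree(NULL) is a no-op, so no branch needed */
}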
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9b959e3b09c6..0c3f05c9be27 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
if (rdev->wiphy.registered && rdev->ops->resume)
ret = rdev_resume(rdev);
wiphy_unlock(&rdev->wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(&rdev->wiphy);
+
rtnl_unlock();
return ret;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7ec021a610ae..18dba3d7c638 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1059,6 +1059,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_MESH_POINT:
/* mesh should be handled? */
break;
+ case NL80211_IFTYPE_OCB:
+ cfg80211_leave_ocb(rdev, dev);
+ break;
default:
break;
}
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index f9b19524da11..1e9baa5c4fc6 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
Elf32_Word const *symtab_shndx)
{
unsigned long offset;
+ unsigned short shndx = w2(sym->st_shndx);
int index;
- if (sym->st_shndx != SHN_XINDEX)
- return w2(sym->st_shndx);
+ if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
+ return shndx;
- offset = (unsigned long)sym - (unsigned long)symtab;
- index = offset / sizeof(*sym);
+ if (shndx == SHN_XINDEX) {
+ offset = (unsigned long)sym - (unsigned long)symtab;
+ index = offset / sizeof(*sym);
- return w(symtab_shndx[index]);
+ return w(symtab_shndx[index]);
+ }
+
+ return 0;
}
static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 1645e4142e30..9863be6fd43e 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -297,8 +297,16 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
return err;
}
spin_lock_irq(&tmr->lock);
- tmr->timeri = t;
+ if (tmr->timeri)
+ err = -EBUSY;
+ else
+ tmr->timeri = t;
spin_unlock_irq(&tmr->lock);
+ if (err < 0) {
+ snd_timer_close(t);
+ snd_timer_instance_free(t);
+ return err;
+ }
return 0;
}
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index e0faa6601966..5805c5de39fb 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -804,7 +804,7 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
static inline void cancel_stream(struct amdtp_stream *s)
{
s->packet_index = -1;
- if (current_work() == &s->period_work)
+ if (in_interrupt())
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 43e37145eb5d..ab5113cccffa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6568,6 +6568,7 @@ enum {
ALC285_FIXUP_HP_SPECTRE_X360,
ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
ALC623_FIXUP_LENOVO_THINKSTATION_P340,
+ ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8146,6 +8147,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC283_FIXUP_HEADSET_MIC,
},
+ [ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x21, 0x03211030 }, /* Change the Headphone location to Left */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8182,6 +8192,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -8310,6 +8321,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -8328,10 +8341,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -8737,6 +8752,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
{}
};
#define ALC225_STANDARD_PINS \
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 87f5709fe2cc..4a50b169fe03 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
NULL, 0),
- SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
- NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+ 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+ NULL, 0),
SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
RT5659_PWR_VREF3_BIT, 0, NULL, 0),
@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
/* Input Side */
- SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
- 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
static int rt5659_probe(struct snd_soc_component *component)
{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
rt5659->component = component;
+ switch (rt5659->pdata.jd_src) {
+ case RT5659_JD_HDA_HEADER:
+ break;
+
+ default:
+ snd_soc_dapm_new_controls(dapm,
+ rt5659_particular_dapm_widgets,
+ ARRAY_SIZE(rt5659_particular_dapm_widgets));
+ break;
+ }
+
return 0;
}
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
index fed80c8f994f..e78ba3b064c4 100644
--- a/sound/soc/codecs/rt5682-sdw.c
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
- regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+ regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
index 81866aeb3fbf..55b2a1f52ca3 100644
--- a/sound/soc/codecs/tas2562.h
+++ b/sound/soc/codecs/tas2562.h
@@ -57,13 +57,13 @@
#define TAS2562_TDM_CFG0_RAMPRATE_MASK BIT(5)
#define TAS2562_TDM_CFG0_RAMPRATE_44_1 BIT(5)
#define TAS2562_TDM_CFG0_SAMPRATE_MASK GENMASK(3, 1)
-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ 0x0
-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ 0x1
-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ 0x2
-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ 0x3
-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ 0x4
-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ 0x5
-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ (0x0 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ (0x1 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ (0x2 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ (0x3 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ (0x4 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ (0x5 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
#define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index c62bfd1c3ac7..4f55b316cf0f 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -744,6 +744,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
/* Initialize sound card */
priv->pdev = pdev;
priv->card.dev = &pdev->dev;
+ priv->card.owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(&priv->card, "model");
if (ret) {
snprintf(priv->name, sizeof(priv->name), "%s-audio",
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 28c7497344e3..a6e95db6b3fb 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+ unsigned int id = dai->driver->id;
clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
+ /*
+ * Ensure LRCLK is disabled even in device node validation.
+ * This has no impact if it was already disabled in
+ * lpass_cpu_daiops_trigger() suspend.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
+ else
+ regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
+
+ /*
+ * BCLK may not be enabled if lpass_cpu_daiops_prepare is called before
+ * lpass_cpu_daiops_shutdown. It's paired with the clk_enable in
+ * lpass_cpu_daiops_prepare.
+ */
+ if (drvdata->mi2s_was_prepared[dai->driver->id]) {
+ drvdata->mi2s_was_prepared[dai->driver->id] = false;
+ clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+ }
+
clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
}
@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /*
+ * Ensure lpass BCLK/LRCLK is enabled during
+ * device resume as lpass_cpu_daiops_prepare() is not called
+ * after the device resumes. We don't check mi2s_was_prepared before
+ * enabling/disabling the BCLK in trigger events because:
+ * 1. These trigger events are paired, so the BCLK
+ * enable_count stays balanced.
+ * 2. The BCLK can be shared (e.g. headset and headset mic);
+ * we need to increase the enable_count so that we don't
+ * turn off the shared BCLK while other devices are using
+ * it.
+ */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = regmap_fields_write(i2sctl->spken, id,
LPAIF_I2SCTL_SPKEN_ENABLE);
@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ /*
+ * Ensure lpass BCLK/LRCLK is disabled during
+ * device suspend.
+ */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = regmap_fields_write(i2sctl->spken, id,
LPAIF_I2SCTL_SPKEN_DISABLE);
@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
return ret;
}
+static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+ unsigned int id = dai->driver->id;
+ int ret;
+
+ /*
+ * Ensure the lpass BCLK/LRCLK enable bits are set before playback/capture
+ * data flow starts. This gives the other codec some lead time before
+ * the data flow begins
+ * (e.g. to drop start-up pop noise before capture starts).
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
+ else
+ ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
+
+ if (ret) {
+ dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
+ * be called multiple times. It's paired with the clk_disable in
+ * lpass_cpu_daiops_shutdown.
+ */
+ if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
+ ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+ if (ret) {
+ dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+ return ret;
+ }
+ drvdata->mi2s_was_prepared[dai->driver->id] = true;
+ }
+ return 0;
+}
+
const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
.set_sysclk = lpass_cpu_daiops_set_sysclk,
.startup = lpass_cpu_daiops_startup,
.shutdown = lpass_cpu_daiops_shutdown,
.hw_params = lpass_cpu_daiops_hw_params,
.trigger = lpass_cpu_daiops_trigger,
+ .prepare = lpass_cpu_daiops_prepare,
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
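The new prepare callback and the extended shutdown above keep the bit clock's clk_enable()/clk_disable() calls balanced even though prepare can run several times per stream. A minimal sketch of that guard, against a hypothetical driver structure rather than lpass_data:

#include <linux/clk.h>

struct my_dai_data {
	struct clk *bit_clk;
	bool was_prepared;	/* true while we own one enable count */
};

static int my_dai_prepare(struct my_dai_data *d)
{
	int ret;

	if (d->was_prepared)	/* prepare may be called repeatedly */
		return 0;

	ret = clk_enable(d->bit_clk);
	if (ret)
		return ret;

	d->was_prepared = true;
	return 0;
}

static void my_dai_shutdown(struct my_dai_data *d)
{
	if (d->was_prepared) {
		d->was_prepared = false;
		clk_disable(d->bit_clk);	/* drop only the count prepare() took */
	}
	clk_unprepare(d->bit_clk);
}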
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 83b2e08ade06..7f72214404ba 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -67,6 +67,10 @@ struct lpass_data {
/* MI2S SD lines to use for playback/capture */
unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+
+ /* Whether the MI2S prepare dai_ops callback has been called */
+ bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
+
int hdmi_port_enable;
/* low-power audio interface (LPAIF) registers */
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1c0904acb935..a76974ccfce1 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2225,6 +2225,8 @@ static char *fmt_single_name(struct device *dev, int *id)
return NULL;
name = devm_kstrdup(dev, devname, GFP_KERNEL);
+ if (!name)
+ return NULL;
/* are we a "%s.%d" name (platform and SPI components) */
found = strstr(name, dev->driver->name);
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 73076d425efb..4893a56208e0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1901,7 +1901,7 @@ static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
* @src: older version of pcm as a source
* @pcm: latest version of pcm created from the source
*
- * Support from vesion 4. User should free the returned pcm manually.
+ * Support from version 4. User should free the returned pcm manually.
*/
static int pcm_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_pcm *src,
@@ -2089,7 +2089,7 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
* @src: old version of physical link config as a source
* @link: latest version of physical link config created from the source
*
- * Support from vesion 4. User need free the returned link config manually.
+ * Support from version 4. Users need to free the returned link config manually.
*/
static int link_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_link_config *src,
@@ -2400,7 +2400,7 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
* @src: old version of manifest as a source
* @manifest: latest version of manifest created from the source
*
- * Support from vesion 4. Users need free the returned manifest manually.
+ * Support from version 4. Users need to free the returned manifest manually.
*/
static int manifest_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_manifest *src,
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index fd265803f7bc..c83fb6255961 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -256,6 +256,7 @@ suspend:
/* reset FW state */
sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+ sdev->enabled_cores_mask = 0;
return ret;
}
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index b7dd944dc867..8f28fafa98b3 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -56,11 +56,8 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#ifdef CONFIG_X86_SGX
# define DISABLE_SGX 0
diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
index 078cbd2ba651..de7f30f99af3 100644
--- a/tools/bootconfig/include/linux/bootconfig.h
+++ b/tools/bootconfig/include/linux/bootconfig.h
@@ -4,4 +4,8 @@
#include "../../../../include/linux/bootconfig.h"
+#ifndef fallthrough
+# define fallthrough
+#endif
+
#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 7362bef1a368..6cd6080cac04 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
}
/* TODO: Ensure the @path is initramfs/initrd image */
if (fstat(fd, &stat) < 0) {
+ ret = -errno;
pr_err("Failed to get the size of %s\n", path);
goto out;
}
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 6de5a7fc066b..d2a942086fcb 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 7d6687618d80..d1b327036ae4 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -289,6 +289,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 6061431ee04c..e9b619aa0cdf 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -1094,7 +1094,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
goto out_put_ctx;
}
if (xsk->fd == umem->fd)
- umem->rx_ring_setup_done = true;
+ umem->tx_ring_setup_done = true;
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 24295d39713b..523aa4157f80 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -747,6 +747,10 @@ int arch_rewrite_retpolines(struct objtool_file *file)
list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
+ if (insn->type != INSN_JUMP_DYNAMIC &&
+ insn->type != INSN_CALL_DYNAMIC)
+ continue;
+
if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
continue;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 743c2e9d0f56..41bca1d13d8e 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -717,7 +717,7 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
{
- struct section *symtab;
+ struct section *symtab, *symtab_shndx;
struct symbol *sym;
Elf_Data *data;
Elf_Scn *s;
@@ -769,6 +769,29 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
symtab->len += data->d_size;
symtab->changed = true;
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ if (symtab_shndx) {
+ s = elf_getscn(elf->elf, symtab_shndx->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return NULL;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return NULL;
+ }
+
+ data->d_buf = &sym->sym.st_size; /* conveniently 0 */
+ data->d_size = sizeof(Elf32_Word);
+ data->d_align = 4;
+ data->d_type = ELF_T_WORD;
+
+ symtab_shndx->len += 4;
+ symtab_shndx->changed = true;
+ }
+
sym->sec = find_section_by_index(elf, 0);
elf_add_symbol(elf, sym);
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 22eb31e48ca7..2f9948b3d943 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -11,9 +11,9 @@ compare_number()
second_num=$2
# upper bound is first_num * 110%
- upper=$(( $first_num + $first_num / 10 ))
+ upper=$(expr $first_num + $first_num / 10 )
# lower bound is first_num * 90%
- lower=$(( $first_num - $first_num / 10 ))
+ lower=$(expr $first_num - $first_num / 10 )
if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
echo "The difference between $first_num and $second_num are greater than 10%."
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index b8fc5c53ba6f..0d8e3dcb7f88 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 3ff4936a15a4..da19be7da284 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -776,10 +776,10 @@ static int machine__process_ksymbol_register(struct machine *machine,
if (dso) {
dso->kernel = DSO_SPACE__KERNEL;
map = map__new2(0, dso);
+ dso__put(dso);
}
if (!dso || !map) {
- dso__put(dso);
return -ENOMEM;
}
@@ -792,6 +792,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
map->start = event->ksymbol.addr;
map->end = map->start + event->ksymbol.len;
maps__insert(&machine->kmaps, map);
+ map__put(map);
dso__set_loaded(dso);
if (is_bpf_image(event->ksymbol.name)) {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 8336dd8e8098..d3cf2dee36c8 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
return false;
}
-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
{
if (!ev1->pmu_name || !ev2->pmu_name)
- return false;
+ return true;
return !strcmp(ev1->pmu_name, ev2->pmu_name);
}
@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
*/
if (!has_constraint &&
ev->leader != metric_events[i]->leader &&
- evsel_same_pmu(ev->leader, metric_events[i]->leader))
+ evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
set_bit(ev->idx, evlist_used);
@@ -1073,16 +1073,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
if (ret)
- return ret;
+ goto out;
ret = resolve_metric(d->metric_no_group,
d->metric_list, NULL, d->ids);
if (ret)
- return ret;
+ goto out;
*(d->has_match) = true;
- return *d->ret;
+out:
+ *(d->ret) = ret;
+ return ret;
}
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 106b3d60881a..e59242c361ce 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1723,6 +1723,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (event->header.size < hdr_sz || event->header.size > buf_sz)
return -1;
+ buf += hdr_sz;
rest = event->header.size - hdr_sz;
if (readn(fd, buf, rest) != (ssize_t)rest)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 1512092e1e68..3a9e332c5e36 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1147,7 +1147,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
}
}
- if (test->insn_processed) {
+ if (!unpriv && test->insn_processed) {
uint32_t insn_processed;
char *proc;
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
index ca8fdb1b3f01..7d7ebee5cc7a 100644
--- a/tools/testing/selftests/bpf/verifier/and.c
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -61,6 +61,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R1 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0
},
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index 8a1caf46ffbc..e061e8799ce2 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -508,6 +508,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT
},
{
@@ -528,6 +530,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT
},
{
@@ -569,6 +573,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -589,6 +595,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -609,6 +617,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -674,6 +684,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -695,6 +707,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
index 17fe33a75034..2c8935b3e65d 100644
--- a/tools/testing/selftests/bpf/verifier/dead_code.c
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -8,6 +8,8 @@
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index bd5cae4a7f73..1c857b2fbdf0 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -87,6 +87,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -150,6 +152,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -213,6 +217,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -280,6 +286,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -348,6 +356,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -416,6 +426,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -484,6 +496,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -552,6 +566,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -620,6 +636,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -688,6 +706,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -756,6 +776,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
index 8dcd4e0383d5..11fc68da735e 100644
--- a/tools/testing/selftests/bpf/verifier/jset.c
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -82,8 +82,8 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .retval_unpriv = 1,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.retval = 1,
.result = ACCEPT,
},
@@ -141,7 +141,8 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -162,6 +163,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
index bd436df5cc32..111801aea5e3 100644
--- a/tools/testing/selftests/bpf/verifier/unpriv.c
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -420,6 +420,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R7 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0,
},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index 7ae2859d495c..a3e593ddfafc 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -120,7 +120,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+ .errstr_unpriv = "R2 pointer comparison prohibited",
.retval = 0,
},
{
@@ -159,7 +159,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
// fake-dead code; targeted from branch A to
- // prevent dead code sanitization
+ // prevent dead code sanitization, rejected
+ // via branch B however
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -167,7 +168,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
.retval = 0,
},
{
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index fcd8e3855111..35739567189e 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -43,6 +43,7 @@ enum vm_guest_mode {
VM_MODE_P40V48_4K,
VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
+ VM_MODE_P47V64_4K,
NUM_VM_MODES,
};
@@ -60,7 +61,7 @@ enum vm_guest_mode {
#elif defined(__s390x__)
-#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
+#define VM_MODE_DEFAULT VM_MODE_P47V64_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 16)
@@ -285,10 +286,11 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
uint32_t num_percpu_pages, void *guest_code,
uint32_t vcpuids[]);
-/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as parameters */
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t extra_mem_pages, uint32_t num_percpu_pages,
- void *guest_code, uint32_t vcpuids[]);
+ uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[]);
/*
* Adds a vCPU with reasonable defaults (e.g. a stack)
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 1c4753fff19e..82171f17c1d7 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -268,7 +268,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
/* Create a VM with enough guest pages */
guest_num_pages = test_mem_size / guest_page_size;
- vm = vm_create_with_vcpus(mode, nr_vcpus,
+ vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
guest_num_pages, 0, guest_code, NULL);
/* Align down GPA of the testing memslot */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 28e528c19d28..a2b732cf96ea 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -82,7 +82,7 @@ int kvm_check_cap(long cap)
kvm_fd = open_kvm_dev_path_or_exit();
ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
- TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
+ TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
" rc: %i errno: %i", ret, errno);
close(kvm_fd);
@@ -175,6 +175,7 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
+ [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
@@ -192,6 +193,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 40, 48, 0x1000, 12 },
{ 40, 48, 0x10000, 16 },
{ 0, 0, 0x1000, 12 },
+ { 47, 64, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@@ -277,6 +279,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
break;
+ case VM_MODE_P47V64_4K:
+ vm->pgtable_levels = 5;
+ break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
}
@@ -308,21 +313,50 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
+/*
+ * VM Create with customized parameters
+ *
+ * Input Args:
+ * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
+ * nr_vcpus - VCPU count
+ * slot0_mem_pages - Slot0 physical memory size, in pages
+ * extra_mem_pages - Non-slot0 physical memory total size, in pages
+ * num_percpu_pages - Per-cpu physical memory size, in pages
+ * guest_code - Guest entry point
+ * vcpuids - VCPU IDs
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Pointer to opaque structure that describes the created VM.
+ *
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
+ * with a customized slot0 memory size (currently at least 512 pages).
+ * extra_mem_pages is only used to calculate the maximum page table size;
+ * this function does not allocate any real memory for non-slot0 regions.
+ */
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t extra_mem_pages, uint32_t num_percpu_pages,
- void *guest_code, uint32_t vcpuids[])
+ uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[])
{
+ uint64_t vcpu_pages, extra_pg_pages, pages;
+ struct kvm_vm *vm;
+ int i;
+
+ /* Force slot0 memory size to be at least DEFAULT_GUEST_PHY_PAGES */
+ if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
+ slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
+
/* The maximum page table size for a memory region will be when the
* smallest pages are used. Considering each page contains x page
* table descriptors, the total extra size for page tables (for extra
* N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
* than N/x*2.
*/
- uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
- uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
- uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages + extra_pg_pages;
- struct kvm_vm *vm;
- int i;
+ vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+ extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+ pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
"nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -354,8 +388,8 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
uint32_t num_percpu_pages, void *guest_code,
uint32_t vcpuids[])
{
- return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
- num_percpu_pages, guest_code, vcpuids);
+ return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
+ extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
}
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
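The kvm_page_table_test.c hunk above and the perf_test_util.c hunk below simply pass DEFAULT_GUEST_PHY_PAGES for the new slot0_mem_pages argument. A minimal sketch of the updated call shape, assuming a hypothetical guest_main() entry point and placeholder page counts (not taken from this patch):

	struct kvm_vm *vm;

	/* Sketch: two vCPUs, default-sized slot0, 4096 extra pages for a test memslot. */
	vm = vm_create_with_vcpus(VM_MODE_DEFAULT, 2,
				  DEFAULT_GUEST_PHY_PAGES,	/* slot0_mem_pages; smaller values are raised to this */
				  4096,				/* extra_mem_pages; only sizes the page tables */
				  0,				/* num_percpu_pages */
				  guest_main, NULL);

The page-table overhead estimate in the in-function comment holds because, with x page-table entries per page (x >= 2), N/x + N/x^2 + N/x^3 + ... = N/(x - 1) <= 2N/x.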
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index abf381800a59..7397ca299835 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -69,7 +69,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
- vm = vm_create_with_vcpus(mode, vcpus,
+ vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
0, guest_code, NULL);
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 6ad6c8276b2e..af1031fed97f 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -166,75 +166,75 @@ size_t get_def_hugetlb_pagesz(void)
return 0;
}
+#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
+#define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB)
+
const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
{
- static const int anon_flags = MAP_PRIVATE | MAP_ANONYMOUS;
- static const int anon_huge_flags = anon_flags | MAP_HUGETLB;
-
static const struct vm_mem_backing_src_alias aliases[] = {
[VM_MEM_SRC_ANONYMOUS] = {
.name = "anonymous",
- .flag = anon_flags,
+ .flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_THP] = {
.name = "anonymous_thp",
- .flag = anon_flags,
+ .flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
.name = "anonymous_hugetlb",
- .flag = anon_huge_flags,
+ .flag = ANON_HUGE_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
.name = "anonymous_hugetlb_16kb",
- .flag = anon_huge_flags | MAP_HUGE_16KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
.name = "anonymous_hugetlb_64kb",
- .flag = anon_huge_flags | MAP_HUGE_64KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_64KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
.name = "anonymous_hugetlb_512kb",
- .flag = anon_huge_flags | MAP_HUGE_512KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_512KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
.name = "anonymous_hugetlb_1mb",
- .flag = anon_huge_flags | MAP_HUGE_1MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_1MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
.name = "anonymous_hugetlb_2mb",
- .flag = anon_huge_flags | MAP_HUGE_2MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
.name = "anonymous_hugetlb_8mb",
- .flag = anon_huge_flags | MAP_HUGE_8MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_8MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
.name = "anonymous_hugetlb_16mb",
- .flag = anon_huge_flags | MAP_HUGE_16MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
.name = "anonymous_hugetlb_32mb",
- .flag = anon_huge_flags | MAP_HUGE_32MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_32MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
.name = "anonymous_hugetlb_256mb",
- .flag = anon_huge_flags | MAP_HUGE_256MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_256MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
.name = "anonymous_hugetlb_512mb",
- .flag = anon_huge_flags | MAP_HUGE_512MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_512MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
.name = "anonymous_hugetlb_1gb",
- .flag = anon_huge_flags | MAP_HUGE_1GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_1GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
.name = "anonymous_hugetlb_2gb",
- .flag = anon_huge_flags | MAP_HUGE_2GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_2GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
.name = "anonymous_hugetlb_16gb",
- .flag = anon_huge_flags | MAP_HUGE_16GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16GB,
},
[VM_MEM_SRC_SHMEM] = {
.name = "shmem",
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 9307f25d8130..11239652d805 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -267,7 +267,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
TEST_ASSERT(data->hva_slots, "malloc() fail");
- data->vm = vm_create_default(VCPU_ID, 1024, guest_code);
+ data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
max_mem_slots - 1, data->pages_per_slot, rempages);
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 76d9487fb03c..5abe92d55b69 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
ipv4_rt_replace_mpath
}
+# Checks that a cached input route on a VRF port device is deleted
+# when the VRF is deleted.
+ipv4_local_rt_cache()
+{
+ run_cmd "ip addr add 10.0.0.1/32 dev lo"
+ run_cmd "ip netns add test-ns"
+ run_cmd "ip link add veth-outside type veth peer name veth-inside"
+ run_cmd "ip link add vrf-100 type vrf table 1100"
+ run_cmd "ip link set veth-outside master vrf-100"
+ run_cmd "ip link set veth-inside netns test-ns"
+ run_cmd "ip link set veth-outside up"
+ run_cmd "ip link set vrf-100 up"
+ run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
+ run_cmd "ip netns exec test-ns ip link set veth-inside up"
+ run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
+ run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
+ run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
+ run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
+ run_cmd "ip link delete vrf-100"
+
+ # if we do not hang, the test is a success
+ log_test $? 0 "Cached route removed from VRF port device"
+}
+
ipv4_route_test()
{
route_setup
ipv4_rt_add
ipv4_rt_replace
+ ipv4_local_rt_cache
route_cleanup
}
diff --git a/tools/testing/selftests/net/icmp.sh b/tools/testing/selftests/net/icmp.sh
new file mode 100755
index 000000000000..e4b04cd1644a
--- /dev/null
+++ b/tools/testing/selftests/net/icmp.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for checking ICMP response with dummy address instead of 0.0.0.0.
+# Sets up two namespaces like:
+# +----------------------+                          +--------------------+
+# | ns1                  |    v4-via-v6 routes:     | ns2                |
+# |                      |                  '       |                    |
+# |             +--------+    -> 172.16.1.0/24 ->   +--------+           |
+# |             | veth0  +--------------------------+  veth0 |           |
+# |             +--------+    <- 172.16.0.0/24 <-   +--------+           |
+# |           172.16.0.1 |                          | 2001:db8:1::2/64   |
+# |  2001:db8:1::1/64    |                          |                    |
+# +----------------------+                          +--------------------+
+#
+# The test then pings 172.16.1.1 from ns1. This results in a "net
+# unreachable" message being sent from ns2, but since no IPv4 address is
+# configured in ns2, the kernel should substitute the dummy address
+# 192.0.0.8 defined in RFC 7600.
+
+NS1=ns1
+NS2=ns2
+H1_IP=172.16.0.1/32
+H1_IP6=2001:db8:1::1
+RT1=172.16.1.0/24
+PINGADDR=172.16.1.1
+RT2=172.16.0.0/24
+H2_IP6=2001:db8:1::2
+
+TMPFILE=$(mktemp)
+
+cleanup()
+{
+ rm -f "$TMPFILE"
+ ip netns del $NS1
+ ip netns del $NS2
+}
+
+trap cleanup EXIT
+
+# Namespaces
+ip netns add $NS1
+ip netns add $NS2
+
+# Connectivity
+ip -netns $NS1 link add veth0 type veth peer name veth0 netns $NS2
+ip -netns $NS1 link set dev veth0 up
+ip -netns $NS2 link set dev veth0 up
+ip -netns $NS1 addr add $H1_IP dev veth0
+ip -netns $NS1 addr add $H1_IP6/64 dev veth0 nodad
+ip -netns $NS2 addr add $H2_IP6/64 dev veth0 nodad
+ip -netns $NS1 route add $RT1 via inet6 $H2_IP6
+ip -netns $NS2 route add $RT2 via inet6 $H1_IP6
+
+# Make sure ns2 will respond with ICMP unreachable
+ip netns exec $NS2 sysctl -qw net.ipv4.icmp_ratelimit=0 net.ipv4.ip_forward=1
+
+# Run the test - a ping runs in the background, and we capture ICMP responses
+# with tcpdump; -c 1 means it should exit on the first ping, but add a timeout
+# in case something goes wrong
+ip netns exec $NS1 ping -w 3 -i 0.5 $PINGADDR >/dev/null &
+ip netns exec $NS1 timeout 10 tcpdump -tpni veth0 -c 1 'icmp and icmp[icmptype] != icmp-echo' > $TMPFILE 2>/dev/null
+
+# Parse response and check for dummy address
+# tcpdump output looks like:
+# IP 192.0.0.8 > 172.16.0.1: ICMP net 172.16.1.1 unreachable, length 92
+RESP_IP=$(awk '{print $2}' < $TMPFILE)
+if [[ "$RESP_IP" != "192.0.0.8" ]]; then
+ echo "FAIL - got ICMP response from $RESP_IP, should be 192.0.0.8"
+ exit 1
+else
+ echo "OK"
+ exit 0
+fi
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 9ca5f1ba461e..2b495dc8d78e 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -197,9 +197,6 @@ ip -net "$ns4" link set ns4eth3 up
ip -net "$ns4" route add default via 10.0.3.2
ip -net "$ns4" route add default via dead:beef:3::2
-# use TCP syn cookies, even if no flooding was detected.
-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
-
set_ethtool_flags() {
local ns="$1"
local dev="$2"
@@ -737,6 +734,14 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
exit $ret
fi
+ # ns1<->ns2 is not subject to reordering/tc delays. Use it to test
+ # mptcp syncookie support.
+ if [ $sender = $ns1 ]; then
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+ else
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
+ fi
+
run_tests "$ns2" $sender 10.0.1.2
run_tests "$ns2" $sender dead:beef:1::2
run_tests "$ns2" $sender 10.0.2.1
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index a8fa64136282..7f26591f236b 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
readonly BASE="ns-$(mktemp -u XXXXXX)"
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
index 2fedc0781ce8..11d7cdb898c0 100755
--- a/tools/testing/selftests/net/veth.sh
+++ b/tools/testing/selftests/net/veth.sh
@@ -18,7 +18,8 @@ ret=0
cleanup() {
local ns
- local -r jobs="$(jobs -p)"
+ local jobs
+ readonly jobs="$(jobs -p)"
[ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
rm -f $STATS
@@ -108,7 +109,7 @@ chk_gro() {
if [ ! -f ../bpf/xdp_dummy.o ]; then
echo "Missing xdp_dummy helper. Build bpf selftest first"
- exit -1
+ exit 1
fi
create_ns
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 3171069a6b46..cd6430b39982 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
+TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
new file mode 100755
index 000000000000..6caf6ac8c285
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_fib.sh
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+# This tests the fib expression.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsrouter="nsrouter-$sfx"
+timeout=4
+
+log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
+
+cleanup()
+{
+ ip netns del ${ns1}
+ ip netns del ${ns2}
+ ip netns del ${nsrouter}
+
+ [ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add ${nsrouter}
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+dmesg | grep -q ' nft_rpfilter: '
+if [ $? -eq 0 ]; then
+ dmesg -c | grep ' nft_rpfilter: '
+ echo "WARN: a previous test run has failed" 1>&2
+fi
+
+sysctl -q net.netfilter.nf_log_all_netns=1
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+load_ruleset() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority 0; policy accept;
+ fib saddr . iif oif missing counter log prefix "$netns nft_rpfilter: " drop
+ }
+}
+EOF
+}
+
+load_ruleset_count() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority 0; policy accept;
+ ip daddr 1.1.1.1 fib saddr . iif oif missing counter drop
+ ip6 daddr 1c3::c01d fib saddr . iif oif missing counter drop
+ }
+}
+EOF
+}
+
+check_drops() {
+ dmesg | grep -q ' nft_rpfilter: '
+ if [ $? -eq 0 ]; then
+ dmesg | grep ' nft_rpfilter: '
+ echo "FAIL: rpfilter did drop packets"
+ return 1
+ fi
+
+ return 0
+}
+
+check_fib_counter() {
+ local want=$1
+ local ns=$2
+ local address=$3
+
+ line=$(ip netns exec ${ns} nft list table inet filter | grep 'fib saddr . iif' | grep $address | grep "packets $want" )
+ ret=$?
+
+ if [ $ret -ne 0 ];then
+ echo "Netns $ns fib counter doesn't match expected packet count of $want for $address" 1>&2
+ ip netns exec ${ns} nft list table inet filter
+ return 1
+ fi
+
+ if [ $want -gt 0 ]; then
+ echo "PASS: fib expression did drop packets for $address"
+ fi
+
+ return 0
+}
+
+load_ruleset ${nsrouter}
+load_ruleset ${ns1}
+load_ruleset ${ns2}
+
+ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2}
+
+ip -net ${nsrouter} link set lo up
+ip -net ${nsrouter} link set veth0 up
+ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0
+ip -net ${nsrouter} addr add dead:1::1/64 dev veth0
+
+ip -net ${nsrouter} link set veth1 up
+ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth1
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set eth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set eth0 up
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+ip -net ${ns1} route add default via 10.0.1.1
+ip -net ${ns1} route add default via dead:1::1
+
+ip -net ${ns2} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns2} addr add dead:2::99/64 dev eth0
+ip -net ${ns2} route add default via 10.0.2.1
+ip -net ${ns2} route add default via dead:2::1
+
+test_ping() {
+ local daddr4=$1
+ local daddr6=$2
+
+ ip netns exec ${ns1} ping -c 1 -q $daddr4 > /dev/null
+ ret=$?
+ if [ $ret -ne 0 ];then
+ check_drops
+ echo "FAIL: ${ns1} cannot reach $daddr4, ret $ret" 1>&2
+ return 1
+ fi
+
+ ip netns exec ${ns1} ping -c 3 -q $daddr6 > /dev/null
+ ret=$?
+ if [ $ret -ne 0 ];then
+ check_drops
+ echo "FAIL: ${ns1} cannot reach $daddr6, ret $ret" 1>&2
+ return 1
+ fi
+
+ return 0
+}
+
+ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+sleep 3
+
+test_ping 10.0.2.1 dead:2::1 || exit 1
+check_drops || exit 1
+
+test_ping 10.0.2.99 dead:2::99 || exit 1
+check_drops || exit 1
+
+echo "PASS: fib expression did not cause unwanted packet drops"
+
+ip netns exec ${nsrouter} nft flush table inet filter
+
+ip -net ${ns1} route del default
+ip -net ${ns1} -6 route del default
+
+ip -net ${ns1} addr del 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr del dead:1::99/64 dev eth0
+
+ip -net ${ns1} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns1} addr add dead:2::99/64 dev eth0
+
+ip -net ${ns1} route add default via 10.0.2.1
+ip -net ${ns1} -6 route add default via dead:2::1
+
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth0
+
+# Switch to a ruleset that doesn't log; this time
+# it is expected that this does drop the packets.
+load_ruleset_count ${nsrouter}
+
+# ns1 has a default route, but nsrouter does not; we must
+# not check the return value, since the ping to 1.1.1.1
+# will fail.
+check_fib_counter 0 ${nsrouter} 1.1.1.1 || exit 1
+check_fib_counter 0 ${nsrouter} 1c3::c01d || exit 1
+
+ip netns exec ${ns1} ping -c 1 -W 1 -q 1.1.1.1 > /dev/null
+check_fib_counter 1 ${nsrouter} 1.1.1.1 || exit 1
+
+sleep 2
+ip netns exec ${ns1} ping -c 3 -q 1c3::c01d > /dev/null
+check_fib_counter 3 ${nsrouter} 1c3::c01d || exit 1
+
+exit 0