-rw-r--r--  .mailmap | 2
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-mm-swap | 10
-rw-r--r--  Documentation/ABI/testing/sysfs-power | 2
-rw-r--r--  Documentation/device-mapper/dm-raid.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/net/marvell-pp2.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/net/rockchip-dwmac.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt | 28
-rw-r--r--  Documentation/filesystems/overlayfs.txt | 5
-rw-r--r--  Documentation/i2c/busses/i2c-i801 | 1
-rw-r--r--  Documentation/networking/bonding.txt | 2
-rw-r--r--  MAINTAINERS | 29
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/alpha/include/asm/mmu_context.h | 1
-rw-r--r--  arch/arc/Kconfig | 2
-rw-r--r--  arch/arc/Makefile | 2
-rw-r--r--  arch/arc/boot/dts/axs10x_mb.dtsi | 9
-rw-r--r--  arch/arc/boot/dts/hsdk.dts | 32
-rw-r--r--  arch/arc/configs/axs101_defconfig | 2
-rw-r--r--  arch/arc/configs/axs103_defconfig | 2
-rw-r--r--  arch/arc/configs/axs103_smp_defconfig | 2
-rw-r--r--  arch/arc/configs/haps_hs_smp_defconfig | 2
-rw-r--r--  arch/arc/configs/hsdk_defconfig | 3
-rw-r--r--  arch/arc/configs/vdk_hs38_defconfig | 2
-rw-r--r--  arch/arc/configs/vdk_hs38_smp_defconfig | 2
-rw-r--r--  arch/arc/include/asm/arcregs.h | 11
-rw-r--r--  arch/arc/kernel/setup.c | 32
-rw-r--r--  arch/arc/plat-axs10x/axs10x.c | 7
-rw-r--r--  arch/arc/plat-hsdk/Kconfig | 3
-rw-r--r--  arch/arc/plat-hsdk/platform.c | 42
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/am43x-epos-evm.dts | 6
-rw-r--r--  arch/arm/boot/dts/at91-sama5d27_som1_ek.dts | 19
-rw-r--r--  arch/arm/boot/dts/da850-evm.dts | 7
-rw-r--r--  arch/arm/boot/dts/dra7xx-clocks.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts | 11
-rw-r--r--  arch/arm/boot/dts/stm32429i-eval.dts | 5
-rw-r--r--  arch/arm/boot/dts/stm32f4-pinctrl.dtsi | 343
-rw-r--r--  arch/arm/boot/dts/stm32f429-disco.dts | 1
-rw-r--r--  arch/arm/boot/dts/stm32f429-pinctrl.dtsi | 95
-rw-r--r--  arch/arm/boot/dts/stm32f429.dtsi | 297
-rw-r--r--  arch/arm/boot/dts/stm32f469-disco.dts | 1
-rw-r--r--  arch/arm/boot/dts/stm32f469-pinctrl.dtsi | 96
-rw-r--r--  arch/arm/configs/gemini_defconfig | 3
-rw-r--r--  arch/arm/configs/pxa_defconfig | 2
-rw-r--r--  arch/arm/configs/viper_defconfig | 2
-rw-r--r--  arch/arm/configs/zeus_defconfig | 2
-rw-r--r--  arch/arm/mach-at91/pm.c | 4
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 1
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts | 19
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts | 12
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | 9
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi | 13
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 39
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts | 7
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts | 13
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | 39
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts | 7
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts | 1
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-ap806.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi | 72
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 6
-rw-r--r--  arch/arm64/include/asm/memory.h | 9
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 2
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 2
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/mm/fault.c | 2
-rw-r--r--  arch/m32r/Kconfig | 4
-rw-r--r--  arch/m32r/kernel/traps.c | 9
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 6
-rw-r--r--  arch/mips/loongson32/common/platform.c | 38
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 2
-rw-r--r--  arch/mips/net/ebpf_jit.c | 2
-rwxr-xr-x  arch/mips/tools/generic-board-config.sh | 6
-rw-r--r--  arch/parisc/kernel/process.c | 2
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 4
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 24
-rw-r--r--  arch/powerpc/kernel/mce_power.c | 13
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 3
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 13
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_64_mprofile.S | 45
-rw-r--r--  arch/powerpc/kernel/watchdog.c | 30
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c | 5
-rw-r--r--  arch/powerpc/kvm/book3s_xive.h | 1
-rw-r--r--  arch/powerpc/lib/sstep.c | 6
-rw-r--r--  arch/powerpc/mm/numa.c | 1
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 2
-rw-r--r--  arch/powerpc/perf/imc-pmu.c | 39
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 10
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 17
-rw-r--r--  arch/powerpc/sysdev/fsl_rmu.c | 8
-rw-r--r--  arch/powerpc/sysdev/xive/common.c | 8
-rw-r--r--  arch/powerpc/sysdev/xive/spapr.c | 4
-rw-r--r--  arch/sh/include/cpu-sh2a/cpu/sh7264.h | 4
-rw-r--r--  arch/sh/include/cpu-sh2a/cpu/sh7269.h | 4
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sh7722.h | 2
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sh7757.h | 8
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/x86/entry/entry_32.S | 4
-rw-r--r--  arch/x86/entry/entry_64.S | 2
-rw-r--r--  arch/x86/events/intel/core.c | 11
-rw-r--r--  arch/x86/events/intel/uncore.c | 12
-rw-r--r--  arch/x86/hyperv/hv_init.c | 5
-rw-r--r--  arch/x86/hyperv/mmu.c | 57
-rw-r--r--  arch/x86/include/asm/alternative-asm.h | 4
-rw-r--r--  arch/x86/include/asm/alternative.h | 6
-rw-r--r--  arch/x86/include/asm/io.h | 4
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 4
-rw-r--r--  arch/x86/include/asm/mce.h | 1
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 8
-rw-r--r--  arch/x86/include/asm/mshyperv.h | 1
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 33
-rw-r--r--  arch/x86/kernel/amd_nb.c | 41
-rw-r--r--  arch/x86/kernel/apic/apic.c | 15
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-internal.h | 7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 27
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 19
-rw-r--r--  arch/x86/kernel/head32.c | 5
-rw-r--r--  arch/x86/kernel/kprobes/common.h | 13
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 14
-rw-r--r--  arch/x86/kernel/reboot.c | 4
-rw-r--r--  arch/x86/kernel/unwind_frame.c | 38
-rw-r--r--  arch/x86/kernel/unwind_orc.c | 29
-rw-r--r--  arch/x86/kvm/Kconfig | 1
-rw-r--r--  arch/x86/kvm/emulate.c | 6
-rw-r--r--  arch/x86/kvm/mmu.c | 17
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 3
-rw-r--r--  arch/x86/kvm/vmx.c | 2
-rw-r--r--  arch/x86/mm/Makefile | 11
-rw-r--r--  arch/x86/mm/mmap.c | 12
-rw-r--r--  arch/x86/mm/tlb.c | 105
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 4
-rw-r--r--  arch/x86/xen/enlighten.c | 4
-rw-r--r--  block/bio.c | 26
-rw-r--r--  block/blk-mq-debugfs.c | 6
-rw-r--r--  block/blk-throttle.c | 4
-rw-r--r--  block/bsg-lib.c | 27
-rw-r--r--  crypto/shash.c | 10
-rw-r--r--  crypto/skcipher.c | 17
-rw-r--r--  crypto/xts.c | 6
-rw-r--r--  drivers/acpi/arm64/iort.c | 35
-rw-r--r--  drivers/acpi/property.c | 29
-rw-r--r--  drivers/android/binder.c | 93
-rw-r--r--  drivers/android/binder_alloc.c | 18
-rw-r--r--  drivers/base/node.c | 12
-rw-r--r--  drivers/base/property.c | 19
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/nbd.c | 6
-rw-r--r--  drivers/block/zram/zram_drv.c | 36
-rw-r--r--  drivers/clk/clk-bulk.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3128.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 15
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 15
-rw-r--r--  drivers/dma-buf/sync_file.c | 17
-rw-r--r--  drivers/dma/altera-msgdma.c | 37
-rw-r--r--  drivers/dma/edma.c | 19
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 3
-rw-r--r--  drivers/gpio/Kconfig | 3
-rw-r--r--  drivers/gpio/gpio-omap.c | 24
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 24
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 19
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 8
-rw-r--r--  drivers/gpu/ipu-v3/ipu-pre.c | 29
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prg.c | 7
-rw-r--r--  drivers/hid/Kconfig | 1
-rw-r--r--  drivers/hid/hid-core.c | 2
-rw-r--r--  drivers/hid/hid-elecom.c | 13
-rw-r--r--  drivers/hid/hid-ids.h | 4
-rw-r--r--  drivers/hid/hid-multitouch.c | 7
-rw-r--r--  drivers/hid/hid-rmi.c | 13
-rw-r--r--  drivers/hid/hidraw.c | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 3
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 12
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_sys.c | 7
-rw-r--r--  drivers/hid/wacom_wac.c | 110
-rw-r--r--  drivers/hv/channel.c | 6
-rw-r--r--  drivers/hv/channel_mgmt.c | 37
-rw-r--r--  drivers/hv/vmbus_drv.c | 3
-rw-r--r--  drivers/hwmon/xgene-hwmon.c | 19
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-img-scb.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 17
-rw-r--r--  drivers/ide/ide-probe.c | 1
-rw-r--r--  drivers/ide/ide-scan-pci.c | 13
-rw-r--r--  drivers/ide/setup-pci.c | 63
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c | 8
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_p.h | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 11
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 4
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 2
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 12
-rw-r--r--  drivers/iommu/amd_iommu.c | 11
-rw-r--r--  drivers/iommu/exynos-iommu.c | 2
-rw-r--r--  drivers/md/bcache/closure.c | 4
-rw-r--r--  drivers/md/dm-core.h | 1
-rw-r--r--  drivers/md/dm-crypt.c | 5
-rw-r--r--  drivers/md/dm-ioctl.c | 37
-rw-r--r--  drivers/md/dm-raid.c | 11
-rw-r--r--  drivers/md/dm.c | 10
-rw-r--r--  drivers/media/rc/ir-sharp-decoder.c | 2
-rw-r--r--  drivers/misc/cxl/cxllib.c | 13
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 2
-rw-r--r--  drivers/misc/mei/pci-me.c | 23
-rw-r--r--  drivers/misc/mei/pci-txe.c | 30
-rw-r--r--  drivers/mmc/core/block.c | 3
-rw-r--r--  drivers/mmc/core/mmc.c | 36
-rw-r--r--  drivers/mmc/core/queue.c | 125
-rw-r--r--  drivers/mmc/core/queue.h | 6
-rw-r--r--  drivers/mmc/host/cavium.c | 2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 26
-rw-r--r--  drivers/mmc/host/pxamci.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 24
-rw-r--r--  drivers/mmc/host/sdhci-xenon.h | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 145
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 53
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 32
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 5
-rw-r--r--  drivers/net/ethernet/rocker/rocker_tlv.h | 48
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 112
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 7
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 22
-rw-r--r--  drivers/net/tun.c | 8
-rw-r--r--  drivers/net/usb/cdc_ether.c | 34
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/usb/rndis_host.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 7
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 37
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 62
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 10
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 9
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.h | 3
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.c | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 9
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h | 2
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 14
-rw-r--r--  drivers/of/base.c | 8
-rw-r--r--  drivers/of/of_reserved_mem.c | 2
-rw-r--r--  drivers/of/property.c | 2
-rw-r--r--  drivers/pci/host/pci-aardvark.c | 2
-rw-r--r--  drivers/pci/host/pci-tegra.c | 22
-rw-r--r--  drivers/pinctrl/Kconfig | 1
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c | 4
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 14
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 7
-rw-r--r--  drivers/rapidio/rio-access.c | 40
-rw-r--r--  drivers/ras/cec.c | 2
-rw-r--r--  drivers/remoteproc/Kconfig | 2
-rw-r--r--  drivers/remoteproc/imx_rproc.c | 9
-rw-r--r--  drivers/reset/Kconfig | 9
-rw-r--r--  drivers/reset/Makefile | 2
-rw-r--r--  drivers/reset/reset-hsdk.c (renamed from drivers/reset/reset-hsdk-v1.c) | 44
-rw-r--r--  drivers/rpmsg/qcom_glink_native.c | 14
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 8
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 16
-rw-r--r--  drivers/scsi/sd.c | 35
-rw-r--r--  drivers/tty/tty_ldisc.c | 11
-rw-r--r--  drivers/usb/gadget/composite.c | 5
-rw-r--r--  drivers/usb/gadget/configfs.c | 15
-rw-r--r--  drivers/usb/gadget/configfs.h | 11
-rw-r--r--  drivers/usb/gadget/function/f_rndis.c | 12
-rw-r--r--  drivers/usb/gadget/function/u_rndis.h | 1
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 9
-rw-r--r--  drivers/usb/misc/usbtest.c | 10
-rw-r--r--  drivers/usb/phy/phy-tegra-usb.c | 17
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 2
-rw-r--r--  drivers/usb/serial/console.c | 3
-rw-r--r--  drivers/usb/serial/cp210x.c | 13
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 7
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/serial/qcserial.c | 4
-rw-r--r--  fs/9p/vfs_addr.c | 10
-rw-r--r--  fs/binfmt_misc.c | 56
-rw-r--r--  fs/binfmt_script.c | 17
-rw-r--r--  fs/block_dev.c | 6
-rw-r--r--  fs/btrfs/ctree.h | 2
-rw-r--r--  fs/btrfs/extent_io.c | 2
-rw-r--r--  fs/ceph/mds_client.c | 11
-rw-r--r--  fs/ceph/snap.c | 8
-rw-r--r--  fs/direct-io.c | 3
-rw-r--r--  fs/exec.c | 2
-rw-r--r--  fs/f2fs/f2fs.h | 2
-rw-r--r--  fs/f2fs/segment.c | 6
-rw-r--r--  fs/f2fs/super.c | 2
-rw-r--r--  fs/mpage.c | 14
-rw-r--r--  fs/namespace.c | 4
-rw-r--r--  fs/nfs/client.c | 2
-rw-r--r--  fs/nfs/filelayout/filelayout.c | 3
-rw-r--r--  fs/nfs/nfs4idmap.c | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 3
-rw-r--r--  fs/nfs/nfs4xdr.c | 4
-rw-r--r--  fs/nfsd/nfs4proc.c | 9
-rw-r--r--  fs/overlayfs/copy_up.c | 6
-rw-r--r--  fs/overlayfs/dir.c | 20
-rw-r--r--  fs/overlayfs/namei.c | 1
-rw-r--r--  fs/overlayfs/overlayfs.h | 1
-rw-r--r--  fs/overlayfs/ovl_entry.h | 3
-rw-r--r--  fs/overlayfs/readdir.c | 6
-rw-r--r--  fs/overlayfs/super.c | 27
-rw-r--r--  fs/overlayfs/util.c | 24
-rw-r--r--  fs/quota/dquot.c | 27
-rw-r--r--  fs/userfaultfd.c | 66
-rw-r--r--  fs/xattr.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c | 8
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 4
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c | 4
-rw-r--r--  fs/xfs/libxfs/xfs_log_format.h | 27
-rw-r--r--  fs/xfs/xfs_acl.c | 22
-rw-r--r--  fs/xfs/xfs_attr_inactive.c | 2
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 26
-rw-r--r--  fs/xfs/xfs_bmap_util.h | 13
-rw-r--r--  fs/xfs/xfs_file.c | 4
-rw-r--r--  fs/xfs/xfs_fsmap.c | 12
-rw-r--r--  fs/xfs/xfs_inode_item.c | 79
-rw-r--r--  fs/xfs/xfs_log.c | 2
-rw-r--r--  fs/xfs/xfs_mount.c | 2
-rw-r--r--  fs/xfs/xfs_ondisk.h | 2
-rw-r--r--  fs/xfs/xfs_reflink.c | 9
-rw-r--r--  include/dt-bindings/reset/snps,hsdk-reset.h | 17
-rw-r--r--  include/dt-bindings/reset/snps,hsdk-v1-reset.h | 17
-rw-r--r--  include/linux/binfmts.h | 2
-rw-r--r--  include/linux/bitfield.h | 2
-rw-r--r--  include/linux/bpf.h | 5
-rw-r--r--  include/linux/buffer_head.h | 1
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/hyperv.h | 2
-rw-r--r--  include/linux/kernel.h | 90
-rw-r--r--  include/linux/mlx5/device.h | 5
-rw-r--r--  include/linux/mlx5/driver.h | 1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 3
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/mmc/host.h | 2
-rw-r--r--  include/linux/mmu_notifier.h | 5
-rw-r--r--  include/linux/mmzone.h | 10
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h | 7
-rw-r--r--  include/linux/nmi.h | 121
-rw-r--r--  include/linux/of.h | 10
-rw-r--r--  include/linux/sched/mm.h | 6
-rw-r--r--  include/linux/sched/topology.h | 8
-rw-r--r--  include/linux/smpboot.h | 4
-rw-r--r--  include/linux/thread_info.h | 2
-rw-r--r--  include/net/netlink.h | 73
-rw-r--r--  include/net/protocol.h | 4
-rw-r--r--  include/net/route.h | 4
-rw-r--r--  include/net/tcp.h | 2
-rw-r--r--  include/net/udp.h | 2
-rw-r--r--  include/scsi/scsi_device.h | 1
-rw-r--r--  include/scsi/scsi_devinfo.h | 1
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 1
-rw-r--r--  include/sound/hda_verbs.h | 1
-rw-r--r--  include/sound/seq_virmidi.h | 1
-rw-r--r--  include/uapi/linux/bpf.h | 2
-rw-r--r--  include/uapi/linux/dm-ioctl.h | 4
-rw-r--r--  include/uapi/linux/netfilter/xt_bpf.h | 1
-rw-r--r--  kernel/bpf/core.c | 2
-rw-r--r--  kernel/bpf/inode.c | 1
-rw-r--r--  kernel/bpf/verifier.c | 5
-rw-r--r--  kernel/cpu.c | 6
-rw-r--r--  kernel/events/core.c | 10
-rw-r--r--  kernel/exit.c | 6
-rw-r--r--  kernel/fork.c | 22
-rw-r--r--  kernel/irq/chip.c | 2
-rw-r--r--  kernel/irq/cpuhotplug.c | 28
-rw-r--r--  kernel/irq/manage.c | 17
-rw-r--r--  kernel/kcmp.c | 2
-rw-r--r--  kernel/livepatch/core.c | 60
-rw-r--r--  kernel/locking/lockdep.c | 48
-rw-r--r--  kernel/memremap.c | 4
-rw-r--r--  kernel/params.c | 35
-rw-r--r--  kernel/power/suspend.c | 18
-rw-r--r--  kernel/rcu/tree.c | 4
-rw-r--r--  kernel/sched/fair.c | 140
-rw-r--r--  kernel/sched/features.h | 3
-rw-r--r--  kernel/seccomp.c | 2
-rw-r--r--  kernel/smpboot.c | 25
-rw-r--r--  kernel/sysctl.c | 24
-rw-r--r--  kernel/trace/ftrace.c | 14
-rw-r--r--  kernel/watchdog.c | 643
-rw-r--r--  kernel/watchdog_hld.c | 196
-rw-r--r--  lib/Kconfig.debug | 147
-rw-r--r--  lib/idr.c | 4
-rw-r--r--  lib/locking-selftest.c | 2
-rw-r--r--  lib/lz4/lz4_decompress.c | 4
-rw-r--r--  lib/ratelimit.c | 4
-rw-r--r--  mm/cma.c | 2
-rw-r--r--  mm/compaction.c | 13
-rw-r--r--  mm/filemap.c | 8
-rw-r--r--  mm/ksm.c | 5
-rw-r--r--  mm/list_lru.c | 12
-rw-r--r--  mm/madvise.c | 19
-rw-r--r--  mm/memcontrol.c | 23
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  mm/memory_hotplug.c | 7
-rw-r--r--  mm/mempolicy.c | 7
-rw-r--r--  mm/migrate.c | 3
-rw-r--r--  mm/oom_kill.c | 16
-rw-r--r--  mm/page_alloc.c | 3
-rw-r--r--  mm/page_vma_mapped.c | 28
-rw-r--r--  mm/rodata_test.c | 2
-rw-r--r--  mm/slab_common.c | 22
-rw-r--r--  mm/swap.c | 4
-rw-r--r--  mm/swap_state.c | 52
-rw-r--r--  mm/vmalloc.c | 6
-rw-r--r--  mm/z3fold.c | 10
-rw-r--r--  net/8021q/vlan_core.c | 6
-rw-r--r--  net/bridge/netfilter/ebtable_broute.c | 4
-rw-r--r--  net/bridge/netfilter/ebtable_filter.c | 4
-rw-r--r--  net/bridge/netfilter/ebtable_nat.c | 4
-rw-r--r--  net/bridge/netfilter/ebtables.c | 17
-rw-r--r--  net/core/filter.c | 12
-rw-r--r--  net/core/rtnetlink.c | 3
-rw-r--r--  net/core/sock.c | 7
-rw-r--r--  net/dsa/slave.c | 31
-rw-r--r--  net/ipv4/gre_offload.c | 2
-rw-r--r--  net/ipv4/inetpeer.c | 4
-rw-r--r--  net/ipv4/ip_gre.c | 12
-rw-r--r--  net/ipv4/ip_input.c | 25
-rw-r--r--  net/ipv4/ip_vti.c | 3
-rw-r--r--  net/ipv4/netfilter/ipt_SYNPROXY.c | 3
-rw-r--r--  net/ipv4/route.c | 48
-rw-r--r--  net/ipv4/tcp_ipv4.c | 9
-rw-r--r--  net/ipv4/udp.c | 36
-rw-r--r--  net/ipv4/udp_offload.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/ip6_gre.c | 1
-rw-r--r--  net/ipv6/ip6_offload.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 5
-rw-r--r--  net/ipv6/ip6_vti.c | 3
-rw-r--r--  net/ipv6/netfilter/ip6t_SYNPROXY.c | 2
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/l2tp/l2tp_core.c | 16
-rw-r--r--  net/l2tp/l2tp_core.h | 6
-rw-r--r--  net/l2tp/l2tp_eth.c | 51
-rw-r--r--  net/l2tp/l2tp_ppp.c | 8
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 29
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ip.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipmark.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipport.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportip.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 4
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netnet.c | 4
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netportnet.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 8
-rw-r--r--  net/netfilter/nf_tables_api.c | 10
-rw-r--r--  net/netfilter/x_tables.c | 4
-rw-r--r--  net/netfilter/xt_bpf.c | 22
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 10
-rw-r--r--  net/packet/af_packet.c | 12
-rw-r--r--  net/sctp/sctp_diag.c | 4
-rw-r--r--  net/sunrpc/xprtsock.c | 2
-rw-r--r--  net/tipc/bcast.c | 4
-rw-r--r--  net/tipc/msg.c | 10
-rw-r--r--  net/wireless/nl80211.c | 14
-rw-r--r--  net/xfrm/xfrm_device.c | 1
-rw-r--r--  net/xfrm/xfrm_input.c | 6
-rw-r--r--  net/xfrm/xfrm_state.c | 4
-rw-r--r--  net/xfrm/xfrm_user.c | 1
-rwxr-xr-x  scripts/checkpatch.pl | 2
-rwxr-xr-x  scripts/faddr2line | 5
-rw-r--r--  scripts/kallsyms.c | 2
-rw-r--r--  scripts/spelling.txt | 33
-rw-r--r--  security/smack/smack_lsm.c | 55
-rw-r--r--  sound/core/compress_offload.c | 3
-rw-r--r--  sound/core/pcm_compat.c | 1
-rw-r--r--  sound/core/seq/seq_clientmgr.c | 6
-rw-r--r--  sound/core/seq/seq_ports.c | 7
-rw-r--r--  sound/core/seq/seq_virmidi.c | 27
-rw-r--r--  sound/pci/asihpi/hpioctl.c | 12
-rw-r--r--  sound/pci/echoaudio/echoaudio.c | 6
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 21
-rw-r--r--  sound/usb/caiaq/device.c | 12
-rw-r--r--  sound/usb/card.c | 20
-rw-r--r--  sound/usb/line6/driver.c | 7
-rw-r--r--  sound/usb/line6/podhd.c | 8
-rw-r--r--  sound/usb/mixer.c | 12
-rw-r--r--  sound/usb/mixer.h | 2
-rw-r--r--  sound/usb/quirks.c | 3
-rw-r--r--  sound/usb/usx2y/usb_stream.c | 6
-rw-r--r--  tools/include/uapi/linux/bpf.h | 2
-rw-r--r--  tools/perf/builtin-script.c | 4
-rw-r--r--  tools/perf/util/callchain.c | 6
-rw-r--r--  tools/perf/util/parse-events.c | 9
-rw-r--r--  tools/perf/util/pmu.c | 56
-rw-r--r--  tools/perf/util/pmu.h | 1
-rw-r--r--  tools/testing/selftests/mqueue/Makefile | 4
-rw-r--r--  tools/testing/selftests/networking/timestamping/rxtimestamp.c | 2
-rw-r--r--  tools/testing/selftests/vm/userfaultfd.c | 25
-rw-r--r--  tools/testing/selftests/x86/Makefile | 2
570 files changed, 5390 insertions, 3363 deletions
diff --git a/.mailmap b/.mailmap
index 5273cfd70ad6..c7b10caecc4e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -68,6 +68,8 @@ Jacob Shin <Jacob.Shin@amd.com>
James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com>
James E Wilson <wilson@specifix.com>
+James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
+James Hogan <jhogan@kernel.org> <james@albanarts.com>
James Ketrenos <jketreno@io.(none)>
Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
index 587db52084c7..94672016c268 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-swap
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
still used for tmpfs etc. other users. If set to
false, the global swap readahead algorithm will be
used for all swappable pages.
-
-What: /sys/kernel/mm/swap/vma_ra_max_order
-Date: August 2017
-Contact: Linux memory management mailing list <linux-mm@kvack.org>
-Description: The max readahead size in order for VMA based swap readahead
-
- VMA based swap readahead algorithm will readahead at
- most 1 << max_order pages for each readahead. The
- real readahead size for each readahead will be scaled
- according to the estimation algorithm.
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 713cab1d5f12..a1d1612f3651 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -127,7 +127,7 @@ Description:
What; /sys/power/pm_trace_dev_match
Date: October 2010
-Contact: James Hogan <james@albanarts.com>
+Contact: James Hogan <jhogan@kernel.org>
Description:
The /sys/power/pm_trace_dev_match file contains the name of the
device associated with the last PM event point saved in the RTC
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 4a0a7469fdd7..32df07e29f68 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -344,3 +344,4 @@ Version History
(wrong raid10_copies/raid10_format sequence)
1.11.1 Add raid4/5/6 journal write-back support via journal_mode option
1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
+1.13.0 Fix dev_health status at end of "recover" (was 'a', now 'A')
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
index b878a1e305af..ed1456f5c94d 100644
--- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
@@ -16,11 +16,13 @@ Required Properties:
- clocks:
Array of clocks required for SDHC.
- Require at least input clock for Xenon IP core.
+ Require at least input clock for Xenon IP core. For Armada AP806 and
+ CP110, the AXI clock is also mandatory.
- clock-names:
Array of names corresponding to clocks property.
The input clock for Xenon IP core should be named as "core".
+ The input clock for the AXI bus must be named as "axi".
- reg:
* For "marvell,armada-3700-sdhci", two register areas.
@@ -106,8 +108,8 @@ Example:
compatible = "marvell,armada-ap806-sdhci";
reg = <0xaa0000 0x1000>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
- clocks = <&emmc_clk>;
- clock-names = "core";
+ clocks = <&emmc_clk>,<&axi_clk>;
+ clock-names = "core", "axi";
bus-width = <4>;
marvell,xenon-phy-slow-mode;
marvell,xenon-tun-count = <11>;
@@ -126,8 +128,8 @@ Example:
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
vqmmc-supply = <&sd_vqmmc_regulator>;
vmmc-supply = <&sd_vmmc_regulator>;
- clocks = <&sdclk>;
- clock-names = "core";
+ clocks = <&sdclk>, <&axi_clk>;
+ clock-names = "core", "axi";
bus-width = <4>;
marvell,xenon-tun-count = <9>;
};
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index 7e2dad08a12e..1814fa13f6ab 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -21,8 +21,9 @@ Required properties:
- main controller clock (for both armada-375-pp2 and armada-7k-pp2)
- GOP clock (for both armada-375-pp2 and armada-7k-pp2)
- MG clock (only for armada-7k-pp2)
-- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
- "mg_clk" (the latter only for armada-7k-pp2).
+ - AXI clock (only for armada-7k-pp2)
+- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
+ and "axi_clk" (the 2 latter only for armada-7k-pp2).
The ethernet ports are represented by subnodes. At least one port is
required.
@@ -78,8 +79,9 @@ Example for marvell,armada-7k-pp2:
cpm_ethernet: ethernet@0 {
compatible = "marvell,armada-7k-pp22";
reg = <0x0 0x100000>, <0x129000 0xb000>;
- clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
- clock-names = "pp_clk", "gop_clk", "gp_clk";
+ clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
+ <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
+ clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";
eth0: eth0 {
interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 6af8eed1adeb..9c16ee2965a2 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -4,6 +4,7 @@ The device node has following properties.
Required properties:
- compatible: should be "rockchip,<name>-gamc"
+ "rockchip,rk3128-gmac": found on RK312x SoCs
"rockchip,rk3228-gmac": found on RK322x SoCs
"rockchip,rk3288-gmac": found on RK3288 SoCs
"rockchip,rk3328-gmac": found on RK3328 SoCs
diff --git a/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
new file mode 100644
index 000000000000..830069b1c37c
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
@@ -0,0 +1,28 @@
+Binding for the Synopsys HSDK reset controller
+
+This binding uses the common reset binding[1].
+
+[1] Documentation/devicetree/bindings/reset/reset.txt
+
+Required properties:
+- compatible: should be "snps,hsdk-reset".
+- reg: should always contain 2 pairs address - length: first for reset
+ configuration register and second for corresponding SW reset and status bits
+ register.
+- #reset-cells: from common reset binding; Should always be set to 1.
+
+Example:
+ reset: reset@880 {
+ compatible = "snps,hsdk-reset";
+ #reset-cells = <1>;
+ reg = <0x8A0 0x4>, <0xFF0 0x4>;
+ };
+
+Specifying reset lines connected to IP modules:
+ ethernet@.... {
+ ....
+ resets = <&reset HSDK_V1_ETH_RESET>;
+ ....
+ };
+
+The index could be found in <dt-bindings/reset/snps,hsdk-reset.h>
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 36f528a7fdd6..8caa60734647 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
beneath or above the path of another overlay lower layer path.
Using an upper layer path and/or a workdir path that are already used by
-another overlay mount is not allowed and will fail with EBUSY. Using
+another overlay mount is not allowed and may fail with EBUSY. Using
partially overlapping paths is not allowed but will not fail with EBUSY.
+If files are accessed from two overlayfs mounts which share or overlap the
+upper layer and/or workdir path the behavior of the overlay is undefined,
+though it will not result in a crash or deadlock.
Mounting an overlay using an upper layer path, where the upper layer path
was previously used by another mounted overlay in combination with a
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 0500193434cb..d47702456926 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@ Supported adapters:
* Intel Gemini Lake (SOC)
* Intel Cannon Lake-H (PCH)
* Intel Cannon Lake-LP (PCH)
+ * Intel Cedar Fork (PCH)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 57f52cdce32e..9ba04c0bab8d 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
and packet type ID), so in a "gatewayed" configuration, all
outgoing traffic will generally use the same device. Incoming
traffic may also end up on a single device, but that is
- dependent upon the balancing policy of the peer's 8023.ad
+ dependent upon the balancing policy of the peer's 802.3ad
implementation. In a "local" configuration, traffic will be
distributed across the devices in the bond.
diff --git a/MAINTAINERS b/MAINTAINERS
index 65b0c88d5ee0..a74227ad082e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5259,7 +5259,8 @@ S: Maintained
F: drivers/iommu/exynos-iommu.c
EZchip NPS platform support
-M: Noam Camus <noamc@ezchip.com>
+M: Elad Kanfi <eladkan@mellanox.com>
+M: Vineet Gupta <vgupta@synopsys.com>
S: Supported
F: arch/arc/plat-eznps
F: arch/arc/boot/dts/eznps.dts
@@ -5345,9 +5346,7 @@ M: "J. Bruce Fields" <bfields@fieldses.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: include/linux/fcntl.h
-F: include/linux/fs.h
F: include/uapi/linux/fcntl.h
-F: include/uapi/linux/fs.h
F: fs/fcntl.c
F: fs/locks.c
@@ -5356,6 +5355,8 @@ M: Alexander Viro <viro@zeniv.linux.org.uk>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/*
+F: include/linux/fs.h
+F: include/uapi/linux/fs.h
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
@@ -6738,7 +6739,7 @@ F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
F: drivers/auxdisplay/img-ascii-lcd.c
IMGTEC IR DECODER DRIVER
-M: James Hogan <james.hogan@imgtec.com>
+M: James Hogan <jhogan@kernel.org>
S: Maintained
F: drivers/media/rc/img-ir/
@@ -7562,7 +7563,7 @@ F: arch/arm64/include/asm/kvm*
F: arch/arm64/kvm/
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
-M: James Hogan <james.hogan@imgtec.com>
+M: James Hogan <jhogan@kernel.org>
L: linux-mips@linux-mips.org
S: Supported
F: arch/mips/include/uapi/asm/kvm*
@@ -7570,7 +7571,7 @@ F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/
KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M: Alexander Graf <agraf@suse.com>
+M: Paul Mackerras <paulus@ozlabs.org>
L: kvm-ppc@vger.kernel.org
W: http://www.linux-kvm.org/
T: git git://github.com/agraf/linux-2.6.git
@@ -8264,6 +8265,12 @@ L: libertas-dev@lists.infradead.org
S: Orphan
F: drivers/net/wireless/marvell/libertas/
+MARVELL MACCHIATOBIN SUPPORT
+M: Russell King <rmk@armlinux.org.uk>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
+
MARVELL MV643XX ETHERNET DRIVER
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: netdev@vger.kernel.org
@@ -8885,7 +8892,7 @@ F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
T: git git://linuxtv.org/media_tree.git
METAG ARCHITECTURE
-M: James Hogan <james.hogan@imgtec.com>
+M: James Hogan <jhogan@kernel.org>
L: linux-metag@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git
S: Odd Fixes
@@ -9354,7 +9361,7 @@ NETWORK BLOCK DEVICE (NBD)
M: Josef Bacik <jbacik@fb.com>
S: Maintained
L: linux-block@vger.kernel.org
-L: nbd-general@lists.sourceforge.net
+L: nbd@other.debian.org
F: Documentation/blockdev/nbd.txt
F: drivers/block/nbd.c
F: include/uapi/linux/nbd.h
@@ -12931,9 +12938,9 @@ F: drivers/mmc/host/dw_mmc*
SYNOPSYS HSDK RESET CONTROLLER DRIVER
M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
S: Supported
-F: drivers/reset/reset-hsdk-v1.c
-F: include/dt-bindings/reset/snps,hsdk-v1-reset.h
-F: Documentation/devicetree/bindings/reset/snps,hsdk-v1-reset.txt
+F: drivers/reset/reset-hsdk.c
+F: include/dt-bindings/reset/snps,hsdk-reset.h
+F: Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
SYSTEM CONFIGURATION (SYSCON)
M: Lee Jones <lee.jones@linaro.org>
diff --git a/Makefile b/Makefile
index c0f723f81c06..369eea08a572 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
NAME = Fearless Coyote
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 1aafb4efbb51..d789a89cb32c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
and non-text memory will be made non-executable. This provides
protection against certain security exploits (e.g. writing to text)
-config ARCH_WANT_RELAX_ORDER
- bool
-
config ARCH_HAS_REFCOUNT
bool
help
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 384bd47b5187..45c020a0fe76 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -8,6 +8,7 @@
*/
#include <linux/mm_types.h>
+#include <linux/sched.h>
#include <asm/machvec.h>
#include <asm/compiler.h>
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index a598641eed98..c84e67fdea09 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -24,7 +24,7 @@ config ARC
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
- select HAVE_FUTEX_CMPXCHG
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select HAVE_KRETPROBES
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 3a4b52b7e09d..d37f49d6a27f 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,8 +6,6 @@
# published by the Free Software Foundation.
#
-UTS_MACHINE := arc
-
ifeq ($(CROSS_COMPILE),)
ifndef CONFIG_CPU_BIG_ENDIAN
CROSS_COMPILE := arc-linux-
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 2367a67c5f10..e114000a84f5 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -44,7 +44,14 @@
mmcclk: mmcclk {
compatible = "fixed-clock";
- clock-frequency = <50000000>;
+ /*
+ * DW sdio controller has external ciu clock divider
+ * controlled via register in SDIO IP. It divides
+ * sdio_ref_clk (which comes from CGU) by 16 for
+ * default. So default mmcclk clock (which comes
+ * to sdk_in) is 25000000 Hz.
+ */
+ clock-frequency = <25000000>;
#clock-cells = <0>;
};
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 229d13adbce4..8adde1b492f1 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -12,6 +12,7 @@
/dts-v1/;
#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/reset/snps,hsdk-reset.h>
/ {
model = "snps,hsdk";
@@ -57,10 +58,10 @@
};
};
- core_clk: core-clk {
+ input_clk: input-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
- clock-frequency = <500000000>;
+ clock-frequency = <33333333>;
};
cpu_intc: cpu-interrupt-controller {
@@ -102,6 +103,19 @@
ranges = <0x00000000 0xf0000000 0x10000000>;
+ cgu_rst: reset-controller@8a0 {
+ compatible = "snps,hsdk-reset";
+ #reset-cells = <1>;
+ reg = <0x8A0 0x4>, <0xFF0 0x4>;
+ };
+
+ core_clk: core-clk@0 {
+ compatible = "snps,hsdk-core-pll-clock";
+ reg = <0x00 0x10>, <0x14B8 0x4>;
+ #clock-cells = <0>;
+ clocks = <&input_clk>;
+ };
+
serial: serial@5000 {
compatible = "snps,dw-apb-uart";
reg = <0x5000 0x100>;
@@ -120,7 +134,17 @@
mmcclk_ciu: mmcclk-ciu {
compatible = "fixed-clock";
- clock-frequency = <100000000>;
+ /*
+ * DW sdio controller has external ciu clock divider
+ * controlled via register in SDIO IP. Due to its
+ * unexpected default value (it should devide by 1
+ * but it devides by 8) SDIO IP uses wrong clock and
+ * works unstable (see STAR 9001204800)
+ * So add temporary fix and change clock frequency
+ * from 100000000 to 12500000 Hz until we fix dw sdio
+ * driver itself.
+ */
+ clock-frequency = <12500000>;
#clock-cells = <0>;
};
@@ -141,6 +165,8 @@
clocks = <&gmacclk>;
clock-names = "stmmaceth";
phy-handle = <&phy0>;
+ resets = <&cgu_rst HSDK_ETH_RESET>;
+ reset-names = "stmmaceth";
mdio {
#address-cells = <1>;
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 6980b966a364..ec7c849a5c8e 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 2233f5777a71..63d3cf69e0b0 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 30a3d4cf53d2..f613ecac14a7 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index 821a2e562f3f..3507be2af6fe 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -84,5 +84,5 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 9a3fcf446388..15f0f6b5fec1 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -63,6 +63,7 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_RESET_HSDK=y
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
@@ -72,7 +73,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index c0d6a010751a..4fcf4f2503f6 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 5c0971787acf..7b71464f6c2f 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index ba8e802dba80..b1c56d35f2a9 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -98,6 +98,7 @@
/* Auxiliary registers */
#define AUX_IDENTITY 4
+#define AUX_EXEC_CTRL 8
#define AUX_INTR_VEC_BASE 0x25
#define AUX_VOL 0x5e
@@ -135,12 +136,12 @@ struct bcr_identity {
#endif
};
-struct bcr_isa {
+struct bcr_isa_arcv2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
- pad1:11, atomic1:1, ver:8;
+ pad1:12, ver:8;
#else
- unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+ unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
ldd:1, pad2:4, div_rem:4;
#endif
};
@@ -263,13 +264,13 @@ struct cpuinfo_arc {
struct cpuinfo_arc_mmu mmu;
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
- struct bcr_isa isa;
+ struct bcr_isa_arcv2 isa;
const char *details, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
- fpu_sp:1, fpu_dp:1, pad2:6,
+ fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 877cec8f5ea2..fb83844daeea 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
+ { 0x54, "R4.0" },
#endif
{ 0x00, NULL }
};
@@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = {
#else
{ 0x40, "ARC EM" },
{ 0x50, "ARC HS38" },
+ { 0x54, "ARC HS48" },
#endif
{ 0x00, "Unknown" }
};
@@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void)
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
const struct id_to_str *tbl;
+ struct bcr_isa_arcv2 isa;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
- READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
if (cpu->core.family == tbl->id) {
@@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void)
}
for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
- if ((cpu->core.family & 0xF0) == tbl->id)
+ if ((cpu->core.family & 0xF4) == tbl->id)
break;
}
cpu->name = tbl->str;
@@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void)
cpu->bpu.full = bpu.ft;
cpu->bpu.num_cache = 256 << bpu.bce;
cpu->bpu.num_pred = 2048 << bpu.pte;
+
+ if (cpu->core.family >= 0x54) {
+ unsigned int exec_ctrl;
+
+ READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+ cpu->extn.dual_iss_exist = 1;
+ cpu->extn.dual_iss_enb = exec_ctrl & 1;
+ }
}
READ_BCR(ARC_REG_AP_BCR, bcr);
@@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
+ READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
+
/* some hacks for lack of feature BCR info in old ARC700 cores */
if (is_isa_arcompact()) {
- if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
+ if (!isa.ver) /* ISA BCR absent, use Kconfig info */
cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
- else
- cpu->isa.atomic = cpu->isa.atomic1;
+ else {
+ /* ARC700_BUILD only has 2 bits of isa info */
+ struct bcr_generic bcr = *(struct bcr_generic *)&isa;
+ cpu->isa.atomic = bcr.info & 1;
+ }
cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
/* there's no direct way to distinguish 750 vs. 770 */
if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
cpu->name = "ARC750";
+ } else {
+ cpu->isa = isa;
}
}
@@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
- n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
cpu_id, cpu->name, cpu->details,
is_isa_arcompact() ? "ARCompact" : "ARCv2",
- IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
+ IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
+ IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index f1ac6790da5f..cf14ebc36916 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -111,6 +111,13 @@ static void __init axs10x_early_init(void)
axs10x_enable_gpio_intc_wire();
+ /*
+ * Reset ethernet IP core.
+ * TODO: get rid of this quirk after axs10x reset driver (or simple
+ * reset driver) will be available in upstream.
+ */
+ iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
+
scnprintf(mb, 32, "MainBoard v%d", mb_rev);
axs10x_print_board_ver(CREG_MB_VER, mb);
}
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 5a6ed5afb009..bd08de4be75e 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -6,4 +6,5 @@
#
menuconfig ARC_SOC_HSDK
- bool "ARC HS Development Kit SOC"
+ bool "ARC HS Development Kit SOC"
+ select CLK_HSDK
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index a2e7fd17e36d..744e62e58788 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -38,6 +38,42 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
#define CREG_PAE (CREG_BASE + 0x180)
#define CREG_PAE_UPDATE (CREG_BASE + 0x194)
+#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8)
+#define CREG_CORE_IF_CLK_DIV_2 0x1
+#define CGU_BASE ARC_PERIPHERAL_BASE
+#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4)
+#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0)
+#define CGU_PLL_STATUS_LOCK BIT(0)
+#define CGU_PLL_STATUS_ERR BIT(1)
+#define CGU_PLL_CTRL_1GHZ 0x3A10
+#define HSDK_PLL_LOCK_TIMEOUT 500
+
+#define HSDK_PLL_LOCKED() \
+ !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
+
+#define HSDK_PLL_ERR() \
+ !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
+
+static void __init hsdk_set_cpu_freq_1ghz(void)
+{
+ u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
+
+ /*
+ * As we set cpu clock which exceeds 500MHz, the divider for the interface
+ * clock must be programmed to div-by-2.
+ */
+ iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
+
+ /* Set cpu clock to 1GHz */
+ iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
+
+ while (!HSDK_PLL_LOCKED() && timeout--)
+ cpu_relax();
+
+ if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
+ pr_err("Failed to setup CPU frequency to 1GHz!");
+}
+
static void __init hsdk_init_early(void)
{
/*
@@ -52,6 +88,12 @@ static void __init hsdk_init_early(void)
/* Really apply settings made above */
writel(1, (void __iomem *) CREG_PAE_UPDATE);
+
+ /*
+ * Setup CPU frequency to 1GHz.
+ * TODO: remove it after smart hsdk pll driver will be introduced.
+ */
+ hsdk_set_cpu_freq_1ghz();
}
static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 7d7ca054c557..e58fab8aec5d 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -36,6 +36,8 @@
phy1 = &usb1_phy;
ethernet0 = &cpsw_emac0;
ethernet1 = &cpsw_emac1;
+ spi0 = &spi0;
+ spi1 = &spi1;
};
cpus {
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 9d276af7c539..081fa68b6f98 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -388,6 +388,7 @@
pinctrl-0 = <&cpsw_default>;
pinctrl-1 = <&cpsw_sleep>;
status = "okay";
+ slaves = <1>;
};
&davinci_mdio {
@@ -402,11 +403,6 @@
phy-mode = "rmii";
};
-&cpsw_emac1 {
- phy_id = <&davinci_mdio>, <1>;
- phy-mode = "rmii";
-};
-
&phy_sel {
rmii-clock-ext;
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 9c9088c99cc4..60cb084a8d92 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -67,7 +67,10 @@
usb1: ohci@00400000 {
num-ports = <3>;
- atmel,vbus-gpio = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>;
+ atmel,vbus-gpio = <0 /* &pioA PIN_PD20 GPIO_ACTIVE_HIGH */
+ &pioA PIN_PA27 GPIO_ACTIVE_HIGH
+ 0
+ >;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb_default>;
status = "okay";
@@ -120,7 +123,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mikrobus2_uart>;
atmel,use-dma-rx;
- atmel-use-dma-tx;
+ atmel,use-dma-tx;
status = "okay";
};
@@ -178,7 +181,7 @@
uart4: serial@fc00c000 {
atmel,use-dma-rx;
atmel,use-dma-tx;
- pinctrl-name = "default";
+ pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mikrobus1_uart>;
status = "okay";
};
@@ -330,7 +333,7 @@
};
pinctrl_led_gpio_default: led_gpio_default {
- pinmux = <PIN_PA27__GPIO>,
+ pinmux = <PIN_PA10__GPIO>,
<PIN_PB1__GPIO>,
<PIN_PA31__GPIO>;
bias-pull-up;
@@ -396,7 +399,7 @@
};
pinctrl_usb_default: usb_default {
- pinmux = <PIN_PA10__GPIO>,
+ pinmux = <PIN_PA27__GPIO>,
<PIN_PD19__GPIO>;
bias-disable;
};
@@ -520,17 +523,17 @@
red {
label = "red";
- gpios = <&pioA PIN_PA27 GPIO_ACTIVE_LOW>;
+ gpios = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>;
};
green {
label = "green";
- gpios = <&pioA PIN_PB1 GPIO_ACTIVE_LOW>;
+ gpios = <&pioA PIN_PB1 GPIO_ACTIVE_HIGH>;
};
blue {
label = "blue";
- gpios = <&pioA PIN_PA31 GPIO_ACTIVE_LOW>;
+ gpios = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 67e72bc72e80..c75507922f7d 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -15,6 +15,13 @@
compatible = "ti,da850-evm", "ti,da850";
model = "DA850/AM1808/OMAP-L138 EVM";
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ ethernet0 = &eth0;
+ };
+
soc@1c00000 {
pmx_core: pinmux@14120 {
status = "okay";
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index cf229dfabf61..e62b62875cba 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1817,6 +1817,8 @@
clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
ti,bit-shift = <24>;
reg = <0x1868>;
+ assigned-clocks = <&mcasp3_ahclkx_mux>;
+ assigned-clock-parents = <&abe_24m_fclk>;
};
mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 26c20e1167b9..4acd32a1c4ef 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -144,15 +144,6 @@
io-channel-names = "temp", "bsi", "vbat";
};
- rear_camera: camera@0 {
- compatible = "linux,camera";
-
- module {
- model = "TCM8341MD";
- sensor = <&cam1>;
- };
- };
-
pwm9: dmtimer-pwm {
compatible = "ti,omap-dmtimer-pwm";
#pwm-cells = <3>;
@@ -189,10 +180,8 @@
clock-lanes = <1>;
data-lanes = <0>;
lane-polarity = <0 0>;
- clock-inv = <0>;
/* Select strobe = <1> for back camera, <0> for front camera */
strobe = <1>;
- crc = <0>;
};
};
};
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index 97b1c2321ba9..293ecb957227 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -47,6 +47,7 @@
/dts-v1/;
#include "stm32f429.dtsi"
+#include "stm32f429-pinctrl.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
@@ -202,10 +203,8 @@
stmpe1600: stmpe1600@42 {
compatible = "st,stmpe1600";
reg = <0x42>;
- irq-gpio = <&gpioi 8 0>;
- irq-trigger = <3>;
interrupts = <8 3>;
- interrupt-parent = <&exti>;
+ interrupt-parent = <&gpioi>;
interrupt-controller;
wakeup-source;
diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
new file mode 100644
index 000000000000..7f3560c0211d
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
+#include <dt-bindings/mfd/stm32f4-rcc.h>
+
+/ {
+ soc {
+ pinctrl: pin-controller {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x40020000 0x3000>;
+ interrupt-parent = <&exti>;
+ st,syscfg = <&syscfg 0x8>;
+ pins-are-numbered;
+
+ gpioa: gpio@40020000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x0 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
+ st,bank-name = "GPIOA";
+ };
+
+ gpiob: gpio@40020400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x400 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
+ st,bank-name = "GPIOB";
+ };
+
+ gpioc: gpio@40020800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x800 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
+ st,bank-name = "GPIOC";
+ };
+
+ gpiod: gpio@40020c00 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0xc00 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
+ st,bank-name = "GPIOD";
+ };
+
+ gpioe: gpio@40021000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x1000 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
+ st,bank-name = "GPIOE";
+ };
+
+ gpiof: gpio@40021400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x1400 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
+ st,bank-name = "GPIOF";
+ };
+
+ gpiog: gpio@40021800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x1800 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>;
+ st,bank-name = "GPIOG";
+ };
+
+ gpioh: gpio@40021c00 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x1c00 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>;
+ st,bank-name = "GPIOH";
+ };
+
+ gpioi: gpio@40022000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x2000 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>;
+ st,bank-name = "GPIOI";
+ };
+
+ gpioj: gpio@40022400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x2400 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>;
+ st,bank-name = "GPIOJ";
+ };
+
+ gpiok: gpio@40022800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x2800 0x400>;
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>;
+ st,bank-name = "GPIOK";
+ };
+
+ usart1_pins_a: usart1@0 {
+ pins1 {
+ pinmux = <STM32F429_PA9_FUNC_USART1_TX>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32F429_PA10_FUNC_USART1_RX>;
+ bias-disable;
+ };
+ };
+
+ usart3_pins_a: usart3@0 {
+ pins1 {
+ pinmux = <STM32F429_PB10_FUNC_USART3_TX>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32F429_PB11_FUNC_USART3_RX>;
+ bias-disable;
+ };
+ };
+
+ usbotg_fs_pins_a: usbotg_fs@0 {
+ pins {
+ pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>,
+ <STM32F429_PA11_FUNC_OTG_FS_DM>,
+ <STM32F429_PA12_FUNC_OTG_FS_DP>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+
+ usbotg_fs_pins_b: usbotg_fs@1 {
+ pins {
+ pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>,
+ <STM32F429_PB14_FUNC_OTG_HS_DM>,
+ <STM32F429_PB15_FUNC_OTG_HS_DP>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+
+ usbotg_hs_pins_a: usbotg_hs@0 {
+ pins {
+ pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>,
+ <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>,
+ <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>,
+ <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>,
+ <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>,
+ <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>,
+ <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>,
+ <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>,
+ <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>,
+ <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>,
+ <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>,
+ <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+
+ ethernet_mii: mii@0 {
+ pins {
+ pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>,
+ <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>,
+ <STM32F429_PC2_FUNC_ETH_MII_TXD2>,
+ <STM32F429_PB8_FUNC_ETH_MII_TXD3>,
+ <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>,
+ <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>,
+ <STM32F429_PA2_FUNC_ETH_MDIO>,
+ <STM32F429_PC1_FUNC_ETH_MDC>,
+ <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>,
+ <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>,
+ <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>,
+ <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>,
+ <STM32F429_PH6_FUNC_ETH_MII_RXD2>,
+ <STM32F429_PH7_FUNC_ETH_MII_RXD3>;
+ slew-rate = <2>;
+ };
+ };
+
+ adc3_in8_pin: adc@200 {
+ pins {
+ pinmux = <STM32F429_PF10_FUNC_ANALOG>;
+ };
+ };
+
+ pwm1_pins: pwm@1 {
+ pins {
+ pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>,
+ <STM32F429_PB13_FUNC_TIM1_CH1N>,
+ <STM32F429_PB12_FUNC_TIM1_BKIN>;
+ };
+ };
+
+ pwm3_pins: pwm@3 {
+ pins {
+ pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>,
+ <STM32F429_PB5_FUNC_TIM3_CH2>;
+ };
+ };
+
+ i2c1_pins: i2c1@0 {
+ pins {
+ pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>,
+ <STM32F429_PB6_FUNC_I2C1_SCL>;
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <3>;
+ };
+ };
+
+ ltdc_pins: ltdc@0 {
+ pins {
+ pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>,
+ <STM32F429_PI13_FUNC_LCD_VSYNC>,
+ <STM32F429_PI14_FUNC_LCD_CLK>,
+ <STM32F429_PI15_FUNC_LCD_R0>,
+ <STM32F429_PJ0_FUNC_LCD_R1>,
+ <STM32F429_PJ1_FUNC_LCD_R2>,
+ <STM32F429_PJ2_FUNC_LCD_R3>,
+ <STM32F429_PJ3_FUNC_LCD_R4>,
+ <STM32F429_PJ4_FUNC_LCD_R5>,
+ <STM32F429_PJ5_FUNC_LCD_R6>,
+ <STM32F429_PJ6_FUNC_LCD_R7>,
+ <STM32F429_PJ7_FUNC_LCD_G0>,
+ <STM32F429_PJ8_FUNC_LCD_G1>,
+ <STM32F429_PJ9_FUNC_LCD_G2>,
+ <STM32F429_PJ10_FUNC_LCD_G3>,
+ <STM32F429_PJ11_FUNC_LCD_G4>,
+ <STM32F429_PJ12_FUNC_LCD_B0>,
+ <STM32F429_PJ13_FUNC_LCD_B1>,
+ <STM32F429_PJ14_FUNC_LCD_B2>,
+ <STM32F429_PJ15_FUNC_LCD_B3>,
+ <STM32F429_PK0_FUNC_LCD_G5>,
+ <STM32F429_PK1_FUNC_LCD_G6>,
+ <STM32F429_PK2_FUNC_LCD_G7>,
+ <STM32F429_PK3_FUNC_LCD_B4>,
+ <STM32F429_PK4_FUNC_LCD_B5>,
+ <STM32F429_PK5_FUNC_LCD_B6>,
+ <STM32F429_PK6_FUNC_LCD_B7>,
+ <STM32F429_PK7_FUNC_LCD_DE>;
+ slew-rate = <2>;
+ };
+ };
+
+ dcmi_pins: dcmi@0 {
+ pins {
+ pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>,
+ <STM32F429_PB7_FUNC_DCMI_VSYNC>,
+ <STM32F429_PA6_FUNC_DCMI_PIXCLK>,
+ <STM32F429_PC6_FUNC_DCMI_D0>,
+ <STM32F429_PC7_FUNC_DCMI_D1>,
+ <STM32F429_PC8_FUNC_DCMI_D2>,
+ <STM32F429_PC9_FUNC_DCMI_D3>,
+ <STM32F429_PC11_FUNC_DCMI_D4>,
+ <STM32F429_PD3_FUNC_DCMI_D5>,
+ <STM32F429_PB8_FUNC_DCMI_D6>,
+ <STM32F429_PE6_FUNC_DCMI_D7>,
+ <STM32F429_PC10_FUNC_DCMI_D8>,
+ <STM32F429_PC12_FUNC_DCMI_D9>,
+ <STM32F429_PD6_FUNC_DCMI_D10>,
+ <STM32F429_PD2_FUNC_DCMI_D11>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ };
+ };
+ };
+};
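This new file factors the pin groups shared by the whole STM32F4 family out of stm32f429.dtsi; the per-variant stm32f429-pinctrl.dtsi and stm32f469-pinctrl.dtsi added below only contribute the compatible string and the gpio-ranges that differ between parts. Board files keep consuming the groups by label, for example (sketch only, assuming the usual usart1 label from the SoC dtsi):

&usart1 {
	pinctrl-0 = <&usart1_pins_a>;	/* group defined in stm32f4-pinctrl.dtsi */
	pinctrl-names = "default";
	status = "okay";
};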
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index c66d617e4245..5ceb2cf3777f 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -47,6 +47,7 @@
/dts-v1/;
#include "stm32f429.dtsi"
+#include "stm32f429-pinctrl.dtsi"
#include <dt-bindings/input/input.h>
/ {
diff --git a/arch/arm/boot/dts/stm32f429-pinctrl.dtsi b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
new file mode 100644
index 000000000000..3e7a17d9112e
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "stm32f4-pinctrl.dtsi"
+
+/ {
+ soc {
+ pinctrl: pin-controller {
+ compatible = "st,stm32f429-pinctrl";
+
+ gpioa: gpio@40020000 {
+ gpio-ranges = <&pinctrl 0 0 16>;
+ };
+
+ gpiob: gpio@40020400 {
+ gpio-ranges = <&pinctrl 0 16 16>;
+ };
+
+ gpioc: gpio@40020800 {
+ gpio-ranges = <&pinctrl 0 32 16>;
+ };
+
+ gpiod: gpio@40020c00 {
+ gpio-ranges = <&pinctrl 0 48 16>;
+ };
+
+ gpioe: gpio@40021000 {
+ gpio-ranges = <&pinctrl 0 64 16>;
+ };
+
+ gpiof: gpio@40021400 {
+ gpio-ranges = <&pinctrl 0 80 16>;
+ };
+
+ gpiog: gpio@40021800 {
+ gpio-ranges = <&pinctrl 0 96 16>;
+ };
+
+ gpioh: gpio@40021c00 {
+ gpio-ranges = <&pinctrl 0 112 16>;
+ };
+
+ gpioi: gpio@40022000 {
+ gpio-ranges = <&pinctrl 0 128 16>;
+ };
+
+ gpioj: gpio@40022400 {
+ gpio-ranges = <&pinctrl 0 144 16>;
+ };
+
+ gpiok: gpio@40022800 {
+ gpio-ranges = <&pinctrl 0 160 8>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index dd7e99b1f43b..5b36eb114ddc 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -47,7 +47,6 @@
#include "skeleton.dtsi"
#include "armv7-m.dtsi"
-#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
#include <dt-bindings/clock/stm32fx-clock.h>
#include <dt-bindings/mfd/stm32f4-rcc.h>
@@ -591,302 +590,6 @@
status = "disabled";
};
- pinctrl: pin-controller {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,stm32f429-pinctrl";
- ranges = <0 0x40020000 0x3000>;
- interrupt-parent = <&exti>;
- st,syscfg = <&syscfg 0x8>;
- pins-are-numbered;
-
- gpioa: gpio@40020000 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x0 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
- st,bank-name = "GPIOA";
- };
-
- gpiob: gpio@40020400 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x400 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
- st,bank-name = "GPIOB";
- };
-
- gpioc: gpio@40020800 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x800 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
- st,bank-name = "GPIOC";
- };
-
- gpiod: gpio@40020c00 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0xc00 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
- st,bank-name = "GPIOD";
- };
-
- gpioe: gpio@40021000 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x1000 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
- st,bank-name = "GPIOE";
- };
-
- gpiof: gpio@40021400 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x1400 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
- st,bank-name = "GPIOF";
- };
-
- gpiog: gpio@40021800 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x1800 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>;
- st,bank-name = "GPIOG";
- };
-
- gpioh: gpio@40021c00 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x1c00 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>;
- st,bank-name = "GPIOH";
- };
-
- gpioi: gpio@40022000 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x2000 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>;
- st,bank-name = "GPIOI";
- };
-
- gpioj: gpio@40022400 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x2400 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>;
- st,bank-name = "GPIOJ";
- };
-
- gpiok: gpio@40022800 {
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x2800 0x400>;
- clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>;
- st,bank-name = "GPIOK";
- };
-
- usart1_pins_a: usart1@0 {
- pins1 {
- pinmux = <STM32F429_PA9_FUNC_USART1_TX>;
- bias-disable;
- drive-push-pull;
- slew-rate = <0>;
- };
- pins2 {
- pinmux = <STM32F429_PA10_FUNC_USART1_RX>;
- bias-disable;
- };
- };
-
- usart3_pins_a: usart3@0 {
- pins1 {
- pinmux = <STM32F429_PB10_FUNC_USART3_TX>;
- bias-disable;
- drive-push-pull;
- slew-rate = <0>;
- };
- pins2 {
- pinmux = <STM32F429_PB11_FUNC_USART3_RX>;
- bias-disable;
- };
- };
-
- usbotg_fs_pins_a: usbotg_fs@0 {
- pins {
- pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>,
- <STM32F429_PA11_FUNC_OTG_FS_DM>,
- <STM32F429_PA12_FUNC_OTG_FS_DP>;
- bias-disable;
- drive-push-pull;
- slew-rate = <2>;
- };
- };
-
- usbotg_fs_pins_b: usbotg_fs@1 {
- pins {
- pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>,
- <STM32F429_PB14_FUNC_OTG_HS_DM>,
- <STM32F429_PB15_FUNC_OTG_HS_DP>;
- bias-disable;
- drive-push-pull;
- slew-rate = <2>;
- };
- };
-
- usbotg_hs_pins_a: usbotg_hs@0 {
- pins {
- pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>,
- <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>,
- <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>,
- <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>,
- <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>,
- <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>,
- <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>,
- <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>,
- <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>,
- <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>,
- <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>,
- <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>;
- bias-disable;
- drive-push-pull;
- slew-rate = <2>;
- };
- };
-
- ethernet_mii: mii@0 {
- pins {
- pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>,
- <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>,
- <STM32F429_PC2_FUNC_ETH_MII_TXD2>,
- <STM32F429_PB8_FUNC_ETH_MII_TXD3>,
- <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>,
- <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>,
- <STM32F429_PA2_FUNC_ETH_MDIO>,
- <STM32F429_PC1_FUNC_ETH_MDC>,
- <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>,
- <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>,
- <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>,
- <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>,
- <STM32F429_PH6_FUNC_ETH_MII_RXD2>,
- <STM32F429_PH7_FUNC_ETH_MII_RXD3>;
- slew-rate = <2>;
- };
- };
-
- adc3_in8_pin: adc@200 {
- pins {
- pinmux = <STM32F429_PF10_FUNC_ANALOG>;
- };
- };
-
- pwm1_pins: pwm@1 {
- pins {
- pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>,
- <STM32F429_PB13_FUNC_TIM1_CH1N>,
- <STM32F429_PB12_FUNC_TIM1_BKIN>;
- };
- };
-
- pwm3_pins: pwm@3 {
- pins {
- pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>,
- <STM32F429_PB5_FUNC_TIM3_CH2>;
- };
- };
-
- i2c1_pins: i2c1@0 {
- pins {
- pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>,
- <STM32F429_PB6_FUNC_I2C1_SCL>;
- bias-disable;
- drive-open-drain;
- slew-rate = <3>;
- };
- };
-
- ltdc_pins: ltdc@0 {
- pins {
- pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>,
- <STM32F429_PI13_FUNC_LCD_VSYNC>,
- <STM32F429_PI14_FUNC_LCD_CLK>,
- <STM32F429_PI15_FUNC_LCD_R0>,
- <STM32F429_PJ0_FUNC_LCD_R1>,
- <STM32F429_PJ1_FUNC_LCD_R2>,
- <STM32F429_PJ2_FUNC_LCD_R3>,
- <STM32F429_PJ3_FUNC_LCD_R4>,
- <STM32F429_PJ4_FUNC_LCD_R5>,
- <STM32F429_PJ5_FUNC_LCD_R6>,
- <STM32F429_PJ6_FUNC_LCD_R7>,
- <STM32F429_PJ7_FUNC_LCD_G0>,
- <STM32F429_PJ8_FUNC_LCD_G1>,
- <STM32F429_PJ9_FUNC_LCD_G2>,
- <STM32F429_PJ10_FUNC_LCD_G3>,
- <STM32F429_PJ11_FUNC_LCD_G4>,
- <STM32F429_PJ12_FUNC_LCD_B0>,
- <STM32F429_PJ13_FUNC_LCD_B1>,
- <STM32F429_PJ14_FUNC_LCD_B2>,
- <STM32F429_PJ15_FUNC_LCD_B3>,
- <STM32F429_PK0_FUNC_LCD_G5>,
- <STM32F429_PK1_FUNC_LCD_G6>,
- <STM32F429_PK2_FUNC_LCD_G7>,
- <STM32F429_PK3_FUNC_LCD_B4>,
- <STM32F429_PK4_FUNC_LCD_B5>,
- <STM32F429_PK5_FUNC_LCD_B6>,
- <STM32F429_PK6_FUNC_LCD_B7>,
- <STM32F429_PK7_FUNC_LCD_DE>;
- slew-rate = <2>;
- };
- };
-
- dcmi_pins: dcmi@0 {
- pins {
- pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>,
- <STM32F429_PB7_FUNC_DCMI_VSYNC>,
- <STM32F429_PA6_FUNC_DCMI_PIXCLK>,
- <STM32F429_PC6_FUNC_DCMI_D0>,
- <STM32F429_PC7_FUNC_DCMI_D1>,
- <STM32F429_PC8_FUNC_DCMI_D2>,
- <STM32F429_PC9_FUNC_DCMI_D3>,
- <STM32F429_PC11_FUNC_DCMI_D4>,
- <STM32F429_PD3_FUNC_DCMI_D5>,
- <STM32F429_PB8_FUNC_DCMI_D6>,
- <STM32F429_PE6_FUNC_DCMI_D7>,
- <STM32F429_PC10_FUNC_DCMI_D8>,
- <STM32F429_PC12_FUNC_DCMI_D9>,
- <STM32F429_PD6_FUNC_DCMI_D10>,
- <STM32F429_PD2_FUNC_DCMI_D11>;
- bias-disable;
- drive-push-pull;
- slew-rate = <3>;
- };
- };
- };
-
crc: crc@40023000 {
compatible = "st,stm32f4-crc";
reg = <0x40023000 0x400>;
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index 6ae1f037f3f0..c18acbe4cf4e 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -47,6 +47,7 @@
/dts-v1/;
#include "stm32f429.dtsi"
+#include "stm32f469-pinctrl.dtsi"
/ {
model = "STMicroelectronics STM32F469i-DISCO board";
diff --git a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
new file mode 100644
index 000000000000..fff542662eea
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "stm32f4-pinctrl.dtsi"
+
+/ {
+ soc {
+ pinctrl: pin-controller {
+ compatible = "st,stm32f469-pinctrl";
+
+ gpioa: gpio@40020000 {
+ gpio-ranges = <&pinctrl 0 0 16>;
+ };
+
+ gpiob: gpio@40020400 {
+ gpio-ranges = <&pinctrl 0 16 16>;
+ };
+
+ gpioc: gpio@40020800 {
+ gpio-ranges = <&pinctrl 0 32 16>;
+ };
+
+ gpiod: gpio@40020c00 {
+ gpio-ranges = <&pinctrl 0 48 16>;
+ };
+
+ gpioe: gpio@40021000 {
+ gpio-ranges = <&pinctrl 0 64 16>;
+ };
+
+ gpiof: gpio@40021400 {
+ gpio-ranges = <&pinctrl 0 80 16>;
+ };
+
+ gpiog: gpio@40021800 {
+ gpio-ranges = <&pinctrl 0 96 16>;
+ };
+
+ gpioh: gpio@40021c00 {
+ gpio-ranges = <&pinctrl 0 112 16>;
+ };
+
+ gpioi: gpio@40022000 {
+ gpio-ranges = <&pinctrl 0 128 16>;
+ };
+
+ gpioj: gpio@40022400 {
+ gpio-ranges = <&pinctrl 0 144 6>,
+ <&pinctrl 12 156 4>;
+ };
+
+ gpiok: gpio@40022800 {
+ gpio-ranges = <&pinctrl 3 163 5>;
+ };
+ };
+ };
+};
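The only substance in these variant files is gpio-ranges, whose cells read <&pin-controller gpio-line-base pin-base count>. On the F469 some lines of banks J and K are not routed to the pin controller, hence the split ranges; an annotated sketch of the same triplet format (bank label is a placeholder):

gpiox: gpio@40022400 {
	/* GPIO lines 0-5 of this bank map to pinctrl pins 144-149,
	 * lines 12-15 map to pins 156-159; lines 6-11 have no pad here. */
	gpio-ranges = <&pinctrl 0 144 6>,
		      <&pinctrl 12 156 4>;
};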
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index d2d75fa664a6..2a63fa10c813 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -32,6 +32,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
+CONFIG_PATA_FTIDE010=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -55,8 +56,8 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_GEMINI=y
CONFIG_DMADEVICES=y
+CONFIG_AMBA_PL08X=y
# CONFIG_DNOTIFY is not set
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 64e3a2a8cede..d5e1370ec303 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -471,7 +471,7 @@ CONFIG_LCD_PLATFORM=m
CONFIG_LCD_TOSA=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_TOSA=m
-CONFIG_FRAMEBUFFER_CONSOLE=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
CONFIG_SOUND=m
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 44d4fa57ba0a..070e5074f1ee 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -113,7 +113,7 @@ CONFIG_FB_PXA_PARAMETERS=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_PWM=m
# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 8d4c0c926c34..09e7050d5653 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -112,7 +112,7 @@ CONFIG_FB_PXA=m
CONFIG_FB_PXA_PARAMETERS=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 5036f996e694..849014c01cf4 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -533,8 +533,8 @@ static void __init at91_pm_backup_init(void)
}
pm_bu->suspended = 0;
- pm_bu->canary = virt_to_phys(&canary);
- pm_bu->resume = virt_to_phys(cpu_resume);
+ pm_bu->canary = __pa_symbol(&canary);
+ pm_bu->resume = __pa_symbol(cpu_resume);
return;
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 5b614388d72f..6d28aa20a7d3 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -58,10 +58,10 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
struct platform_device *pdev;
int res;
- if (omap_hsmmc_done != 1)
+ if (omap_hsmmc_done)
return;
- omap_hsmmc_done++;
+ omap_hsmmc_done = 1;
for (; c->mmc; c++) {
pdev = c->pdev;
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index f040244c57e7..2f4f7002f38d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -839,6 +839,7 @@ static struct omap_hwmod dra7xx_gpio1_hwmod = {
.name = "gpio1",
.class = &dra7xx_gpio_hwmod_class,
.clkdm_name = "wkupaon_clkdm",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.main_clk = "wkupaon_iclk_mux",
.prcm = {
.omap4 = {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index c89010e56488..4157987f4a3d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -168,7 +168,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -194,7 +195,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -212,10 +214,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
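The second pinctrl state added to each MMC controller here, "clk-gate", lets the driver stop the card clock cleanly (for example around re-tuning or voltage switching) by muxing the clock pad back to a pulled-down GPIO instead of leaving it driven. The matching pin groups are added to the SoC dtsi files later in this diff; the eMMC one has this shape, shown here for reference:

/* as added to meson-gxbb.dtsi / meson-gxl.dtsi further below */
emmc_clk_gate_pins: emmc_clk_gate {
	mux {
		groups = "BOOT_8";		/* the eMMC clock pad */
		function = "gpio_periphs";	/* mux it back to plain GPIO */
	};
	cfg-pull-down {
		pins = "BOOT_8";
		bias-pull-down;			/* hold the line low while gated */
	};
};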
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 9697a7a79464..4b17a76959b2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -107,6 +107,9 @@
states = <3300000 0>,
<1800000 1>;
+
+ regulator-settling-time-up-us = <100>;
+ regulator-settling-time-down-us = <5000>;
};
wifi_32k: wifi-32k {
@@ -250,7 +253,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -276,11 +280,16 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
- max-frequency = <100000000>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ max-frequency = <200000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
@@ -294,10 +303,10 @@
&sd_emmc_c {
status = "disabled";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
max-frequency = <200000000>;
non-removable;
disable-wp;
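Advertising SDR50/SDR104 above means the MMC core will switch card signaling to 1.8 V, which on boards like this is done through a GPIO-toggled regulator; the new regulator-settling-time-*-us values tell the core how long to wait after each switch before talking to the card again. A sketch of the overall pattern, with placeholder node and GPIO names and assuming the rail is wired as vqmmc-supply:

vddio_sd: regulator-vddio-sd {
	compatible = "regulator-gpio";
	regulator-min-microvolt = <1800000>;
	regulator-max-microvolt = <3300000>;
	gpios = <&gpio_ao 5 GPIO_ACTIVE_HIGH>;	/* placeholder GPIO */
	states = <3300000 0>,
		 <1800000 1>;
	regulator-settling-time-up-us = <100>;
	regulator-settling-time-down-us = <5000>;
};

&sd_emmc_b {
	sd-uhs-sdr104;
	vqmmc-supply = <&vddio_sd>;	/* rail switched during the UHS voltage change */
};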
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 9c59c3c6d1b6..38dfdde5c147 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -51,7 +51,7 @@
/ {
compatible = "nexbox,a95x", "amlogic,meson-gxbb";
model = "NEXBOX A95X";
-
+
aliases {
serial0 = &uart_AO;
};
@@ -232,7 +232,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -253,7 +254,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -271,10 +273,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index d147c853ab05..1ffa1c238a72 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -50,7 +50,7 @@
/ {
compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb";
model = "Hardkernel ODROID-C2";
-
+
aliases {
serial0 = &uart_AO;
};
@@ -253,7 +253,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -271,10 +272,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
max-frequency = <200000000>;
non-removable;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 81ffc689a5bf..23c08c3afd0a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -194,7 +194,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -220,10 +221,14 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
max-frequency = <100000000>;
disable-wp;
@@ -238,10 +243,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 346753fb6324..f2bc6dea1fc6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -155,7 +155,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins &sdio_irq_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -181,7 +182,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -198,10 +200,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 52f1687e7a09..af834cdbba79 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -392,6 +392,17 @@
};
};
+ emmc_clk_gate_pins: emmc_clk_gate {
+ mux {
+ groups = "BOOT_8";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "BOOT_8";
+ bias-pull-down;
+ };
+ };
+
nor_pins: nor {
mux {
groups = "nor_d",
@@ -430,6 +441,17 @@
};
};
+ sdcard_clk_gate_pins: sdcard_clk_gate {
+ mux {
+ groups = "CARD_2";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "CARD_2";
+ bias-pull-down;
+ };
+ };
+
sdio_pins: sdio {
mux {
groups = "sdio_d0",
@@ -442,6 +464,17 @@
};
};
+ sdio_clk_gate_pins: sdio_clk_gate {
+ mux {
+ groups = "GPIOX_4";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "GPIOX_4";
+ bias-pull-down;
+ };
+ };
+
sdio_irq_pins: sdio_irq {
mux {
groups = "sdio_irq";
@@ -661,21 +694,21 @@
&sd_emmc_a {
clocks = <&clkc CLKID_SD_EMMC_A>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_A_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_b {
clocks = <&clkc CLKID_SD_EMMC_B>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_B_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_c {
clocks = <&clkc CLKID_SD_EMMC_C>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_C_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
index 2a5804ce7f4b..977b4240f3c1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
@@ -123,7 +123,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -141,10 +142,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <100000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 69ca14ac10fa..64c54c92e214 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -91,6 +91,9 @@
states = <3300000 0>,
<1800000 1>;
+
+ regulator-settling-time-up-us = <200>;
+ regulator-settling-time-down-us = <50000>;
};
vddio_boot: regulator-vddio_boot {
@@ -197,10 +200,14 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
max-frequency = <100000000>;
disable-wp;
@@ -215,10 +222,12 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
cap-mmc-highspeed;
+ mmc-ddr-3_3v;
max-frequency = <50000000>;
non-removable;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 4c2ac7650fcd..1b8f32867aa1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -189,7 +189,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -210,7 +211,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -228,10 +230,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index f3eea8e89d12..129af9068814 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -95,7 +95,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -116,7 +117,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -134,10 +136,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index d6876e64979e..d8dd3298b15c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -281,6 +281,17 @@
};
};
+ emmc_clk_gate_pins: emmc_clk_gate {
+ mux {
+ groups = "BOOT_8";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "BOOT_8";
+ bias-pull-down;
+ };
+ };
+
nor_pins: nor {
mux {
groups = "nor_d",
@@ -319,6 +330,17 @@
};
};
+ sdcard_clk_gate_pins: sdcard_clk_gate {
+ mux {
+ groups = "CARD_2";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "CARD_2";
+ bias-pull-down;
+ };
+ };
+
sdio_pins: sdio {
mux {
groups = "sdio_d0",
@@ -331,6 +353,17 @@
};
};
+ sdio_clk_gate_pins: sdio_clk_gate {
+ mux {
+ groups = "GPIOX_4";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "GPIOX_4";
+ bias-pull-down;
+ };
+ };
+
sdio_irq_pins: sdio_irq {
mux {
groups = "sdio_irq";
@@ -603,21 +636,21 @@
&sd_emmc_a {
clocks = <&clkc CLKID_SD_EMMC_A>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_A_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_b {
clocks = <&clkc CLKID_SD_EMMC_B>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_B_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_c {
clocks = <&clkc CLKID_SD_EMMC_C>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_C_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index 9b10c5f4f8c0..22c697732f66 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -175,7 +175,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -193,10 +194,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index 08f1dd69b679..470f72bb863c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -220,7 +220,6 @@
pinctrl-names = "default";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 4d360713ed12..30d48ecf46e0 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -254,7 +254,7 @@
ap_syscon: system-controller@6f4000 {
compatible = "syscon", "simple-mfd";
- reg = <0x6f4000 0x1000>;
+ reg = <0x6f4000 0x2000>;
ap_clk: clock {
compatible = "marvell,ap806-clock";
@@ -265,7 +265,7 @@
compatible = "marvell,ap806-pinctrl";
};
- ap_gpio: gpio {
+ ap_gpio: gpio@1040 {
compatible = "marvell,armada-8k-gpio";
offset = <0x1040>;
ngpios = <20>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index e0518b4bc6c2..19fbaa5e7bdd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -113,8 +113,7 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x0>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
+
#cooling-cells = <2>; /* min followed by max */
};
@@ -123,8 +122,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x1>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
};
cpu_l2: cpu@2 {
@@ -132,8 +129,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x2>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
};
cpu_l3: cpu@3 {
@@ -141,8 +136,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x3>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
};
cpu_b0: cpu@100 {
@@ -150,8 +143,7 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x100>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
+
#cooling-cells = <2>; /* min followed by max */
};
@@ -160,8 +152,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x101>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
};
cpu_b2: cpu@102 {
@@ -169,8 +159,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x102>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
};
cpu_b3: cpu@103 {
@@ -178,62 +166,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x103>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
- };
- };
-
- cluster0_opp: opp-table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp00 {
- opp-hz = /bits/ 64 <312000000>;
- opp-microvolt = <950000>;
- clock-latency-ns = <40000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <950000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <950000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <1025000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <1125000>;
- };
- };
-
- cluster1_opp: opp-table1 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp00 {
- opp-hz = /bits/ 64 <312000000>;
- opp-microvolt = <950000>;
- clock-latency-ns = <40000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <950000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <950000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <975000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <1050000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index d79e9b3265b9..ab7629c5b856 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1629,9 +1629,9 @@
compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
reg = <0x0 0xff960000 0x0 0x8000>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_MIPIDPHY_REF>, <&cru PCLK_MIPI_DSI0>,
- <&cru SCLK_DPHY_TX0_CFG>;
- clock-names = "ref", "pclk", "phy_cfg";
+ clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>,
+ <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>;
+ clock-names = "ref", "pclk", "phy_cfg", "grf";
power-domains = <&power RK3399_PD_VIO>;
rockchip,grf = <&grf>;
status = "disabled";
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3585a5e26151..f7c4d2146aed 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -95,16 +95,19 @@
#define KERNEL_END _end
/*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
*/
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT 1
#else
#define KASAN_SHADOW_SIZE (0)
+#define KASAN_THREAD_SHIFT 0
#endif
-#define MIN_THREAD_SHIFT 14
+#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
/*
* VMAP'd stacks are allocated at page granularity, so we must ensure that such
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index f0e6d717885b..d06fbe4cd38d 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void)
return 0;
}
-late_initcall(armv8_deprecated_init);
+core_initcall(armv8_deprecated_init);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index cd52d365d1f0..21e2c95d24e7 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void)
return 0;
}
-late_initcall(enable_mrs_emulation);
+core_initcall(enable_mrs_emulation);
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index f444f374bd7b..5d547deb6996 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -444,4 +444,4 @@ static int __init fpsimd_init(void)
return 0;
}
-late_initcall(fpsimd_init);
+core_initcall(fpsimd_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2069e9bc0fca..b64958b23a7f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr)
(esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
(esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
} else {
- pr_alert(" ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK);
+ pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
}
pr_alert(" CM = %lu, WnR = %lu\n",
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 87cde1e4b38c..0777f3a8a1f3 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -194,6 +194,10 @@ config TIMER_DIVIDE
int "Timer divider (integer)"
default "128"
+config CPU_BIG_ENDIAN
+ bool "Generate big endian code"
+ default n
+
config CPU_LITTLE_ENDIAN
bool "Generate little endian code"
default n
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 647dd94a0c39..72b96f282689 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -114,6 +114,15 @@ static void set_eit_vector_entries(void)
_flush_cache_copyback_all();
}
+void abort(void)
+{
+ BUG();
+
+ /* if that doesn't kill us, halt */
+ panic("Oops failed to kill thread");
+}
+EXPORT_SYMBOL(abort);
+
void __init trap_init(void)
{
set_eit_vector_entries();
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 903f3bf48419..7e25c5cc353a 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return __cmpxchg_small(ptr, old, new, size);
case 4:
- return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+ return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+ (u32)old, new);
case 8:
/* lld/scd are only available for MIPS64 */
if (!IS_ENABLED(CONFIG_64BIT))
return __cmpxchg_called_with_bad_pointer();
- return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
+ return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+ (u64)old, new);
default:
return __cmpxchg_called_with_bad_pointer();
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 100f23dfa438..ac584c5823d0 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
}
static struct plat_stmmacenet_data ls1x_eth0_pdata = {
- .bus_id = 0,
- .phy_addr = -1,
+ .bus_id = 0,
+ .phy_addr = -1,
#if defined(CONFIG_LOONGSON1_LS1B)
- .interface = PHY_INTERFACE_MODE_MII,
+ .interface = PHY_INTERFACE_MODE_MII,
#elif defined(CONFIG_LOONGSON1_LS1C)
- .interface = PHY_INTERFACE_MODE_RMII,
+ .interface = PHY_INTERFACE_MODE_RMII,
#endif
- .mdio_bus_data = &ls1x_mdio_bus_data,
- .dma_cfg = &ls1x_eth_dma_cfg,
- .has_gmac = 1,
- .tx_coe = 1,
- .init = ls1x_eth_mux_init,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .rx_queues_to_use = 1,
+ .tx_queues_to_use = 1,
+ .init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
#ifdef CONFIG_LOONGSON1_LS1B
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
- .bus_id = 1,
- .phy_addr = -1,
- .interface = PHY_INTERFACE_MODE_MII,
- .mdio_bus_data = &ls1x_mdio_bus_data,
- .dma_cfg = &ls1x_eth_dma_cfg,
- .has_gmac = 1,
- .tx_coe = 1,
- .init = ls1x_eth_mux_init,
+ .bus_id = 1,
+ .phy_addr = -1,
+ .interface = PHY_INTERFACE_MODE_MII,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .rx_queues_to_use = 1,
+ .tx_queues_to_use = 1,
+ .init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth1_resources[] = {
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 192542dbd972..16d9ef5a78c5 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2558,7 +2558,6 @@ dcopuop:
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
return SIGILL;
}
}
@@ -2719,7 +2718,6 @@ dcopuop:
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
return SIGILL;
}
}
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 7646891c4e9b..01b7a87ea678 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
{
int src, dst, r, td, ts, mem_off, b_off;
bool need_swap, did_move, cmp_eq;
- unsigned int target;
+ unsigned int target = 0;
u64 t64;
s64 t64s;
int bpf_op = BPF_OP(insn->code);
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh
index 5c4f93687039..654d652d7fa1 100755
--- a/arch/mips/tools/generic-board-config.sh
+++ b/arch/mips/tools/generic-board-config.sh
@@ -30,8 +30,6 @@ cfg="$4"
boards_origin="$5"
shift 5
-cd "${srctree}"
-
# Only print Skipping... lines if the user explicitly specified BOARDS=. In the
# general case it only serves to obscure the useful output about what actually
# was included.
@@ -48,7 +46,7 @@ environment*)
esac
for board in $@; do
- board_cfg="arch/mips/configs/generic/board-${board}.config"
+ board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
if [ ! -f "${board_cfg}" ]; then
echo "WARNING: Board config '${board_cfg}' not found"
continue
@@ -84,7 +82,7 @@ for board in $@; do
done || continue
# Merge this board config fragment into our final config file
- ./scripts/kconfig/merge_config.sh \
+ ${srctree}/scripts/kconfig/merge_config.sh \
-m -O ${objtree} ${cfg} ${board_cfg} \
| grep -Ev '^(#|Using)'
done
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index a45a67d526f8..30f92391a93e 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -146,7 +146,7 @@ void machine_power_off(void)
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
- lockup_detector_suspend();
+ lockup_detector_soft_poweroff();
for (;;);
}
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 1df770e8cbe0..7275fed271af 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void)
case PVR_POWER8:
case PVR_POWER8E:
case PVR_POWER8NVL:
- __flush_tlb_power8(POWER8_TLB_SETS);
+ __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
break;
case PVR_POWER9:
- __flush_tlb_power9(POWER9_TLB_SETS_HASH);
+ __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
break;
default:
pr_err("unknown CPU version for boot TLB flush\n");
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 48da0f5d2f7f..b82586c53560 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x700)
EXC_COMMON_BEGIN(program_check_common)
- EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+ /*
+ * It's possible to receive a TM Bad Thing type program check with
+ * userspace register values (in particular r1), but with SRR1 reporting
+ * that we came from the kernel. Normally that would confuse the bad
+ * stack logic, and we would report a bad kernel stack pointer. Instead
+ * we switch to the emergency stack if we're taking a TM Bad Thing from
+ * the kernel.
+ */
+ li r10,MSR_PR /* Build a mask of MSR_PR .. */
+ oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
+ and r10,r10,r12 /* Mask SRR1 with that. */
+ srdi r10,r10,8 /* Shift it so we can compare */
+ cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
+ bne 1f /* If != go to normal path. */
+
+ /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
+ andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
+ /* 3 in EXCEPTION_PROLOG_COMMON */
+ mr r10,r1 /* Save r1 */
+ ld r1,PACAEMERGSP(r13) /* Use emergency stack */
+ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ b 3f /* Jump into the macro !! */
+1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index b76ca198e09c..72f153c6f3fa 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
+ /*
+ * On POWER9 DD2.1 and below, it's possible to get a machine check
+ * caused by a paste instruction where only DSISR bit 25 is set. This
+ * will result in the MCE handler seeing an unknown event and the kernel
+ * crashing. An MCE that occurs like this is spurious, so we don't need
+ * to do anything in terms of servicing it. If there is something that
+ * needs to be serviced, the CPU will raise the MCE again with the
+ * correct DSISR so that it can be serviced properly. So detect this
+ * case and mark it as handled.
+ */
+ if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
+ return 1;
+
return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 0ac741fae90e..2e3bc16d02b2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -904,9 +904,6 @@ void __init setup_arch(char **cmdline_p)
#endif
#endif
-#ifdef CONFIG_PPC_64K_PAGES
- init_mm.context.pte_frag = NULL;
-#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
mm_iommu_init(&init_mm);
#endif
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c83c115858c1..b2c002993d78 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
if (MSR_TM_RESV(msr))
return -EINVAL;
- /* pull in MSR TM from user context */
+ /* pull in MSR TS bits from user context */
regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+ /*
+ * Ensure that TM is enabled in regs->msr before we leave the signal
+ * handler. It could be the case that (a) user disabled the TM bit
+ * through the manipulation of the MSR bits in uc_mcontext or (b) the
+ * TM bit was disabled because a sufficient number of context switches
+ * happened whilst in the signal handler and load_tm overflowed,
+ * disabling the TM bit. In either case we can end up with an illegal
+ * TM state leading to a TM Bad Thing when we return to userspace.
+ */
+ regs->msr |= MSR_TM;
+
/* pull in MSR LE from user context */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
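The two assignments above use the usual masked read-modify-write idiom to import only the TS and LE fields from the user-supplied MSR value. A minimal sketch of that idiom, with illustrative values standing in for the real MSR masks:

    #include <assert.h>
    #include <stdint.h>

    /* Copy only the bits selected by mask from src into dst, keeping the rest of dst. */
    static uint64_t copy_bits(uint64_t dst, uint64_t src, uint64_t mask)
    {
        return (dst & ~mask) | (src & mask);
    }

    int main(void)
    {
        /* 0x000f is a stand-in mask, not the real MSR_TS_MASK or MSR_LE. */
        assert(copy_bits(0xff00, 0x00ff, 0x000f) == 0xff0f);
        return 0;
    }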
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index c98e90b4ea7b..b4e2b7165f79 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
* - we have no stack frame and can not allocate one
* - LR points back to the original caller (in A)
* - CTR holds the new NIP in C
- * - r0 & r12 are free
- *
- * r0 can't be used as the base register for a DS-form load or store, so
- * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ * - r0, r11 & r12 are free
*/
livepatch_handler:
CURRENT_THREAD_INFO(r12, r1)
- /* Save stack pointer into r0 */
- mr r0, r1
-
/* Allocate 3 x 8 bytes */
- ld r1, TI_livepatch_sp(r12)
- addi r1, r1, 24
- std r1, TI_livepatch_sp(r12)
+ ld r11, TI_livepatch_sp(r12)
+ addi r11, r11, 24
+ std r11, TI_livepatch_sp(r12)
/* Save toc & real LR on livepatch stack */
- std r2, -24(r1)
+ std r2, -24(r11)
mflr r12
- std r12, -16(r1)
+ std r12, -16(r11)
/* Store stack end marker */
lis r12, STACK_END_MAGIC@h
ori r12, r12, STACK_END_MAGIC@l
- std r12, -8(r1)
-
- /* Restore real stack pointer */
- mr r1, r0
+ std r12, -8(r11)
/* Put ctr in r12 for global entry and branch there */
mfctr r12
@@ -216,36 +207,30 @@ livepatch_handler:
/*
* Now we are returning from the patched function to the original
- * caller A. We are free to use r0 and r12, and we can use r2 until we
+ * caller A. We are free to use r11, r12 and we can use r2 until we
* restore it.
*/
CURRENT_THREAD_INFO(r12, r1)
- /* Save stack pointer into r0 */
- mr r0, r1
-
- ld r1, TI_livepatch_sp(r12)
+ ld r11, TI_livepatch_sp(r12)
/* Check stack marker hasn't been trashed */
lis r2, STACK_END_MAGIC@h
ori r2, r2, STACK_END_MAGIC@l
- ld r12, -8(r1)
+ ld r12, -8(r11)
1: tdne r12, r2
EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
/* Restore LR & toc from livepatch stack */
- ld r12, -16(r1)
+ ld r12, -16(r11)
mtlr r12
- ld r2, -24(r1)
+ ld r2, -24(r11)
/* Pop livepatch stack frame */
- CURRENT_THREAD_INFO(r12, r0)
- subi r1, r1, 24
- std r1, TI_livepatch_sp(r12)
-
- /* Restore real stack pointer */
- mr r1, r0
+ CURRENT_THREAD_INFO(r12, r1)
+ subi r11, r11, 24
+ std r11, TI_livepatch_sp(r12)
/* Return to original caller of live patched function */
blr
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 2f6eadd9408d..c702a8981452 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -310,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu)
if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
return 0;
- if (watchdog_suspended)
- return 0;
-
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
return 0;
@@ -358,36 +355,39 @@ static void watchdog_calc_timeouts(void)
wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}
-void watchdog_nmi_reconfigure(void)
+void watchdog_nmi_stop(void)
{
int cpu;
- watchdog_calc_timeouts();
-
for_each_cpu(cpu, &wd_cpus_enabled)
stop_wd_on_cpu(cpu);
+}
+void watchdog_nmi_start(void)
+{
+ int cpu;
+
+ watchdog_calc_timeouts();
for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
start_wd_on_cpu(cpu);
}
/*
- * This runs after lockup_detector_init() which sets up watchdog_cpumask.
+ * Invoked from core watchdog init.
*/
-static int __init powerpc_watchdog_init(void)
+int __init watchdog_nmi_probe(void)
{
int err;
- watchdog_calc_timeouts();
-
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
- start_wd_on_cpu, stop_wd_on_cpu);
- if (err < 0)
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "powerpc/watchdog:online",
+ start_wd_on_cpu, stop_wd_on_cpu);
+ if (err < 0) {
pr_warn("Watchdog could not be initialized");
-
+ return err;
+ }
return 0;
}
-arch_initcall(powerpc_watchdog_init);
static void handle_backtrace_ipi(struct pt_regs *regs)
{
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 13304622ab1c..bf457843e032 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
return -EINVAL;
state = &sb->irq_state[idx];
arch_spin_lock(&sb->lock);
- *server = state->guest_server;
+ *server = state->act_server;
*priority = state->guest_priority;
arch_spin_unlock(&sb->lock);
@@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
xive->saved_src_count++;
/* Convert saved state into something compatible with xics */
- val = state->guest_server;
+ val = state->act_server;
prio = state->saved_scan_prio;
if (prio == MASKED) {
@@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
/* First convert prio and mark interrupt as untargetted */
act_prio = xive_prio_from_guest(guest_prio);
state->act_priority = MASKED;
- state->guest_server = server;
/*
* We need to drop the lock due to the mutex below. Hopefully
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 5938f7644dc1..6ba63f8e8a61 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state {
struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */
/* Targetting as set by guest */
- u32 guest_server; /* Current guest selected target */
u8 guest_priority; /* Guest set priority */
u8 saved_priority; /* Saved priority when masking */
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5e8418c28bd8..f208f560aecd 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
* Logical instructions
*/
case 26: /* cntlzw */
- op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+ val = (unsigned int) regs->gpr[rd];
+ op->val = ( val ? __builtin_clz(val) : 32 );
goto logical_done;
#ifdef __powerpc64__
case 58: /* cntlzd */
- op->val = __builtin_clzl(regs->gpr[rd]);
+ val = regs->gpr[rd];
+ op->val = ( val ? __builtin_clzl(val) : 64 );
goto logical_done;
#endif
case 28: /* and */
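The guarded forms above avoid calling the GCC builtins with a zero argument, which is undefined for __builtin_clz()/__builtin_clzl(), whereas cntlzw and cntlzd define the result as the operand width. A small user-space sketch of the same guard (function names are illustrative; __builtin_clzll is used for the 64-bit case so the example is not tied to a 64-bit long):

    #include <assert.h>
    #include <stdint.h>

    /* cntlzw semantics: count leading zeros of a 32-bit value, with clz(0) == 32. */
    static unsigned int clz32(uint32_t val)
    {
        return val ? (unsigned int)__builtin_clz(val) : 32;
    }

    /* cntlzd semantics: 64-bit variant, with clz(0) == 64. */
    static unsigned int clz64(uint64_t val)
    {
        return val ? (unsigned int)__builtin_clzll(val) : 64;
    }

    int main(void)
    {
        assert(clz32(0) == 32);
        assert(clz32(1) == 31);
        assert(clz64(0) == 64);
        assert(clz64(1ULL << 63) == 0);
        return 0;
    }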
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b95c584ce19d..a51df9ef529d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1438,7 +1438,6 @@ out:
int arch_update_cpu_topology(void)
{
- lockdep_assert_cpus_held();
return numa_update_cpu_topology(true);
}
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 65eda1997c3f..f6c7f54c0515 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -361,9 +361,9 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
break;
}
wmb();
+ local_irq_restore(flags);
flush_tlb_kernel_range((unsigned long)page_address(start),
(unsigned long)page_address(page));
- local_irq_restore(flags);
return err;
}
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9ccac86f3463..88126245881b 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
/* Take the mutex lock for this node and then decrement the reference count */
mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ /*
+ * This can happen when a perf session is started and all cpus
+ * in a given node are then offlined.
+ *
+ * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+ * already sets ref->refc to zero when the cpu going offline is
+ * the last cpu in the node, and makes an OPAL call to disable
+ * the engine in that node.
+ */
+ mutex_unlock(&ref->lock);
+ return;
+ }
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
/* We need only vbase for core counters */
mem_info->vbase = page_address(alloc_pages_node(phys_id,
- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
- get_order(size)));
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_NOWARN, get_order(size)));
if (!mem_info->vbase)
return -ENOMEM;
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
return;
mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ /*
+ * This can happen when a perf session is started and all cpus
+ * in a given core are then offlined.
+ *
+ * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+ * already sets ref->refc to zero when the cpu going offline is
+ * the last cpu in the core, and makes an OPAL call to disable
+ * the engine in that core.
+ */
+ mutex_unlock(&ref->lock);
+ return;
+ }
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
* free the memory in cpu offline path.
*/
local_mem = page_address(alloc_pages_node(phys_id,
- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
- get_order(size)));
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_NOWARN, get_order(size)));
if (!local_mem)
return -ENOMEM;
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
}
/* Only free the attr_groups which are dynamically allocated */
- kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+ if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+ kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
kfree(pmu_ptr);
return;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 897aa1400eb8..bbb73aa0eb8f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static unsigned long pnv_memory_block_size(void)
{
- return 256UL * 1024 * 1024;
+ /*
+ * We map the kernel linear region with 1GB large pages on radix. For
+ * memory hot unplug to work our memory block size must be at least
+ * this size.
+ */
+ if (radix_enabled())
+ return 1UL * 1024 * 1024 * 1024;
+ else
+ return 256UL * 1024 * 1024;
}
#endif
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 9234be1e66f5..5011ffea4e4b 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -71,6 +71,8 @@
#define RIWAR_WRTYP_ALLOC 0x00006000
#define RIWAR_SIZE_MASK 0x0000003F
+static DEFINE_SPINLOCK(fsl_rio_config_lock);
+
#define __fsl_read_rio_config(x, addr, err, op) \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
@@ -184,6 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 *val)
{
struct rio_priv *priv = mport->priv;
+ unsigned long flags;
u8 *data;
u32 rval, err = 0;
@@ -197,6 +200,8 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
+ spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
out_be32(&priv->maint_atmu_regs->rowtar,
(destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -213,6 +218,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
__fsl_read_rio_config(rval, data, err, "lwz");
break;
default:
+ spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
return -EINVAL;
}
@@ -221,6 +227,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
err, destid, hopcount, offset);
}
+ spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
*val = rval;
return err;
@@ -244,7 +251,10 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 val)
{
struct rio_priv *priv = mport->priv;
+ unsigned long flags;
u8 *data;
+ int ret = 0;
+
pr_debug
("fsl_rio_config_write:"
" index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
@@ -255,6 +265,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
+ spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
out_be32(&priv->maint_atmu_regs->rowtar,
(destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -271,10 +283,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
out_be32((u32 *) data, val);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+ spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
- return 0;
+ return ret;
}
static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index ab7a74c75be8..88b35a3dcdc5 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -104,6 +104,8 @@
#define DOORBELL_MESSAGE_SIZE 0x08
+static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
+
struct rio_msg_regs {
u32 omr;
u32 osr;
@@ -626,9 +628,13 @@ err_out:
int fsl_rio_doorbell_send(struct rio_mport *mport,
int index, u16 destid, u16 data)
{
+ unsigned long flags;
+
pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
index, destid, data);
+ spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
+
/* In the serial version silicons, such as MPC8548, MPC8641,
* below operations is must be.
*/
@@ -638,6 +644,8 @@ int fsl_rio_doorbell_send(struct rio_mport *mport,
out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
out_be32(&dbell->dbell_regs->odmr, 0x00000001);
+ spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);
+
return 0;
}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index f387318678b9..a3b8d7d1316e 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1402,6 +1402,14 @@ void xive_teardown_cpu(void)
if (xive_ops->teardown_cpu)
xive_ops->teardown_cpu(cpu, xc);
+
+#ifdef CONFIG_SMP
+ /* Get rid of IPI */
+ xive_cleanup_cpu_ipi(cpu, xc);
+#endif
+
+ /* Disable and free the queues */
+ xive_cleanup_cpu_queues(cpu, xc);
}
void xive_kexec_teardown_cpu(int secondary)
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index f24a70bc6855..d9c4c9366049 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -431,7 +431,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
+ if (!xc->hw_ipi)
+ return;
+
xive_irq_bitmap_free(xc->hw_ipi);
+ xc->hw_ipi = 0;
}
#endif /* CONFIG_SMP */
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7264.h b/arch/sh/include/cpu-sh2a/cpu/sh7264.h
index 4d1ef6d74bd6..2ae0e938b657 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7264.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7264.h
@@ -43,9 +43,7 @@ enum {
GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4,
GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0,
- /* Port H */
- GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4,
- GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0,
+ /* Port H - Port H does not have a Data Register */
/* Port I - not on device */
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index 2a0ca8780f0d..13c495a9fc00 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -45,9 +45,7 @@ enum {
GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4,
GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0,
- /* Port H */
- GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4,
- GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0,
+ /* Port H - Port H does not have a Data Register */
/* Port I - not on device */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h
index 3bb74e534d0f..78961ab78a5a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7722.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h
@@ -67,7 +67,7 @@ enum {
GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0,
/* PTQ */
- GPIO_PTQ7, GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4,
+ GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4,
GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0,
/* PTR */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h
index 5340f3bc1863..b40fb541e72a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7757.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h
@@ -40,7 +40,7 @@ enum {
/* PTJ */
GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3,
- GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, GPIO_PTJ7_RESV,
+ GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6,
/* PTK */
GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3,
@@ -48,7 +48,7 @@ enum {
/* PTL */
GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3,
- GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, GPIO_PTL7_RESV,
+ GPIO_PTL4, GPIO_PTL5, GPIO_PTL6,
/* PTM */
GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3,
@@ -56,7 +56,7 @@ enum {
/* PTN */
GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3,
- GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, GPIO_PTN7_RESV,
+ GPIO_PTN4, GPIO_PTN5, GPIO_PTN6,
/* PTO */
GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3,
@@ -68,7 +68,7 @@ enum {
/* PTQ */
GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3,
- GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, GPIO_PTQ7_RESV,
+ GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6,
/* PTR */
GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0be3828752e5..4e83f950713e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -44,7 +44,6 @@ config SPARC
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select LOCKDEP_SMALL if LOCKDEP
- select ARCH_WANT_RELAX_ORDER
config SPARC32
def_bool !64BIT
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 8a13d468635a..50e0d2bc4528 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -176,7 +176,7 @@
/*
* This is a sneaky trick to help the unwinder find pt_regs on the stack. The
* frame pointer is replaced with an encoded pointer to pt_regs. The encoding
- * is just setting the LSB, which makes it an invalid stack address and is also
+ * is just clearing the MSB, which makes it an invalid stack address and is also
* a signal to the unwinder that it's a pt_regs pointer in disguise.
*
* NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
@@ -185,7 +185,7 @@
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
mov %esp, %ebp
- orl $0x1, %ebp
+ andl $0x7fffffff, %ebp
#endif
.endm
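For reference, a user-space sketch of the 32-bit encode/decode pair this change introduces; the matching decode_frame_pointer() update appears in the unwind_frame.c hunk further down. Function names and the sample address below are illustrative:

    #include <assert.h>
    #include <stdint.h>

    /*
     * Kernel addresses on 32-bit have the top bit set, so clearing it yields a
     * value that can never be a valid frame pointer and marks it as pt_regs.
     */
    static uint32_t encode_frame_pointer32(uint32_t regs_addr)
    {
        return regs_addr & 0x7fffffff;          /* andl $0x7fffffff, %ebp */
    }

    static uint32_t decode_frame_pointer32(uint32_t bp)
    {
        if (bp & 0x80000000)
            return 0;                           /* ordinary frame pointer */

        return bp | 0x80000000;                 /* recover the pt_regs address */
    }

    int main(void)
    {
        uint32_t regs_addr = 0xc1234560;        /* illustrative kernel address */

        assert(decode_frame_pointer32(encode_frame_pointer32(regs_addr)) == regs_addr);
        assert(decode_frame_pointer32(0xc0000000) == 0);
        return 0;
    }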
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 49167258d587..f6cdb7a1455e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -808,7 +808,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
- UNWIND_HINT_IRET_REGS offset=8
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
/* Sanity check */
.if \shift_ist != -1 && \paranoid == 0
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 829e89cfcee2..9fb9a1f1e47b 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void)
return 0;
}
- if (lockup_detector_suspend() != 0) {
- pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n");
- return 0;
- }
+ cpus_read_lock();
+
+ hardlockup_detector_perf_stop();
x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
@@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void)
x86_pmu.commit_scheduling = NULL;
x86_pmu.stop_scheduling = NULL;
- lockup_detector_resume();
-
- cpus_read_lock();
+ hardlockup_detector_perf_restart();
for_each_online_cpu(c)
free_excl_cntrs(c);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 1c5390f1cf09..d45e06346f14 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
pmus[i].type = type;
pmus[i].boxes = kzalloc(size, GFP_KERNEL);
if (!pmus[i].boxes)
- return -ENOMEM;
+ goto err;
}
type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
sizeof(*attr_group), GFP_KERNEL);
if (!attr_group)
- return -ENOMEM;
+ goto err;
attrs = (struct attribute **)(attr_group + 1);
attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
}
type->pmu_group = &uncore_pmu_attr_group;
+
return 0;
+
+err:
+ for (i = 0; i < type->num_boxes; i++)
+ kfree(pmus[i].boxes);
+ kfree(pmus);
+
+ return -ENOMEM;
}
static int __init
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 1a8eb550c40f..a5db63f728a2 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);
+u32 hv_max_vp_index;
+
static int hv_cpu_init(unsigned int cpu)
{
u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
hv_vp_index[smp_processor_id()] = msr_vp_index;
+ if (msr_vp_index > hv_max_vp_index)
+ hv_max_vp_index = msr_vp_index;
+
return 0;
}
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 39e7f6e50919..9cc9e1c1e2db 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;
-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
/*
* Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ /* valid_bank_mask can represent up to 64 banks */
+ if (hv_max_vp_index / 64 >= 64)
+ return 0;
+
+ /*
+ * Clear all banks up to the maximum possible bank: hv_flush_pcpu_ex
+ * structs are not cleared between calls, so we would otherwise risk
+ * flushing unneeded vCPUs.
+ */
+ for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+ flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
+
/*
* Some banks may end up being empty but this is acceptable.
*/
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
vcpu = hv_cpu_number_to_vp_number(cpu);
vcpu_bank = vcpu / 64;
vcpu_offset = vcpu % 64;
-
- /* valid_bank_mask can represent up to 64 banks */
- if (vcpu_bank >= 64)
- return 0;
-
__set_bit(vcpu_offset, (unsigned long *)
&flush->hv_vp_set.bank_contents[vcpu_bank]);
if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
int cpu, vcpu, gva_n, max_gvas;
+ struct hv_flush_pcpu **flush_pcpu;
struct hv_flush_pcpu *flush;
u64 status = U64_MAX;
unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
local_irq_save(flags);
- flush = this_cpu_ptr(pcpu_flush);
+ flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+ if (unlikely(!*flush_pcpu))
+ *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+ flush = *flush_pcpu;
+
+ if (unlikely(!flush)) {
+ local_irq_restore(flags);
+ goto do_native;
+ }
if (info->mm) {
flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
int nr_bank = 0, max_gvas, gva_n;
+ struct hv_flush_pcpu_ex **flush_pcpu;
struct hv_flush_pcpu_ex *flush;
u64 status = U64_MAX;
unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
local_irq_save(flags);
- flush = this_cpu_ptr(pcpu_flush_ex);
+ flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+ if (unlikely(!*flush_pcpu))
+ *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+ flush = *flush_pcpu;
+
+ if (unlikely(!flush)) {
+ local_irq_restore(flags);
+ goto do_native;
+ }
if (info->mm) {
flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
- 0, nr_bank + 2, flush, NULL);
+ 0, nr_bank, flush, NULL);
} else if (info->end &&
((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
- 0, nr_bank + 2, flush, NULL);
+ 0, nr_bank, flush, NULL);
} else {
gva_n = fill_gva_list(flush->gva_list, nr_bank,
info->start, info->end);
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
- gva_n, nr_bank + 2, flush, NULL);
+ gva_n, nr_bank, flush, NULL);
}
local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
return;
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+ pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
else
- pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+ pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
}
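A rough user-space sketch of the bank arithmetic the fixed cpumask_to_vp_set() relies on: vcpu / 64 selects the bank, vcpu % 64 the bit within it, and every bank up to hv_max_vp_index / 64 is cleared first because the per-cpu flush structure is reused between calls. The helper name and parameters below are illustrative, not from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define BANK_BITS 64

    static int fill_vp_set(uint64_t *banks, unsigned int max_vp_index,
                           const unsigned int *vcpus, int nr_vcpus)
    {
        unsigned int bank, nr_bank = 1;
        int i;

        /* Stale bits from a previous call must not leak into this one. */
        for (bank = 0; bank <= max_vp_index / BANK_BITS; bank++)
            banks[bank] = 0;

        for (i = 0; i < nr_vcpus; i++) {
            unsigned int vcpu = vcpus[i];

            banks[vcpu / BANK_BITS] |= 1ULL << (vcpu % BANK_BITS);
            if (vcpu / BANK_BITS + 1 > nr_bank)
                nr_bank = vcpu / BANK_BITS + 1;
        }

        return nr_bank;         /* banks the hypercall has to cover */
    }

    int main(void)
    {
        uint64_t banks[4];
        unsigned int vcpus[] = { 1, 70, 130 };

        printf("nr_bank = %d\n", fill_vp_set(banks, 191, vcpus, 3));
        return 0;
    }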
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e7636bac7372..6c98821fef5e 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -62,8 +62,10 @@
#define new_len2 145f-144f
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
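alt_max_short() computes a maximum without a conditional: a ^ ((a ^ b) & -(a < b)) yields b when a < b (the mask is all ones, so a ^ (a ^ b) == b) and a otherwise. The extra unary minus in the assembler version exists because gas evaluates a true relation as -1. A plain-C sketch of the same identity, where the comparison yields 0 or 1 and a single negation suffices:

    #include <assert.h>

    /* Branchless max(a, b) using the bithacks identity referenced above. */
    static unsigned int branchless_max(unsigned int a, unsigned int b)
    {
        return a ^ ((a ^ b) & -(unsigned int)(a < b));
    }

    int main(void)
    {
        assert(branchless_max(3, 7) == 7);
        assert(branchless_max(7, 3) == 7);
        assert(branchless_max(5, 5) == 5);
        return 0;
    }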
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index c096624137ae..ccbe24e697c4 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
alt_end_marker ":\n"
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
-#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
/*
* Pad the second replacement alternative with additional NOPs if it is
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index c40a95c33bb8..322d25ae23ab 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -110,6 +110,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
#endif
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index bc62e7cbf1b1..59ad3d132353 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
void __init kvm_guest_init(void);
-void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
@@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void)
#else /* CONFIG_KVM_GUEST */
#define kvm_guest_init() do {} while (0)
-#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wait(T, I) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
static inline bool kvm_para_available(void)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 181264989db5..8edac1de2e35 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -187,7 +187,6 @@ struct mca_msr_regs {
extern struct mce_vendor_flags mce_flags;
-extern struct mca_config mca_cfg;
extern struct mca_msr_regs msr_ops;
enum mce_notifier_prios {
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index c120b5db178a..3c856a15b98e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
DEBUG_LOCKS_WARN_ON(preemptible());
}
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
- int cpu = smp_processor_id();
-
- if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
- cpumask_clear_cpu(cpu, mm_cpumask(mm));
-}
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 738503e1f80c..530f448fddaf 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
* to this information.
*/
extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4893abf7f74f..c4aed0de565e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -82,6 +82,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
+static inline bool tlb_defer_switch_to_init_mm(void)
+{
+ /*
+ * If we have PCID, then switching to init_mm is reasonably
+ * fast. If we don't have PCID, then switching to init_mm is
+ * quite slow, so we try to defer it in the hopes that we can
+ * avoid it entirely. The latter approach runs the risk of
+ * receiving otherwise unnecessary IPIs.
+ *
+ * This choice is just a heuristic. The tlb code can handle this
+ * function returning true or false regardless of whether we have
+ * PCID.
+ */
+ return !static_cpu_has(X86_FEATURE_PCID);
+}
+
/*
* 6 because 6 should be plenty and struct tlb_state will fit in
* two cache lines.
@@ -105,6 +121,23 @@ struct tlb_state {
u16 next_asid;
/*
+ * We can be in one of several states:
+ *
+ * - Actively using an mm. Our CPU's bit will be set in
+ * mm_cpumask(loaded_mm) and is_lazy == false;
+ *
+ * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
+ * will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+ *
+ * - Lazily using a real mm. loaded_mm != &init_mm, our bit
+ * is set in mm_cpumask(loaded_mm), but is_lazy == true.
+ * We're heuristically guessing that the CR3 load we
+ * skipped more than makes up for the overhead added by
+ * lazy mode.
+ */
+ bool is_lazy;
+
+ /*
* Access to this CR4 shadow and to H/W CR4 is protected by
* disabling interrupts when modifying either one.
*/
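The comment block above describes three per-CPU states. Restated as a tiny classifier, purely for illustration (the enum and helper below are not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    enum tlb_cpu_state {
        TLB_ACTIVE_MM,  /* real mm loaded, bit set in mm_cpumask(), !is_lazy */
        TLB_NO_MM,      /* loaded_mm == &init_mm, bit clear, !is_lazy */
        TLB_LAZY_MM,    /* real mm still loaded, bit still set, is_lazy */
    };

    static enum tlb_cpu_state classify(bool loaded_mm_is_init_mm, bool is_lazy)
    {
        if (is_lazy)
            return TLB_LAZY_MM;

        return loaded_mm_is_init_mm ? TLB_NO_MM : TLB_ACTIVE_MM;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               classify(false, false),  /* actively using an mm */
               classify(true, false),   /* kernel thread on init_mm */
               classify(false, true));  /* lazily keeping an old mm loaded */
        return 0;
    }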
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 458da8509b75..6db28f17ff28 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
{}
};
+#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
+
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
@@ -402,11 +406,48 @@ void amd_flush_garts(void)
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
+static void __fix_erratum_688(void *info)
+{
+#define MSR_AMD64_IC_CFG 0xC0011021
+
+ msr_set_bit(MSR_AMD64_IC_CFG, 3);
+ msr_set_bit(MSR_AMD64_IC_CFG, 14);
+}
+
+/* Apply erratum 688 fix so machines without a BIOS fix work. */
+static __init void fix_erratum_688(void)
+{
+ struct pci_dev *F4;
+ u32 val;
+
+ if (boot_cpu_data.x86 != 0x14)
+ return;
+
+ if (!amd_northbridges.num)
+ return;
+
+ F4 = node_to_amd_nb(0)->link;
+ if (!F4)
+ return;
+
+ if (pci_read_config_dword(F4, 0x164, &val))
+ return;
+
+ if (val & BIT(2))
+ return;
+
+ on_each_cpu(__fix_erratum_688, NULL, 0);
+
+ pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
+}
+
static __init int init_amd_nbs(void)
{
amd_cache_northbridges();
amd_cache_gart();
+ fix_erratum_688();
+
return 0;
}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d705c769f77d..ff891772c9f8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
return ~0U;
}
+static u32 skx_deadline_rev(void)
+{
+ switch (boot_cpu_data.x86_mask) {
+ case 0x03: return 0x01000136;
+ case 0x04: return 0x02000014;
+ }
+
+ return ~0U;
+}
+
static const struct x86_cpu_id deadline_match[] = {
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014),
+ DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
const struct x86_cpu_id *m;
u32 rev;
- if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
m = x86_match_cpu(deadline_match);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 24f749324c0f..9990a71e311f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
unsigned int apicid, nshared, first, last;
- this_leaf = this_cpu_ci->info_list + index;
nshared = base->eax.split.num_threads_sharing + 1;
apicid = cpu_data(cpu).apicid;
first = apicid - (apicid % nshared);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 098530a93bb7..debb974fd17d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,3 +1,6 @@
+#ifndef __X86_MCE_INTERNAL_H__
+#define __X86_MCE_INTERNAL_H__
+
#include <linux/device.h>
#include <asm/mce.h>
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif
+
+extern struct mca_config mca_cfg;
+
+#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 40e28ed77fbf..486f640b02ef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -28,6 +28,8 @@
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
+#include "mce-internal.h"
+
#define NR_BLOCKS 5
#define THRESHOLD_MAX 0xFFF
#define INT_TYPE_APIC 0x00020000
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 86e8f0b2537b..c4fa4a85d4cb 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
bool *res = &dis_ucode_ldr;
#endif
- if (!have_cpuid_p())
- return *res;
-
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
void __init load_ucode_bsp(void)
{
unsigned int cpuid_1_eax;
+ bool intel = true;
- if (check_loader_disabled_bsp())
+ if (!have_cpuid_p())
return;
cpuid_1_eax = native_cpuid_eax(1);
switch (x86_cpuid_vendor()) {
case X86_VENDOR_INTEL:
- if (x86_family(cpuid_1_eax) >= 6)
- load_ucode_intel_bsp();
+ if (x86_family(cpuid_1_eax) < 6)
+ return;
break;
+
case X86_VENDOR_AMD:
- if (x86_family(cpuid_1_eax) >= 0x10)
- load_ucode_amd_bsp(cpuid_1_eax);
+ if (x86_family(cpuid_1_eax) < 0x10)
+ return;
+ intel = false;
break;
+
default:
- break;
+ return;
}
+
+ if (check_loader_disabled_bsp())
+ return;
+
+ if (intel)
+ load_ucode_intel_bsp();
+ else
+ load_ucode_amd_bsp(cpuid_1_eax);
}
static bool check_loader_disabled_ap(void)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8f7a9bbad514..7dbcb7adf797 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -34,6 +34,7 @@
#include <linux/mm.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
return 0;
}
+static bool is_blacklisted(unsigned int cpu)
+{
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
+ pr_err_once("late loading on model 79 is disabled.\n");
+ return true;
+ }
+
+ return false;
+}
+
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
bool refresh_fw)
{
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
const struct firmware *firmware;
enum ucode_state ret;
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
sprintf(name, "intel-ucode/%02x-%02x-%02x",
c->x86, c->x86_model, c->x86_mask);
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index cf2ce063f65a..2902ca4d5993 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
asmlinkage __visible void __init i386_start_kernel(void)
{
- cr4_init_shadow();
-
+ /* Make sure IDT is set up before any exception happens */
idt_setup_early_handler();
+ cr4_init_shadow();
+
sanitize_boot_params(&boot_params);
x86_early_init_platform_quirks();
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index db2182d63ed0..3fc0f9a794cb 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -3,6 +3,15 @@
/* Kprobes and Optprobes common header */
+#include <asm/asm.h>
+
+#ifdef CONFIG_FRAME_POINTER
+# define SAVE_RBP_STRING " push %" _ASM_BP "\n" \
+ " mov %" _ASM_SP ", %" _ASM_BP "\n"
+#else
+# define SAVE_RBP_STRING " push %" _ASM_BP "\n"
+#endif
+
#ifdef CONFIG_X86_64
#define SAVE_REGS_STRING \
/* Skip cs, ip, orig_ax. */ \
@@ -17,7 +26,7 @@
" pushq %r10\n" \
" pushq %r11\n" \
" pushq %rbx\n" \
- " pushq %rbp\n" \
+ SAVE_RBP_STRING \
" pushq %r12\n" \
" pushq %r13\n" \
" pushq %r14\n" \
@@ -48,7 +57,7 @@
" pushl %es\n" \
" pushl %ds\n" \
" pushl %eax\n" \
- " pushl %ebp\n" \
+ SAVE_RBP_STRING \
" pushl %edi\n" \
" pushl %esi\n" \
" pushl %edx\n" \
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f0153714ddac..0742491cbb73 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* raw stack chunk with redzones:
*/
__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
- regs->flags &= ~X86_EFLAGS_IF;
- trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
/*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index e675704fa6f7..8bb9594d0761 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
return NULL;
}
-void kvm_async_pf_task_wait(u32 token)
+/*
+ * @interrupt_kernel: Is this called from a routine which interrupts the kernel
+ * (other than user space)?
+ */
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
@@ -140,8 +144,10 @@ void kvm_async_pf_task_wait(u32 token)
n.token = token;
n.cpu = smp_processor_id();
- n.halted = is_idle_task(current) || preempt_count() > 1 ||
- rcu_preempt_depth();
+ n.halted = is_idle_task(current) ||
+ (IS_ENABLED(CONFIG_PREEMPT_COUNT)
+ ? preempt_count() > 1 || rcu_preempt_depth()
+ : interrupt_kernel);
init_swait_queue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
raw_spin_unlock(&b->lock);
@@ -269,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */
prev_state = exception_enter();
- kvm_async_pf_task_wait((u32)read_cr2());
+ kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
exception_exit(prev_state);
break;
case KVM_PV_REASON_PAGE_READY:
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 54180fa6f66f..add33f600531 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
load_cr3(initial_page_table);
#else
write_cr3(real_mode_header->trampoline_pgd);
+
+ /* Exiting long mode will fail if CR4.PCIDE is set. */
+ if (static_cpu_has(X86_FEATURE_PCID))
+ cr4_clear_bits(X86_CR4_PCIDE);
#endif
/* Jump to the identity-mapped low memory code */
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index d145a0b1f529..3dc26f95d46e 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
state->stack_info.type, state->stack_info.next_sp,
state->stack_mask, state->graph_idx);
- for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+ for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
+ sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
break;
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
* This determines if the frame pointer actually contains an encoded pointer to
* pt_regs on the stack. See ENCODE_FRAME_POINTER.
*/
+#ifdef CONFIG_X86_64
static struct pt_regs *decode_frame_pointer(unsigned long *bp)
{
unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
return (struct pt_regs *)(regs & ~0x1);
}
+#else
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+ unsigned long regs = (unsigned long)bp;
+
+ if (regs & 0x80000000)
+ return NULL;
+
+ return (struct pt_regs *)(regs | 0x80000000);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
+#else
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
+#endif
static bool update_stack_state(struct unwind_state *state,
unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
regs = decode_frame_pointer(next_bp);
if (regs) {
frame = (unsigned long *)regs;
- len = regs_size(regs);
+ len = KERNEL_REGS_SIZE;
state->got_irq = true;
} else {
frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
frame < prev_frame_end)
return false;
+ /*
+ * On 32-bit with user mode regs, make sure the last two regs are safe
+ * to access:
+ */
+ if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
+ !on_stack(info, frame, len + 2*sizeof(long)))
+ return false;
+
/* Move state to the next frame: */
if (regs) {
state->regs = regs;
@@ -328,6 +355,13 @@ bad_address:
state->regs->sp < (unsigned long)task_pt_regs(state->task))
goto the_end;
+ /*
+ * There are some known frame pointer issues on 32-bit. Disable
+ * unwinder warnings on 32-bit until it gets objtool support.
+ */
+ if (IS_ENABLED(CONFIG_X86_32))
+ goto the_end;
+
if (state->regs) {
printk_deferred_once(KERN_WARNING
"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 570b70d3f604..b95007e7c1b3 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -86,8 +86,8 @@ static struct orc_entry *orc_find(unsigned long ip)
idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
if (unlikely((idx >= lookup_num_blocks-1))) {
- orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n",
- idx, lookup_num_blocks, ip);
+ orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
+ idx, lookup_num_blocks, (void *)ip);
return NULL;
}
@@ -96,8 +96,8 @@ static struct orc_entry *orc_find(unsigned long ip)
if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
(__start_orc_unwind + stop > __stop_orc_unwind))) {
- orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n",
- idx, lookup_num_blocks, start, stop, ip);
+ orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
+ idx, lookup_num_blocks, start, stop, (void *)ip);
return NULL;
}
@@ -373,7 +373,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_R10:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg R10 at ip %p\n",
+ orc_warn("missing regs for base reg R10 at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -382,7 +382,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_R13:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg R13 at ip %p\n",
+ orc_warn("missing regs for base reg R13 at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -391,7 +391,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_DI:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg DI at ip %p\n",
+ orc_warn("missing regs for base reg DI at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -400,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_DX:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg DX at ip %p\n",
+ orc_warn("missing regs for base reg DX at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -408,7 +408,7 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
- orc_warn("unknown SP base reg %d for ip %p\n",
+ orc_warn("unknown SP base reg %d for ip %pB\n",
orc->sp_reg, (void *)state->ip);
goto done;
}
@@ -436,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_TYPE_REGS:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
- orc_warn("can't dereference registers at %p for ip %p\n",
+ orc_warn("can't dereference registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
goto done;
}
@@ -448,7 +448,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_TYPE_REGS_IRET:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
- orc_warn("can't dereference iret registers at %p for ip %p\n",
+ orc_warn("can't dereference iret registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
goto done;
}
@@ -465,7 +465,8 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
- orc_warn("unknown .orc_unwind entry type %d\n", orc->type);
+ orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
+ orc->type, (void *)orig_ip);
break;
}
@@ -487,7 +488,7 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
- orc_warn("unknown BP base reg %d for ip %p\n",
+ orc_warn("unknown BP base reg %d for ip %pB\n",
orc->bp_reg, (void *)orig_ip);
goto done;
}
@@ -496,7 +497,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (state->stack_info.type == prev_type &&
on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
state->sp <= prev_sp) {
- orc_warn("stack going in the wrong direction? ip=%p\n",
+ orc_warn("stack going in the wrong direction? ip=%pB\n",
(void *)orig_ip);
goto done;
}
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 3ea624452f93..3c48bc8bf08c 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
depends on HIGH_RES_TIMERS
# for TASKSTATS/TASK_DELAY_ACCT:
depends on NET && MULTIUSER
+ depends on X86_LOCAL_APIC
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
select ANON_INODES
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a36254cbf776..d90cdc77e077 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -425,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#op " %al \n\t" \
FOP_RET
-asm(".global kvm_fastop_exception \n"
- "kvm_fastop_exception: xor %esi, %esi; ret");
+asm(".pushsection .fixup, \"ax\"\n"
+ ".global kvm_fastop_exception \n"
+ "kvm_fastop_exception: xor %esi, %esi; ret\n"
+ ".popsection");
FOP_START(setcc)
FOP_SETCC(seto)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index eca30c1eb1d9..7a69cf053711 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3837,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
case KVM_PV_REASON_PAGE_NOT_PRESENT:
vcpu->arch.apf.host_apf_reason = 0;
local_irq_disable();
- kvm_async_pf_task_wait(fault_address);
+ kvm_async_pf_task_wait(fault_address, 0);
local_irq_enable();
break;
case KVM_PV_REASON_PAGE_READY:
@@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
unsigned level, unsigned gpte)
{
/*
- * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
- * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
- * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
- */
- gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
- /*
* The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
* If it is clear, there are no large pages at this level, so clear
* PT_PAGE_SIZE_MASK in gpte if that is the case.
*/
gpte &= level - mmu->last_nonleaf_level;
+ /*
+ * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
+ * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+ * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+ */
+ gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
return gpte & PT_PAGE_SIZE_MASK;
}
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
+ update_last_nonleaf_level(vcpu, context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 86b68dc5a649..f18d1f8d332b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -334,10 +334,11 @@ retry_walk:
--walker->level;
index = PT_INDEX(addr, walker->level);
-
table_gfn = gpte_to_gfn(pte);
offset = index * sizeof(pt_element_t);
pte_gpa = gfn_to_gpa(table_gfn) + offset;
+
+ BUG_ON(walker->level < 1);
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a2b804e10c95..95a01609d7ee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
- kvm_set_cr4(vcpu, vmcs12->host_cr4);
+ vmx_set_cr4(vcpu, vmcs12->host_cr4);
nested_ept_uninit_mmu_context(vcpu);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 72bf8c01c6e3..e1f095884386 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,12 @@
-# Kernel does not boot with instrumentation of tlb.c.
-KCOV_INSTRUMENT_tlb.o := n
+# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
+KCOV_INSTRUMENT_tlb.o := n
+KCOV_INSTRUMENT_mem_encrypt.o := n
+
+KASAN_SANITIZE_mem_encrypt.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_mem_encrypt.o = -pg
+endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o physaddr.o setup_nx.o tlb.o
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index a99679826846..320c6237e1d1 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -174,3 +174,15 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return "[mpx]";
return NULL;
}
+
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+ return addr + count <= __pa(high_memory);
+}
+
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+ phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
+
+ return valid_phys_addr_range(addr, count);
+}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49d9778376d7..0f3d0cea4d00 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,6 +30,7 @@
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 *new_asid, bool *need_flush)
{
@@ -80,7 +81,7 @@ void leave_mm(int cpu)
return;
/* Warn if we're not lazy. */
- WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
+ WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
switch_mm(NULL, &init_mm, NULL);
}
@@ -142,45 +143,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
__flush_tlb_all();
}
#endif
+ this_cpu_write(cpu_tlbstate.is_lazy, false);
if (real_prev == next) {
- VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
- next->context.ctx_id);
-
- if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
- /*
- * There's nothing to do: we weren't lazy, and we
- * aren't changing our mm. We don't need to flush
- * anything, nor do we need to update CR3, CR4, or
- * LDTR.
- */
- return;
- }
-
- /* Resume remote flushes and then read tlb_gen. */
- cpumask_set_cpu(cpu, mm_cpumask(next));
- next_tlb_gen = atomic64_read(&next->context.tlb_gen);
-
- if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
- next_tlb_gen) {
- /*
- * Ideally, we'd have a flush_tlb() variant that
- * takes the known CR3 value as input. This would
- * be faster on Xen PV and on hypothetical CPUs
- * on which INVPCID is fast.
- */
- this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
- next_tlb_gen);
- write_cr3(build_cr3(next, prev_asid));
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
- TLB_FLUSH_ALL);
- }
+ VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+ next->context.ctx_id);
/*
- * We just exited lazy mode, which means that CR4 and/or LDTR
- * may be stale. (Changes to the required CR4 and LDTR states
- * are not reflected in tlb_gen.)
+ * We don't currently support having a real mm loaded without
+ * our cpu set in mm_cpumask(). We have all the bookkeeping
+ * in place to figure out whether we would need to flush
+ * if our cpu were cleared in mm_cpumask(), but we don't
+ * currently use it.
*/
+ if (WARN_ON_ONCE(real_prev != &init_mm &&
+ !cpumask_test_cpu(cpu, mm_cpumask(next))))
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ return;
} else {
u16 new_asid;
bool need_flush;
@@ -199,10 +179,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
}
/* Stop remote flushes for the previous mm */
- if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
- cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
-
- VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+ VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+ real_prev != &init_mm);
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
/*
* Start remote flushes and then read tlb_gen.
@@ -233,6 +212,40 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
}
/*
+ * Please ignore the name of this function. It should be called
+ * switch_to_kernel_thread().
+ *
+ * enter_lazy_tlb() is a hint from the scheduler that we are entering a
+ * kernel thread or other context without an mm. Acceptable implementations
+ * include doing nothing whatsoever, switching to init_mm, or various clever
+ * lazy tricks to try to minimize TLB flushes.
+ *
+ * The scheduler reserves the right to call enter_lazy_tlb() several times
+ * in a row. It will notify us that we're going back to a real mm by
+ * calling switch_mm_irqs_off().
+ */
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+ return;
+
+ if (tlb_defer_switch_to_init_mm()) {
+ /*
+ * There's a significant optimization that may be possible
+ * here. We have accurate enough TLB flush tracking that we
+ * don't need to maintain coherence of TLB per se when we're
+ * lazy. We do, however, need to maintain coherence of
+ * paging-structure caches. We could, in principle, leave our
+ * old mm loaded and only switch to init_mm when
+ * tlb_remove_page() happens.
+ */
+ this_cpu_write(cpu_tlbstate.is_lazy, true);
+ } else {
+ switch_mm(NULL, &init_mm, NULL);
+ }
+}
+
+/*
* Call this when reinitializing a CPU. It fixes the following potential
* problems:
*
@@ -303,16 +316,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
/* This code cannot presently handle being reentered. */
VM_WARN_ON(!irqs_disabled());
+ if (unlikely(loaded_mm == &init_mm))
+ return;
+
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);
- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
+ if (this_cpu_read(cpu_tlbstate.is_lazy)) {
/*
- * We're in lazy mode -- don't flush. We can get here on
- * remote flushes due to races and on local flushes if a
- * kernel thread coincidentally flushes the mm it's lazily
- * still using.
+ * We're in lazy mode. We need to at least flush our
+ * paging-structure cache to avoid speculatively reading
+ * garbage into our TLB. Since switching to init_mm is barely
+ * slower than a minimal flush, just switch to init_mm.
*/
+ switch_mm_irqs_off(NULL, &init_mm, NULL);
return;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 8c9573660d51..0554e8aef4d5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog)
/* if (index >= array->map.max_entries)
* goto out;
*/
- EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
+ EMIT2(0x89, 0xD2); /* mov edx, edx */
+ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
- EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
#define OFFSET1 43 /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0e7ef69e8531..d669e9d89001 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
int rc;
rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
- "x86/xen/hvm_guest:prepare",
+ "x86/xen/guest:prepare",
cpu_up_prepare_cb, cpu_dead_cb);
if (rc >= 0) {
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "x86/xen/hvm_guest:online",
+ "x86/xen/guest:online",
xen_cpu_up_online, NULL);
if (rc < 0)
cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
diff --git a/block/bio.c b/block/bio.c
index b38e962fa83e..101c2a9b5481 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
*/
bmd->is_our_pages = map_data ? 0 : 1;
memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
- iov_iter_init(&bmd->iter, iter->type, bmd->iov,
- iter->nr_segs, iter->count);
+ bmd->iter = *iter;
+ bmd->iter.iov = bmd->iov;
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
int ret, offset;
struct iov_iter i;
struct iovec iov;
+ struct bio_vec *bvec;
iov_for_each(iov, i, *iter) {
unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
ret = get_user_pages_fast(uaddr, local_nr_pages,
(iter->type & WRITE) != WRITE,
&pages[cur_page]);
- if (ret < local_nr_pages) {
+ if (unlikely(ret < local_nr_pages)) {
+ for (j = cur_page; j < page_limit; j++) {
+ if (!pages[j])
+ break;
+ put_page(pages[j]);
+ }
ret = -EFAULT;
goto out_unmap;
}
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
offset = offset_in_page(uaddr);
for (j = cur_page; j < page_limit; j++) {
unsigned int bytes = PAGE_SIZE - offset;
+ unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (len <= 0)
break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
bytes)
break;
+ /*
+ * check if the vector was merged with the previous one;
+ * drop the page reference if so
+ */
+ if (bio->bi_vcnt == prev_bi_vcnt)
+ put_page(pages[j]);
+
len -= bytes;
offset = 0;
}
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
return bio;
out_unmap:
- for (j = 0; j < nr_pages; j++) {
- if (!pages[j])
- break;
- put_page(pages[j]);
+ bio_for_each_segment_all(bvec, bio, j) {
+ put_page(bvec->bv_page);
}
out:
kfree(pages);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 980e73095643..de294d775acf 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q)
goto err;
/*
- * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
+ * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
* didn't exist yet (because we don't know what to name the directory
* until the queue is registered to a gendisk).
*/
+ if (q->elevator && !q->sched_debugfs_dir)
+ blk_mq_debugfs_register_sched(q);
+
+ /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
goto err;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0fea76aa0f3f..17816a028dcb 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td)
tg->disptime = jiffies - 1;
throtl_select_dispatch(sq);
- throtl_schedule_next_dispatch(sq, false);
+ throtl_schedule_next_dispatch(sq, true);
}
rcu_read_unlock();
throtl_select_dispatch(&td->service_queue);
- throtl_schedule_next_dispatch(&td->service_queue, false);
+ throtl_schedule_next_dispatch(&td->service_queue, true);
queue_work(kthrotld_workqueue, &td->dispatch_work);
}
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index dbddff8174e5..15d25ccd51a5 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -207,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct scsi_request *sreq = &job->sreq;
+ /* called right after the request is allocated for the request_queue */
+
+ sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+ if (!sreq->sense)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bsg_initialize_rq(struct request *req)
+{
+ struct bsg_job *job = blk_mq_rq_to_pdu(req);
+ struct scsi_request *sreq = &job->sreq;
+ void *sense = sreq->sense;
+
+ /* called right before the request is given to the request_queue user */
+
memset(job, 0, sizeof(*job));
scsi_req_init(sreq);
+
+ sreq->sense = sense;
sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
- sreq->sense = kzalloc(sreq->sense_len, gfp);
- if (!sreq->sense)
- return -ENOMEM;
job->req = req;
- job->reply = sreq->sense;
+ job->reply = sense;
job->reply_len = sreq->sense_len;
job->dd_data = job + 1;
-
- return 0;
}
static void bsg_exit_rq(struct request_queue *q, struct request *req)
@@ -251,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
q->init_rq_fn = bsg_init_rq;
q->exit_rq_fn = bsg_exit_rq;
+ q->initialize_rq_fn = bsg_initialize_rq;
q->request_fn = bsg_request_fn;
ret = blk_init_allocated_queue(q);
diff --git a/crypto/shash.c b/crypto/shash.c
index 5e31c8d776df..325a14da5827 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
int err;
absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
- buffer = kmalloc(absize, GFP_KERNEL);
+ buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
- struct scatterlist *sg = req->src;
- unsigned int offset = sg->offset;
unsigned int nbytes = req->nbytes;
+ struct scatterlist *sg;
+ unsigned int offset;
int err;
- if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+ if (nbytes &&
+ (sg = req->src, offset = sg->offset,
+ nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 4faa0fd53b0c..d5692e35fab1 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
static int skcipher_walk_first(struct skcipher_walk *walk)
{
- walk->nbytes = 0;
-
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
- if (unlikely(!walk->total))
- return 0;
-
walk->buffer = NULL;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ walk->total = req->cryptlen;
+ walk->nbytes = 0;
+
+ if (unlikely(!walk->total))
+ return 0;
+
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->total = req->cryptlen;
walk->iv = req->iv;
walk->oiv = req->iv;
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int err;
+ walk->nbytes = 0;
+
+ if (unlikely(!walk->total))
+ return 0;
+
walk->flags &= ~SKCIPHER_WALK_PHYS;
scatterwalk_start(&walk->in, req->src);
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a8c882..e31828ed0046 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ctx->name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
+ "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+ err = -ENAMETOOLONG;
+ goto err_drop_spawn;
+ }
} else
goto err_drop_spawn;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9565d572f8dd..de56394dd161 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1178,12 +1178,44 @@ dev_put:
return ret;
}
+static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+{
+ if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+ struct acpi_iort_node *parent;
+ struct acpi_iort_id_mapping *map;
+ int i;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
+ iort_node->mapping_offset);
+
+ for (i = 0; i < iort_node->mapping_count; i++, map++) {
+ if (!map->output_reference)
+ continue;
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node,
+ iort_table, map->output_reference);
+ /*
+ * If we detect an RC->SMMU mapping, make sure
+ * we enable ACS on the system.
+ */
+ if ((parent->type == ACPI_IORT_NODE_SMMU) ||
+ (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
+ pci_request_acs();
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static void __init iort_init_platform_devices(void)
{
struct acpi_iort_node *iort_node, *iort_end;
struct acpi_table_iort *iort;
struct fwnode_handle *fwnode;
int i, ret;
+ bool acs_enabled = false;
/*
* iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
return;
}
+ if (!acs_enabled)
+ acs_enabled = iort_enable_acs(iort_node);
+
if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3fb8ff513461..e26ea209b63e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
* }
* }
*
- * Calling this function with index %2 return %-ENOENT and with index %3
- * returns the last entry. If the property does not contain any more values
- * %-ENODATA is returned. The NULL entry must be single integer and
- * preferably contain value %0.
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain value %0.
*
* Return: %0 on success, negative error code on failure.
*/
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
data = acpi_device_data_of_node(fwnode);
if (!data)
- return -EINVAL;
+ return -ENOENT;
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
- return ret;
+ return ret == -EINVAL ? -ENOENT : -EINVAL;
/*
* The simplest case is when the value is a single reference. Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(obj->reference.handle, &device);
if (ret)
- return ret;
+ return ret == -ENODEV ? -EINVAL : ret;
args->adev = device;
args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
* The index argument is then used to determine which reference
* the caller wants (along with the arguments).
*/
- if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
- return -EPROTO;
+ if (obj->type != ACPI_TYPE_PACKAGE)
+ return -EINVAL;
+ if (index >= obj->package.count)
+ return -ENOENT;
element = obj->package.elements;
end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(element->reference.handle,
&device);
if (ret)
- return -ENODEV;
+ return -EINVAL;
nargs = 0;
element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
else if (type == ACPI_TYPE_LOCAL_REFERENCE)
break;
else
- return -EPROTO;
+ return -EINVAL;
}
if (nargs > MAX_ACPI_REFERENCE_ARGS)
- return -EPROTO;
+ return -EINVAL;
if (idx == index) {
args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
element++;
} else {
- return -EPROTO;
+ return -EINVAL;
}
idx++;
}
- return -ENODATA;
+ return -ENOENT;
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index ab34239a76ee..0621a95b8597 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return true;
}
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node: struct binder_node for which to get refs
+ * @proc: returns @node->proc if valid
+ * @error: if no @proc then returns BR_DEAD_REPLY
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
+ * Also sets @procp if valid. If @node->proc is NULL, indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY.
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+ struct binder_node *node,
+ struct binder_proc **procp,
+ uint32_t *error)
+{
+ struct binder_node *target_node = NULL;
+
+ binder_node_inner_lock(node);
+ if (node->proc) {
+ target_node = node;
+ binder_inc_node_nilocked(node, 1, 0, NULL);
+ binder_inc_node_tmpref_ilocked(node);
+ node->proc->tmp_ref++;
+ *procp = node->proc;
+ } else
+ *error = BR_DEAD_REPLY;
+ binder_node_inner_unlock(node);
+
+ return target_node;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
- binder_inc_node(ref->node, 1, 0, NULL);
- target_node = ref->node;
- }
- binder_proc_unlock(proc);
- if (target_node == NULL) {
+ target_node = binder_get_node_refs_for_txn(
+ ref->node, &target_proc,
+ &return_error);
+ } else {
binder_user_error("%d:%d got transaction to invalid handle\n",
- proc->pid, thread->pid);
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- return_error_param = -EINVAL;
- return_error_line = __LINE__;
- goto err_invalid_target_handle;
}
+ binder_proc_unlock(proc);
} else {
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
- if (target_node == NULL) {
+ if (target_node)
+ target_node = binder_get_node_refs_for_txn(
+ target_node, &target_proc,
+ &return_error);
+ else
return_error = BR_DEAD_REPLY;
- mutex_unlock(&context->context_mgr_node_lock);
- return_error_line = __LINE__;
- goto err_no_context_mgr_node;
- }
- binder_inc_node(target_node, 1, 0, NULL);
mutex_unlock(&context->context_mgr_node_lock);
}
- e->to_node = target_node->debug_id;
- binder_node_lock(target_node);
- target_proc = target_node->proc;
- if (target_proc == NULL) {
- binder_node_unlock(target_node);
- return_error = BR_DEAD_REPLY;
+ if (!target_node) {
+ /*
+ * return_error is set above
+ */
+ return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
}
- binder_inner_proc_lock(target_proc);
- target_proc->tmp_ref++;
- binder_inner_proc_unlock(target_proc);
- binder_node_unlock(target_node);
+ e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
@@ -3090,6 +3126,8 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3104,13 +3142,14 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
-err_no_context_mgr_node:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
- if (target_node)
+ if (target_node) {
binder_dec_node(target_node, 1, 0);
+ binder_dec_node_tmpref(target_node);
+ }
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 8fe165844e47..064f5e31ec55 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
struct binder_alloc *alloc;
uintptr_t page_addr;
size_t index;
+ struct vm_area_struct *vma;
alloc = page->alloc;
if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- if (alloc->vma) {
+ vma = alloc->vma;
+ if (vma) {
mm = get_task_mm(alloc->tsk);
if (!mm)
goto err_get_task_mm_failed;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
+ }
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+ if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range(alloc->vma,
+ zap_page_range(vma,
page_addr + alloc->user_buffer_offset,
PAGE_SIZE);
@@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_kernel_end(alloc, index);
- list_lru_isolate(lru, item);
-
+ spin_lock(lock);
mutex_unlock(&alloc->mutex);
- return LRU_REMOVED;
+ return LRU_REMOVED_RETRY;
err_down_write_mmap_sem_failed:
- mmput(mm);
+ mmput_async(mm);
err_get_task_mm_failed:
err_page_already_freed:
mutex_unlock(&alloc->mutex);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3855902f2c5b..aae2402f3791 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
+ ssize_t n;
+ cpumask_var_t mask;
struct node *node_dev = to_node(dev);
- const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
- return cpumap_print_to_pagebuf(list, buf, mask);
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return 0;
+
+ cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+ n = cpumap_print_to_pagebuf(list, buf, mask);
+ free_cpumask_var(mask);
+
+ return n;
}
static inline ssize_t node_read_cpumask(struct device *dev,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index d0b65bbe7e15..7ed99c1b2a8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,6 +21,7 @@
#include <linux/phy.h>
struct property_set {
+ struct device *dev;
struct fwnode_handle fwnode;
const struct property_entry *properties;
};
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
* Caller is responsible to call fwnode_handle_put() on the returned
* args->fwnode pointer.
*
+ * Returns: %0 on success
+ * %-ENOENT when the index is out of bounds, the index has an empty
+ * reference or the property was not found
+ * %-EINVAL on parse error
*/
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
void device_remove_properties(struct device *dev)
{
struct fwnode_handle *fwnode;
+ struct property_set *pset;
fwnode = dev_fwnode(dev);
if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset.
*/
- if (is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode);
+ if (pset) {
set_primary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
} else {
- fwnode = fwnode->secondary;
- if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode->secondary);
+ if (pset && dev == pset->dev)
set_secondary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
- }
}
+ if (pset && dev == pset->dev)
+ pset_free_set(pset);
}
EXPORT_SYMBOL_GPL(device_remove_properties);
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode);
+ p->dev = dev;
return 0;
}
EXPORT_SYMBOL_GPL(device_add_properties);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4a438b8abe27..2dfe99b328f8 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,7 +17,7 @@ if BLK_DEV
config BLK_DEV_NULL_BLK
tristate "Null test block driver"
- depends on CONFIGFS_FS
+ select CONFIGFS_FS
config BLK_DEV_FD
tristate "Normal floppy disk support"
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3684e21d543f..883dfebd3014 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
+ if (ret < 0)
+ ret = BLK_STS_IOERR;
+ else if (!ret)
+ ret = BLK_STS_OK;
complete(&cmd->send_complete);
- return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+ return ret;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 2981c27d3aae..f149d3e61234 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -766,27 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}
-static bool zram_same_page_read(struct zram *zram, u32 index,
- struct page *page,
- unsigned int offset, unsigned int len)
-{
- zram_slot_lock(zram, index);
- if (unlikely(!zram_get_handle(zram, index) ||
- zram_test_flag(zram, index, ZRAM_SAME))) {
- void *mem;
-
- zram_slot_unlock(zram, index);
- mem = kmap_atomic(page);
- zram_fill_page(mem + offset, len,
- zram_get_element(zram, index));
- kunmap_atomic(mem);
- return true;
- }
- zram_slot_unlock(zram, index);
-
- return false;
-}
-
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -884,11 +863,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
zram_slot_unlock(zram, index);
}
- if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
- return 0;
-
zram_slot_lock(zram, index);
handle = zram_get_handle(zram, index);
+ if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
+ unsigned long value;
+ void *mem;
+
+ value = handle ? zram_get_element(zram, index) : 0;
+ mem = kmap_atomic(page);
+ zram_fill_page(mem, PAGE_SIZE, value);
+ kunmap_atomic(mem);
+ zram_slot_unlock(zram, index);
+ return 0;
+ }
+
size = zram_get_obj_size(zram, index);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index c834f5abfc49..4c10456f8a32 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -105,6 +105,7 @@ err:
return ret;
}
+EXPORT_SYMBOL_GPL(clk_bulk_prepare);
#endif /* CONFIG_HAVE_CLK_PREPARE */
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 62d7854e4b87..5970a50671b9 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(10), 8, GFLAGS),
GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 0, GFLAGS),
GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 1, GFLAGS),
GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 2, GFLAGS),
GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(2), 15, GFLAGS),
COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
/* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
"aclk_peri",
"hclk_peri",
"pclk_peri",
+ "pclk_pmu",
+ "sclk_timer5",
};
static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index e40b77583c47..d8d3cb67b402 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
#define PLL_ENABLED (1 << 31)
#define PLL_LOCKED (1 << 29)
+static void exynos4_clk_enable_pll(u32 reg)
+{
+ u32 pll_con = readl(reg_base + reg);
+ pll_con |= PLL_ENABLED;
+ writel(pll_con, reg_base + reg);
+
+ while (!(pll_con & PLL_LOCKED)) {
+ cpu_relax();
+ pll_con = readl(reg_base + reg);
+ }
+}
+
static void exynos4_clk_wait_for_pll(u32 reg)
{
u32 pll_con;
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
samsung_clk_save(reg_base, exynos4_save_pll,
ARRAY_SIZE(exynos4_clk_pll_regs));
+ exynos4_clk_enable_pll(EPLL_CON0);
+ exynos4_clk_enable_pll(VPLL_CON0);
+
if (exynos4_soc == EXYNOS4210) {
samsung_clk_save(reg_base, exynos4_save_soc,
ARRAY_SIZE(exynos4210_clk_save));
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index d9fbbf01062b..0f9754e07719 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;
-static struct dentry *dbgfs_root;
-
#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
char *desc;
};
+static struct dentry *dbgfs_root;
+
static void artpec6_crypto_init_debugfs(void)
{
dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index b585ce54a802..4835dd4a9e50 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct scatterlist sg[1], *tsg;
- int err = 0, len = 0, reg, ncp;
+ int err = 0, len = 0, reg, ncp = 0;
unsigned int i;
- const u32 *buffer = (const u32 *)rctx->buffer;
+ u32 *buffer = (void *)rctx->buffer;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
reg |= HASH_CR_DMAA;
stm32_hash_write(hdev, HASH_CR, reg);
- for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
- stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-
- stm32_hash_set_nblw(hdev, ncp);
+ if (ncp) {
+ memset(buffer + ncp, 0,
+ DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+ writesl(hdev->io_base + HASH_DIN, buffer,
+ DIV_ROUND_UP(ncp, sizeof(u32)));
+ }
+ stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
stm32_hash_write(hdev, HASH_STR, reg);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 66fb40d0ebdb..03830634e141 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -383,7 +383,7 @@ err_put_fd:
return err;
}
-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
ktime_to_ns(fence->timestamp) :
ktime_set(0, 0);
+
+ return info->status;
}
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
* sync_fence_info and return the actual number of fences on
* info->num_fences.
*/
- if (!info.num_fences)
+ if (!info.num_fences) {
+ info.status = dma_fence_is_signaled(sync_file->fence);
goto no_fences;
+ } else {
+ info.status = 1;
+ }
if (info.num_fences < num_fences)
return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < num_fences; i++)
- sync_fill_fence_info(fences[i], &fence_info[i]);
+ for (i = 0; i < num_fences; i++) {
+ int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+ info.status = info.status <= 0 ? info.status : status;
+ }
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
sync_file_get_name(sync_file, info.name, sizeof(info.name));
- info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 32905d5606ac..339186f25a2a 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
struct msgdma_sw_desc *desc;
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
list_del(&desc->node);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
INIT_LIST_HEAD(&desc->tx_list);
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
struct msgdma_device *mdev = to_mdev(tx->chan);
struct msgdma_sw_desc *new;
dma_cookie_t cookie;
+ unsigned long flags;
new = tx_to_desc(tx);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
cookie = dma_cookie_assign(tx);
list_add_tail(&new->node, &mdev->pending_list);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
return cookie;
}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct msgdma_extended_desc *desc;
size_t copy;
u32 desc_cnt;
+ unsigned long irqflags;
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
do {
/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
u32 desc_cnt = 0, i;
struct scatterlist *sg;
u32 stride;
+ unsigned long irqflags;
for_each_sg(sgl, sg, sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
avail = sg_dma_len(sgl);
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
static void msgdma_issue_pending(struct dma_chan *chan)
{
struct msgdma_device *mdev = to_mdev(chan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_start_transfer(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
struct msgdma_device *mdev = to_mdev(dchan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_free_descriptors(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
kfree(mdev->sw_desq);
}
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
u32 count;
u32 __maybe_unused size;
u32 __maybe_unused status;
+ unsigned long flags;
- spin_lock(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
/* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
* bits. So we need to just drop these values.
*/
size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
- status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+ status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
}
- spin_unlock(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3879f80a4815..a7ea20e7b8e9 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
- unsigned int width, pset_len;
+ unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len))
return NULL;
+ /* Align the array size (acnt block) with the transfer properties */
+ switch (__ffs((src | dest | len))) {
+ case 0:
+ array_size = SZ_32K - 1;
+ break;
+ case 1:
+ array_size = SZ_32K - 2;
+ break;
+ default:
+ array_size = SZ_32K - 4;
+ break;
+ }
+
if (len < SZ_64K) {
/*
* Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* When the full_length is a multiple of 32767 one slot can be
* used to complete the transfer.
*/
- width = SZ_32K - 1;
+ width = array_size;
pset_len = rounddown(len, width);
/* One slot is enough for lengths multiple of (SZ_32K -1) */
if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
dest += pset_len;
src += pset_len;
- pset_len = width = len % (SZ_32K - 1);
+ pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 2f65a8fde21d..f1d04b70ee67 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
mutex_lock(&xbar->mutex);
map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
xbar->dma_requests);
- mutex_unlock(&xbar->mutex);
if (map->xbar_out == xbar->dma_requests) {
+ mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
return ERR_PTR(-ENOMEM);
}
set_bit(map->xbar_out, xbar->dma_inuse);
+ mutex_unlock(&xbar->mutex);
map->xbar_in = (u16)dma_spec->args[0];
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3388d54ba114..3f80f167ed56 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -453,7 +453,8 @@ config GPIO_TS4800
config GPIO_THUNDERX
tristate "Cavium ThunderX/OCTEON-TX GPIO"
depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
- depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+ depends on PCI_MSI
+ select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS
help
Say yes here to support the on-chip GPIO lines on the ThunderX
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index dbf869fb63ce..3233b72b6828 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- irq_set_handler_locked(d, handle_edge_irq);
+ /*
+ * Edge IRQs are already cleared/acked in irq_handler and do
+ * not need to be masked; as a result, the handle_edge_irq()
+ * logic is excessive here and may cause loss of interrupts.
+ * So just use handle_simple_irq.
+ */
+ irq_set_handler_locked(d, handle_simple_irq);
return 0;
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
void __iomem *isr_reg = NULL;
- u32 isr;
+ u32 enabled, isr, level_mask;
unsigned int bit;
struct gpio_bank *bank = gpiobank;
unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
pm_runtime_get_sync(bank->chip.parent);
while (1) {
- u32 isr_saved, level_mask = 0;
- u32 enabled;
-
raw_spin_lock_irqsave(&bank->lock, lock_flags);
enabled = omap_get_gpio_irqbank_mask(bank);
- isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+ isr = readl_relaxed(isr_reg) & enabled;
if (bank->level_mask)
level_mask = bank->level_mask & enabled;
+ else
+ level_mask = 0;
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
executing them */
- omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
- omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+ if (isr & ~level_mask)
+ omap_clear_gpio_irqbank(bank, isr & ~level_mask);
raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
/*---------------------------------------------------------------------*/
-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
{
static bool called;
u32 rev;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 4d2113530735..eb4528c87c0b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (pin <= 255) {
char ev_name[5];
- sprintf(ev_name, "_%c%02X",
+ sprintf(ev_name, "_%c%02hhX",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7ef6c28a34d9..bc746131987f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4e53aae9a1fb..0028591f3f95 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2960,6 +2960,7 @@ out:
drm_modeset_backoff(&ctx);
}
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19404c96eeb1..af289d35b77a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3013,10 +3013,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ unsigned long flags;
+
GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO);
- i915_gem_request_submit(request);
+
+ spin_lock_irqsave(&request->engine->timeline->lock, flags);
+ __i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
+ spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
static void engine_set_wedged(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d805b6e6fe71..27743be5b768 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
connector->encoder->base.id,
connector->encoder->name);
- /* ELD Conn_Type */
- connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc_state))
- connector->eld[5] |= (1 << 2);
-
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->display.audio_codec_enable)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 183e87e8ea31..5d4cd3d00564 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
+
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
@@ -1233,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
{
enum port port;
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
if (!dev_priv->vbt.child_dev_num)
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index ff9ecd211abb..b8315bca852b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,7 +74,7 @@
#define I9XX_CSC_COEFF_1_0 \
((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
{
return !state->degamma_lut &&
!state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
}
mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy(state)) {
+ if (!crtc_state_is_legacy_gamma(state)) {
mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
}
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
return;
}
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
glk_load_degamma_lut(state);
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
return;
}
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
uint32_t i, lut_size;
uint32_t word0, word1;
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
/* Turn off degamma/gamma on CGM block. */
I915_WRITE(CGM_PIPE_MODE(pipe),
(state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
return 0;
/*
- * We also allow no degamma lut and a gamma lut at the legacy
+ * We also allow no degamma lut/ctm and a gamma lut at the legacy
* size (256 entries).
*/
- if (!crtc_state->degamma_lut &&
- crtc_state->gamma_lut &&
- crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+ if (crtc_state_is_legacy_gamma(crtc_state))
return 0;
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 965988f79a55..92c1f8e166dc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4b4fd1f8110b..476681d5940c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
out:
if (ret && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
- if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+ if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
DRM_ERROR("Port %c enabled but PHY powered down? "
"(PHY_CTL %08x)\n", port_name(port), tmp);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 00cd17c76fdc..5c7828c52d12 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_state *pipe_config;
- int htot = I915_READ(HTOTAL(cpu_transcoder));
- int hsync = I915_READ(HSYNC(cpu_transcoder));
- int vtot = I915_READ(VTOTAL(cpu_transcoder));
- int vsync = I915_READ(VSYNC(cpu_transcoder));
+ u32 htot, hsync, vtot, vsync;
enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
i9xx_crtc_clock_get(intel_crtc, pipe_config);
mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+ cpu_transcoder = pipe_config->cpu_transcoder;
+ htot = I915_READ(HTOTAL(cpu_transcoder));
+ hsync = I915_READ(HSYNC(cpu_transcoder));
+ vtot = I915_READ(VTOTAL(cpu_transcoder));
+ vsync = I915_READ(VSYNC(cpu_transcoder));
+
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -12359,7 +12363,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- bool hw_check = intel_state->modeset;
u64 put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
int i;
@@ -12376,7 +12379,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (needs_modeset(new_crtc_state) ||
to_intel_crtc_state(new_crtc_state)->update_pipe) {
- hw_check = true;
put_domains[to_intel_crtc(crtc)->pipe] =
modeset_get_crtc_power_domains(crtc,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 64134947c0aa..203198659ab2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_dp->panel_power_off_time = ktime_get_boottime();
wait_panel_off(intel_dp);
+ intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
* seems sufficient to avoid this problem.
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
vbt.t11_t12);
}
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 09b670929786..de38d014ed39 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
},
};
-static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
-{
- return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
- BIT(phy_info->channel[DPIO_CH0].port);
-}
-
static const struct bxt_ddi_phy_info *
bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
{
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- enum port port;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
return false;
}
- for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
- u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
- if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
- "for port %c powered down "
- "(PHY_CTL %08x)\n",
- phy, port_name(port), tmp);
-
- return false;
- }
- }
-
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 951e834dd274..28a778b785ac 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -30,6 +30,21 @@
#include "intel_drv.h"
#include "i915_drv.h"
+static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
+{
+ u8 conn_type;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ conn_type = DRM_ELD_CONN_TYPE_DP;
+ } else {
+ conn_type = DRM_ELD_CONN_TYPE_HDMI;
+ }
+
+ connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
+ connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
+}
+
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
+ intel_connector_update_eld_conn_type(connector);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b66d8e136aa3..49577eba8e7e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
enum i915_power_well_id id = power_well->id;
bool wait_fuses = power_well->hsw.has_fuses;
- enum skl_power_gate pg;
+ enum skl_power_gate uninitialized_var(pg);
u32 val;
if (wait_fuses) {
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
}
#undef CNL_PROCMON_IDX
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index dbb31a014419..deaf869374ea 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -248,7 +248,7 @@ disable_clks:
clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
put_clk:
clk_put(ahb_clk);
put_gdsc:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index c2bdad88447e..824067d2d427 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 3,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 6fcb58ab718c..440977677001 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
- pm_runtime_put_autosuspend(&pdev->dev);
-
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f15821a0d900..ea5bb0e1632c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence;
int i, ret;
- if (!exclusive) {
- /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
- * which makes this a slightly strange place to call it. OTOH this
- * is a convenient can-fail point to hook it in. (And similar to
- * how etnaviv and nouveau handle this.)
- */
- ret = reservation_object_reserve_shared(msm_obj->resv);
- if (ret)
- return ret;
- }
-
fobj = reservation_object_get_list(msm_obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
}
vaddr = msm_gem_get_vaddr(obj);
- if (!vaddr) {
+ if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace);
drm_gem_object_unreference(obj);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(vaddr);
}
if (bo)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5d0a75d4b249..93535cac0676 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -221,7 +221,7 @@ fail:
return ret;
}
-static int submit_fence_sync(struct msm_gem_submit *submit)
+static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
int i, ret = 0;
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+ if (!write) {
+ /* NOTE: _reserve_shared() must happen before
+ * _add_shared_fence(), which makes this a slightly
+ * strange place to call it. OTOH this is a
+ * convenient can-fail point to hook it in.
+ */
+ ret = reservation_object_reserve_shared(msm_obj->resv);
+ if (ret)
+ return ret;
+ }
+
+ if (no_implicit)
+ continue;
+
ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
if (ret)
break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
- ret = submit_fence_sync(submit);
- if (ret)
- goto out;
- }
+ ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
+ if (ret)
+ goto out;
ret = submit_pin_objects(submit);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ffbff27600e0..6a887032c66a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->aspace) {
+
+ if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0);
msm_gem_address_space_put(gpu->aspace);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0366b8092f97..ec56794ad039 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_SPACE_TO_END() does not access the tail more
+ * than once.
+ */
n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n);
- fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+ smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
sz -= n;
ptr += n;
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
if (ret)
goto out;
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_CNT_TO_END() does not access the head more than
+ * once.
+ */
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
if (copy_to_user(buf, fptr, n)) {
ret = -EFAULT;
goto out;
}
- fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+ smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
*ppos += n;
wake_up_all(&rd->fifo_event);
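Note on the msm_rd hunks above: the head and tail indices are now published with smp_store_release() so the bytes copied into the FIFO are visible before the index moves, and the added comments explain why the matching acquire is optional when the opposite index is only read once. A hedged userspace analogue of the producer side, using C11 atomics in place of the kernel barriers (buffer size and helper names are invented for the example, not the driver's):

    #include <stdatomic.h>
    #include <stdio.h>

    #define BUF_SZ 64                      /* power of two, like the driver's FIFO */

    static char buf[BUF_SZ];
    static _Atomic unsigned int head;      /* written by the producer only */
    static _Atomic unsigned int tail;      /* written by the consumer only */

    /* Producer: copy what fits, then publish the new head with release
     * semantics so a consumer never observes the index before the data. */
    static unsigned int fifo_put(const char *src, unsigned int len)
    {
            unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);
            unsigned int t = atomic_load_explicit(&tail, memory_order_acquire);
            unsigned int space = BUF_SZ - 1 - ((h - t) & (BUF_SZ - 1));
            unsigned int n = len < space ? len : space;
            unsigned int i;

            for (i = 0; i < n; i++)
                    buf[(h + i) & (BUF_SZ - 1)] = src[i];

            atomic_store_explicit(&head, (h + n) & (BUF_SZ - 1),
                                  memory_order_release);
            return n;
    }

    int main(void)
    {
            printf("queued %u bytes\n", fifo_put("hello", 5));
            return 0;
    }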
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9ea6cd5a1370..3cf1a6932fac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(hdmi->mod_clk)) {
dev_err(dev, "Couldn't get the HDMI mod clock\n");
- return PTR_ERR(hdmi->mod_clk);
+ ret = PTR_ERR(hdmi->mod_clk);
+ goto err_disable_bus_clk;
}
clk_prepare_enable(hdmi->mod_clk);
hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
if (IS_ERR(hdmi->pll0_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
- return PTR_ERR(hdmi->pll0_clk);
+ ret = PTR_ERR(hdmi->pll0_clk);
+ goto err_disable_mod_clk;
}
hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
if (IS_ERR(hdmi->pll1_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
- return PTR_ERR(hdmi->pll1_clk);
+ ret = PTR_ERR(hdmi->pll1_clk);
+ goto err_disable_mod_clk;
}
ret = sun4i_tmds_create(hdmi);
if (ret) {
dev_err(dev, "Couldn't create the TMDS clock\n");
- return ret;
+ goto err_disable_mod_clk;
}
writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
ret = sun4i_hdmi_i2c_create(dev, hdmi);
if (ret) {
dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
- return ret;
+ goto err_disable_mod_clk;
}
drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
drm_encoder_cleanup(&hdmi->encoder);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
+err_disable_mod_clk:
+ clk_disable_unprepare(hdmi->mod_clk);
+err_disable_bus_clk:
+ clk_disable_unprepare(hdmi->bus_clk);
return ret;
}
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
+ clk_disable_unprepare(hdmi->mod_clk);
+ clk_disable_unprepare(hdmi->bus_clk);
}
static const struct component_ops sun4i_hdmi_ops = {
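Note on the sun4i HDMI hunks above: every failure after clk_prepare_enable() now jumps to an error label so the clocks are unwound in reverse order of acquisition, and unbind releases them too. A rough standalone sketch of the same goto-ladder pattern, with made-up resource names rather than the driver's clock handles:

    #include <stdio.h>

    /* Illustrative acquire/release pair standing in for clk_prepare_enable()
     * and clk_disable_unprepare(); a real driver would use the clk API. */
    static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
    static void release(const char *name) { printf("release %s\n", name); }

    static int bind_example(void)
    {
            int ret;

            ret = acquire("bus");
            if (ret)
                    return ret;

            ret = acquire("mod");
            if (ret)
                    goto err_release_bus;

            ret = acquire("pll");
            if (ret)
                    goto err_release_mod;

            return 0;

            /* Labels undo the acquisitions in reverse order, so a failure at
             * any step releases exactly what was taken before it. */
    err_release_mod:
            release("mod");
    err_release_bus:
            release("bus");
            return ret;
    }

    int main(void)
    {
            return bind_example();
    }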
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6a573d21d3cc..658fa2d3e40c 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
return -EINVAL;
}
+ /*
+ * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
+ * i.MX53 channel arbitration locking doesn't seem to work properly.
+ * Allow enabling the lock feature on IPUv3H / i.MX6 only.
+ */
+ if (bursts && ipu->ipu_type != IPUV3H)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
if (channel->num == idmac_lock_en_info[i].chnum)
break;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c35f74c83065..c860a7997cb5 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -73,6 +73,14 @@
#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
+#define IPU_PRE_STORE_ENG_STATUS 0x120
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30)
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31)
+
#define IPU_PRE_STORE_ENG_SIZE 0x130
#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
dma_addr_t buffer_paddr;
void *buffer_virt;
bool in_use;
+ unsigned int safe_window_end;
};
static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
u32 active_bpp = info->cpp[0] >> 1;
u32 val;
+ /* calculate safe window for ctrl register updates */
+ pre->safe_window_end = height - 2;
+
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(5);
+ unsigned short current_yblock;
+ u32 val;
+
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
+ return;
+ }
+
+ val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
+ current_yblock =
+ (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
+ IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
+ } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
+
writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
}
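Note on the ipu-pre hunk above: ipu_pre_update() now busy-polls the store engine status until the current output row sits inside a "safe window" before arming the shadow update, and gives up with a warning after about 5 ms. A hedged userspace analogue of that poll-with-deadline loop (the clock calls and read_current_yblock() stand in for the jiffies check and the readl of the status register):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Stand-in for reading the store engine's status register and extracting
     * the block row it is currently writing out. */
    static unsigned int read_current_yblock(void)
    {
            static unsigned int y;
            return y++ % 64;        /* fake, monotonically cycling progress */
    }

    static long elapsed_ms(const struct timespec *start)
    {
            struct timespec now;

            clock_gettime(CLOCK_MONOTONIC, &now);
            return (now.tv_sec - start->tv_sec) * 1000 +
                   (now.tv_nsec - start->tv_nsec) / 1000000;
    }

    /* Poll until the engine is past row 0 but below the safe-window end,
     * giving up after timeout_ms, much like the loop added around the
     * shadow-update trigger. Returns true if the window was reached. */
    static bool wait_for_safe_window(unsigned int safe_window_end, long timeout_ms)
    {
            struct timespec start;
            unsigned int yblock;

            clock_gettime(CLOCK_MONOTONIC, &start);
            do {
                    if (elapsed_ms(&start) > timeout_ms)
                            return false;   /* analogous to the dev_warn path */
                    yblock = read_current_yblock();
            } while (yblock == 0 || yblock >= safe_window_end);

            return true;
    }

    int main(void)
    {
            printf("safe window hit: %d\n", wait_for_safe_window(62, 5));
            return 0;
    }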
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index ecc9ea44dc50..0013ca9f72c8 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -14,6 +14,7 @@
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
val = IPU_PRG_REG_UPDATE_REG_UPDATE;
writel(val, prg->regs + IPU_PRG_REG_UPDATE);
+ /* wait for both double buffers to be filled */
+ readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
+ (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
+ (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
+ 5, 1000);
+
clk_disable_unprepare(prg->clk_ipg);
chan->enabled = true;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 0a3117cc29e7..374301fcbc86 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -281,6 +281,7 @@ config HID_ELECOM
Support for ELECOM devices:
- BM084 Bluetooth Mouse
- DEFT Trackball (Wired and wireless)
+ - HUGE Trackball (Wired and wireless)
config HID_ELO
tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9bc91160819b..330ca983828b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e2c7465df69f..54aeea57d209 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
*/
/*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
- /* The DEFT trackball has eight buttons, but its descriptor only
- * reports five, disabling the three Fn buttons on the top of
- * the mouse.
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
+ /* The DEFT/HUGE trackball has eight buttons, but its descriptor
+ * only reports five, disabling the three Fn buttons on the top
+ * of the mouse.
*
* Apply the following diff to the descriptor:
*
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* End Collection, End Collection,
*/
if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+ hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
rdesc[13] = 8; /* Button/Variable Report Count */
rdesc[21] = 8; /* Button/Variable Usage Maximum */
rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b397a14ab970..be2e005c3c51 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -368,6 +368,8 @@
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
@@ -533,6 +535,7 @@
#define USB_VENDOR_ID_IDEACOM 0x1cb6
#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
+#define USB_DEVICE_ID_IDEACOM_IDC6680 0x6680
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
@@ -660,6 +663,7 @@
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
+#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 440b999304a5..9e8c4d2ba11d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -930,6 +930,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->application != HID_DG_PEN &&
field->application != HID_DG_TOUCHPAD &&
field->application != HID_GD_KEYBOARD &&
+ field->application != HID_GD_SYSTEM_CONTROL &&
field->application != HID_CP_CONSUMER_CONTROL &&
field->application != HID_GD_WIRELESS_RADIO_CTLS &&
!(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
@@ -1419,6 +1420,12 @@ static const struct hid_device_id mt_devices[] = {
USB_VENDOR_ID_ALPS_JP,
HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
+ /* Lenovo X1 TAB Gen 2 */
+ { .driver_data = MT_CLS_WIN_8_DUAL,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB) },
+
/* Anton devices */
{ .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 5b40c2614599..ef241d66562e 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev)
if (!(data->device_flags & RMI_DEVICE))
return 0;
- ret = rmi_reset_attn_mode(hdev);
+ /* Make sure the HID device is ready to receive events */
+ ret = hid_hw_open(hdev);
if (ret)
return ret;
+ ret = rmi_reset_attn_mode(hdev);
+ if (ret)
+ goto out;
+
ret = rmi_driver_resume(rmi_dev, false);
if (ret) {
hid_warn(hdev, "Failed to resume device: %d\n", ret);
- return ret;
+ goto out;
}
- return 0;
+out:
+ hid_hw_close(hdev);
+ return ret;
}
#endif /* CONFIG_PM */
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index ec530454e6f6..5fbe0f81ab2e 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -337,8 +337,8 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
kfree(hidraw);
} else {
/* close device for last reader */
- hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
hid_hw_close(hidraw->hid);
+ hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
}
}
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 77396145d2d0..9145c2129a96 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
{
/* the worst case is computed from the set_report command with a
* reportID > 15 and the maximum report length */
- int args_len = sizeof(__u8) + /* optional ReportID byte */
+ int args_len = sizeof(__u8) + /* ReportID */
+ sizeof(__u8) + /* optional ReportID byte */
sizeof(__u16) + /* data register */
sizeof(__u16) + /* size of the report */
report_size; /* report */
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 089bad8a9a21..045b5da9b992 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
unsigned int rsize = 0;
char *rdesc;
int ret, n;
+ int num_descriptors;
+ size_t offset = offsetof(struct hid_descriptor, desc);
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
+ if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+ dbg_hid("hid descriptor is too short\n");
+ return -EINVAL;
+ }
+
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- for (n = 0; n < hdesc->bNumDescriptors; n++)
+ num_descriptors = min_t(int, hdesc->bNumDescriptors,
+ (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+ for (n = 0; n < num_descriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
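Note on the usbhid hunk above: usbhid_parse() now rejects a HID descriptor shorter than the fixed header and clamps bNumDescriptors to however many class-descriptor entries actually fit inside bLength, so a misbehaving device can no longer make the loop read past the buffer. A hedged standalone sketch of the same bounds arithmetic (the struct layout is a simplified stand-in for struct hid_descriptor):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the USB HID descriptor structures. */
    struct class_desc {
            uint8_t  bDescriptorType;
            uint16_t wDescriptorLength;
    } __attribute__((packed));

    struct hid_desc {
            uint8_t  bLength;
            uint8_t  bDescriptorType;
            uint16_t bcdHID;
            uint8_t  bCountryCode;
            uint8_t  bNumDescriptors;
            struct class_desc desc[1];
    } __attribute__((packed));

    /* Return the number of class descriptors that can safely be walked:
     * never more than the device claims, and never more than fit within
     * the bLength bytes the device actually sent. */
    static int usable_descriptors(const struct hid_desc *h)
    {
            size_t offset = offsetof(struct hid_desc, desc);
            int fits;

            if (h->bLength < sizeof(struct hid_desc))
                    return -1;      /* descriptor too short, reject */

            fits = (h->bLength - offset) / sizeof(struct class_desc);
            return h->bNumDescriptors < fits ? h->bNumDescriptors : fits;
    }

    int main(void)
    {
            struct hid_desc lying = {
                    .bLength = sizeof(struct hid_desc),
                    .bNumDescriptors = 200,         /* claims far too many */
            };

            printf("walk %d descriptor(s)\n", usable_descriptors(&lying));
            return 0;
    }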
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a83fa76655b9..f489a5cfcb48 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -99,6 +99,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index e82a696a1d07..906e654fb0ba 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
/* Try to find an already-probed interface from the same device */
list_for_each_entry(data, &wacom_udev_list, list) {
- if (compare_device_paths(hdev, data->dev, '/'))
+ if (compare_device_paths(hdev, data->dev, '/')) {
+ kref_get(&data->kref);
return data;
+ }
}
/* Fallback to finding devices that appear to be "siblings" */
@@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom)
if (!wacom->led.groups)
return -ENOTSUPP;
+ if (wacom->wacom_wac.features.type == REMOTE)
+ return -ENOTSUPP;
+
if (wacom->wacom_wac.pid) { /* wireless connected */
report_id = WAC_CMD_WL_LED_CONTROL;
buf_size = 13;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bb17d7bbefd3..aa692e28b2cd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
keys = data[9] & 0x07;
}
} else {
- buttons = ((data[6] & 0x10) << 10) |
- ((data[5] & 0x10) << 9) |
+ buttons = ((data[6] & 0x10) << 5) |
+ ((data[5] & 0x10) << 4) |
((data[6] & 0x0F) << 4) |
(data[5] & 0x0F);
}
@@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
continue;
if (range) {
+ /* Fix rotation alignment: userspace expects zero at left */
+ int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
+ rotation += 1800/4;
+ if (rotation > 899)
+ rotation -= 1800;
+
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
- input_report_abs(pen_input, ABS_TILT_X, frame[7]);
- input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
- input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
+ input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
+ input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]);
+ input_report_abs(pen_input, ABS_Z, rotation);
input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
}
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
@@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
unsigned char *data = wacom->data;
int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
- int ring = data[285];
- int prox = buttons | (ring & 0x80);
+ int ring = data[285] & 0x7F;
+ bool ringstatus = data[285] & 0x80;
+ bool prox = buttons || ringstatus;
+
+ /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
+ ring = 71 - ring;
+ ring += 3*72/16;
+ if (ring > 71)
+ ring -= 72;
wacom_report_numbered_buttons(pad_input, 9, buttons);
- input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
+ input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
@@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage,
+ int value, int num, int denom)
+{
+ struct input_absinfo *abs = &input->absinfo[usage->code];
+ int range = (abs->maximum - abs->minimum + 1);
+
+ value += num*range/denom;
+ if (value > abs->maximum)
+ value -= range;
+ else if (value < abs->minimum)
+ value += range;
+ return value;
+}
+
int wacom_equivalent_usage(int usage)
{
if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
@@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
int i;
bool is_touch_on = value;
+ bool do_report = false;
/*
* Avoid reporting this event and setting inrange_state if this usage
@@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
}
switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCHRING:
+ /*
+ * Userspace expects touchrings to increase in value with
+ * clockwise gestures and have their zero point at the
+ * tablet's left. HID events "should" be clockwise-
+ * increasing and zero at top, though the MobileStudio
+ * Pro and 2nd-gen Intuos Pro don't do this...
+ */
+ if (hdev->vendor == 0x56a &&
+ (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
+ hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */
+ value = (field->logical_maximum - value);
+
+ if (hdev->product == 0x357 || hdev->product == 0x358)
+ value = wacom_offset_rotation(input, usage, value, 3, 16);
+ else if (hdev->product == 0x34d || hdev->product == 0x34e)
+ value = wacom_offset_rotation(input, usage, value, 1, 2);
+ }
+ else {
+ value = wacom_offset_rotation(input, usage, value, 1, 4);
+ }
+ do_report = true;
+ break;
case WACOM_HID_WD_TOUCHRINGSTATUS:
if (!value)
input_event(input, usage->type, usage->code, 0);
@@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
value, i);
/* fall through*/
default:
+ do_report = true;
+ break;
+ }
+
+ if (do_report) {
input_event(input, usage->type, usage->code, value);
if (value)
wacom_wac->hid_data.pad_input_event_flag = true;
- break;
}
}
@@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
wacom_wac->hid_data.tipswitch |= value;
return;
case HID_DG_TOOLSERIALNUMBER:
- wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
- wacom_wac->serial[0] |= (__u32)value;
+ if (value) {
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+ wacom_wac->serial[0] |= (__u32)value;
+ }
return;
+ case HID_DG_TWIST:
+ /*
+ * Userspace expects pen twist to have its zero point when
+ * the buttons/finger is on the tablet's left. HID values
+ * are zero when buttons are toward the top.
+ */
+ value = wacom_offset_rotation(input, usage, value, 1, 4);
+ break;
case WACOM_HID_WD_SENSE:
wacom_wac->hid_data.sense_state = value;
return;
case WACOM_HID_WD_SERIALHI:
- wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
- wacom_wac->serial[0] |= ((__u64)value) << 32;
- /*
- * Non-USI EMR devices may contain additional tool type
- * information here. See WACOM_HID_WD_TOOLTYPE case for
- * more details.
- */
- if (value >> 20 == 1) {
- wacom_wac->id[0] |= value & 0xFFFFF;
+ if (value) {
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ /*
+ * Non-USI EMR devices may contain additional tool type
+ * information here. See WACOM_HID_WD_TOOLTYPE case for
+ * more details.
+ */
+ if (value >> 20 == 1) {
+ wacom_wac->id[0] |= value & 0xFFFFF;
+ }
}
return;
case WACOM_HID_WD_TOOLTYPE:
@@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
input_report_key(input, wacom_wac->tool[0], prox);
if (wacom_wac->serial[0]) {
input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
- input_report_abs(input, ABS_MISC, id);
+ input_report_abs(input, ABS_MISC, prox ? id : 0);
}
wacom_wac->hid_data.tipswitch = false;
@@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
if (!prox) {
wacom_wac->tool[0] = 0;
wacom_wac->id[0] = 0;
+ wacom_wac->serial[0] = 0;
}
}
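Note on the wacom_wac hunks above: wacom_offset_rotation() re-zeroes a rotational axis by adding a fraction of its range and wrapping the result back into [minimum, maximum]; the pen twist and touchring paths then apply 1/4, 3/16 or 1/2 of a turn depending on the hardware. A hedged standalone sketch of that offset-and-wrap arithmetic, with a worked example for a 0..1799 twist axis shifted by a quarter turn:

    #include <stdio.h>

    /* Shift a value on a circular axis [min, max] by num/denom of the full
     * range, wrapping around the ends, mirroring the helper added above. */
    static int offset_rotation(int value, int min, int max, int num, int denom)
    {
            int range = max - min + 1;

            value += num * range / denom;
            if (value > max)
                    value -= range;
            else if (value < min)
                    value += range;
            return value;
    }

    int main(void)
    {
            /* 0..1799 tenths of a degree, quarter-turn offset: 0 -> 450,
             * 1700 -> 350 (wraps past the top of the range). */
            printf("%d %d\n",
                   offset_rotation(0, 0, 1799, 1, 4),
                   offset_rotation(1700, 0, 1799, 1, 4));
            return 0;
    }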
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index efd5db743319..894b67ac2cae 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
*/
return;
}
+ mutex_lock(&vmbus_connection.channel_mutex);
/*
* Close all the sub-channels first and then close the
* primary channel.
@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
vmbus_close_internal(cur_channel);
if (cur_channel->rescind) {
- mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(cur_channel,
+ hv_process_channel_removal(
cur_channel->offermsg.child_relid);
- mutex_unlock(&vmbus_connection.channel_mutex);
}
}
/*
* Now close the primary.
*/
vmbus_close_internal(channel);
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_close);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index bcbb031f7263..018d2e0f8ec5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-
+ channel->rescind = true;
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
true);
}
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
+void hv_process_channel_removal(u32 relid)
{
unsigned long flags;
- struct vmbus_channel *primary_channel;
+ struct vmbus_channel *primary_channel, *channel;
- BUG_ON(!channel->rescind);
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+ /*
+ * Make sure channel is valid as we may have raced.
+ */
+ channel = relid2channel(relid);
+ if (!channel)
+ return;
+
+ BUG_ON(!channel->rescind);
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!fnew) {
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
+ newchannel->probe_done = true;
return;
}
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
- unsigned long flags;
struct device *dev;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
return;
}
- spin_lock_irqsave(&channel->lock, flags);
- channel->rescind = true;
- spin_unlock_irqrestore(&channel->lock, flags);
-
- /*
- * Now that we have posted the rescind state, perform
- * rescind related cleanup.
- */
- vmbus_rescind_cleanup(channel);
-
/*
* Now wait for offer handling to complete.
*/
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
+ vmbus_rescind_cleanup(channel);
return;
}
/*
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
*/
dev = get_device(&channel->device_obj->device);
if (dev) {
+ vmbus_rescind_cleanup(channel);
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
@@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* 1. Close all sub-channels first
* 2. Then close the primary channel.
*/
+ mutex_lock(&vmbus_connection.channel_mutex);
+ vmbus_rescind_cleanup(channel);
if (channel->state == CHANNEL_OPEN_STATE) {
/*
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
- mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
- mutex_unlock(&vmbus_connection.channel_mutex);
+ hv_process_channel_removal(rescind->child_relid);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a9d49f6f6501..937801ac2fe0 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
struct vmbus_channel *channel = hv_dev->channel;
mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
+ hv_process_channel_removal(channel->offermsg.child_relid);
mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev);
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 9c0dbb8191ad..e1be61095532 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
GFP_KERNEL);
if (rc)
- goto out_mbox_free;
+ return -ENOMEM;
INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(ctx->mbox_chan)) {
dev_err(&pdev->dev,
"SLIMpro mailbox channel request failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
dev_err(&pdev->dev, "no pcc-channel property\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(ctx->mbox_chan)) {
dev_err(&pdev->dev,
"PPC channel request failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
/*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (!cppc_ss) {
dev_err(&pdev->dev, "PPC subspace not found\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
/*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
if (!ctx->pcc_comm_addr) {
dev_err(&pdev->dev,
"Failed to ioremap PCC comm region\n");
rc = -ENOMEM;
- goto out_mbox_free;
+ goto out;
}
/*
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c06dce2c1da7..45a3f3ca29b3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
Gemini Lake (SOC)
Cannon Lake-H (PCH)
Cannon Lake-LP (PCH)
+ Cedar Fork (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e114e4e00d29..9e12a53ef7b8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -68,6 +68,7 @@
* Gemini Lake (SOC) 0x31d4 32 hard yes yes yes
* Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
+ * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -204,6 +205,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
+#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 84fb35f6837f..eb1d91b986fd 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = {
};
module_platform_driver(img_scb_i2c_driver);
-MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
MODULE_DESCRIPTION("IMG host I2C driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 22e08ae1704f..25fcc3c1e32b 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {
static const struct of_device_id sprd_i2c_of_match[] = {
{ .compatible = "sprd,sc9860-i2c", },
+ {},
};
static struct platform_driver sprd_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 47c67b0ca896..d4a6e9c2e9aa 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
unsigned int msg_num;
unsigned int msg_id;
struct stm32f7_i2c_msg f7_msg;
- struct stm32f7_i2c_setup *setup;
+ struct stm32f7_i2c_setup setup;
struct stm32f7_i2c_timings timing;
};
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
},
};
-struct stm32f7_i2c_setup stm32f7_setup = {
+static const struct stm32f7_i2c_setup stm32f7_setup = {
.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
.dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
/* Enable I2C */
- if (i2c_dev->setup->analog_filter)
+ if (i2c_dev->setup.analog_filter)
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
setup = of_device_get_match_data(&pdev->dev);
- i2c_dev->setup->rise_time = setup->rise_time;
- i2c_dev->setup->fall_time = setup->fall_time;
- i2c_dev->setup->dnf = setup->dnf;
- i2c_dev->setup->analog_filter = setup->analog_filter;
+ i2c_dev->setup = *setup;
ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
&rise_time);
if (!ret)
- i2c_dev->setup->rise_time = rise_time;
+ i2c_dev->setup.rise_time = rise_time;
ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
&fall_time);
if (!ret)
- i2c_dev->setup->fall_time = fall_time;
+ i2c_dev->setup.fall_time = fall_time;
- ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup);
+ ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
if (ret)
goto clk_free;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 01b2adfd8226..eaf39e5db08b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
if (hwif_init(hwif) == 0) {
printk(KERN_INFO "%s: failed to initialize IDE "
"interface\n", hwif->name);
+ device_unregister(hwif->portdev);
device_unregister(&hwif->gendev);
ide_disable_port(hwif);
continue;
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 86aa88aeb3a6..acf874800ca4 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
{
struct list_head *l;
struct pci_driver *d;
+ int ret;
list_for_each(l, &ide_pci_drivers) {
d = list_entry(l, struct pci_driver, node);
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
const struct pci_device_id *id =
pci_match_id(d->id_table, dev);
- if (id != NULL && d->probe(dev, id) >= 0) {
- dev->driver = d;
- pci_dev_get(dev);
- return 1;
+ if (id != NULL) {
+ pci_assign_irq(dev);
+ ret = d->probe(dev, id);
+ if (ret >= 0) {
+ dev->driver = d;
+ pci_dev_get(dev);
+ return 1;
+ }
}
}
}
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 112d2fe1bcdb..fdc8e813170c 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
/**
* ide_pci_enable - do PCI enables
* @dev: PCI device
+ * @bars: PCI BARs mask
* @d: IDE port info
*
* Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
* Returns zero on success or an error code
*/
-static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+static int ide_pci_enable(struct pci_dev *dev, int bars,
+ const struct ide_port_info *d)
{
- int ret, bars;
+ int ret;
if (pci_enable_device(dev)) {
ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
goto out;
}
- if (d->host_flags & IDE_HFLAG_SINGLE)
- bars = (1 << 2) - 1;
- else
- bars = (1 << 4) - 1;
-
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- if (d->host_flags & IDE_HFLAG_CS5520)
- bars |= (1 << 2);
- else
- bars |= (1 << 4);
- }
-
ret = pci_request_selected_regions(dev, bars, d->name);
if (ret < 0)
printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
/**
* ide_setup_pci_controller - set up IDE PCI
* @dev: PCI device
+ * @bars: PCI BARs mask
* @d: IDE port info
* @noisy: verbose flag
*
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
* and enables it if need be
*/
-static int ide_setup_pci_controller(struct pci_dev *dev,
+static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
const struct ide_port_info *d, int noisy)
{
int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
if (noisy)
ide_setup_pci_noise(dev, d);
- ret = ide_pci_enable(dev, d);
+ ret = ide_pci_enable(dev, bars, d);
if (ret < 0)
goto out;
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
if (ret < 0) {
printk(KERN_ERR "%s %s: error accessing PCI regs\n",
d->name, pci_name(dev));
- goto out;
+ goto out_free_bars;
}
if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
ret = ide_pci_configure(dev, d);
if (ret < 0)
- goto out;
+ goto out_free_bars;
printk(KERN_INFO "%s %s: device enabled (Linux)\n",
d->name, pci_name(dev));
}
+ goto out;
+
+out_free_bars:
+ pci_release_selected_regions(dev, bars);
out:
return ret;
}
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
{
struct pci_dev *pdev[] = { dev1, dev2 };
struct ide_host *host;
- int ret, i, n_ports = dev2 ? 4 : 2;
+ int ret, i, n_ports = dev2 ? 4 : 2, bars;
struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+ if (d->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (d->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
+
for (i = 0; i < n_ports / 2; i++) {
- ret = ide_setup_pci_controller(pdev[i], d, !i);
- if (ret < 0)
+ ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
+ if (ret < 0) {
+ if (i == 1)
+ pci_release_selected_regions(pdev[0], bars);
goto out;
+ }
ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
}
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
host = ide_host_alloc(d, hws, n_ports);
if (host == NULL) {
ret = -ENOMEM;
- goto out;
+ goto out_free_bars;
}
host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
* do_ide_setup_pci_device() on the first device!
*/
if (ret < 0)
- goto out;
+ goto out_free_bars;
/* fixup IRQ */
if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
ret = ide_host_register(host, d, hws);
if (ret)
ide_host_free(host);
+ else
+ goto out;
+
+out_free_bars:
+ i = n_ports / 2;
+ while (i--)
+ pci_release_selected_regions(pdev[i], bars);
out:
return ret;
}
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30825bb9b8e9..8861c052155a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
if (ret)
goto pid_query_error;
+ nlmsg_end(skb, nlh);
+
pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
if (ret)
goto add_mapping_error;
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
if (ret)
goto query_mapping_error;
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
if (ret)
goto remove_mapping_error;
+ nlmsg_end(skb, nlh);
+
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index c81c55942626..3c4faadb8cdd 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
if (ret)
goto mapinfo_num_error;
+
+ nlmsg_end(skb, nlh);
+
ret = rdma_nl_unicast(skb, iwpm_pid);
if (ret) {
skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
if (ret)
goto send_mapping_info_unlock;
+ nlmsg_end(skb, nlh);
+
iwpm_print_sockaddr(&map_info->local_sockaddr,
"send_mapping_info: Local sockaddr:");
iwpm_print_sockaddr(&map_info->mapped_sockaddr,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d1f5345f04f0..42ca5346777d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -48,7 +48,7 @@
* @wqe: cqp wqe for header
* @header: header for the cqp wqe
*/
-static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
wmb(); /* make sure WQE is populated before polarity is set */
set_64bit_val(wqe, 24, header);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index e217a1259f57..5498ad01c280 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
struct i40iw_fast_reg_stag_info *info,
bool post_sq);
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
+
/* HMC/FPM functions */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
u8 hmc_fn_id);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c2cab20c4bc5..59f70676f0e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
get_64bit_val(wqe, 24, &offset24);
offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 24, offset24);
set_64bit_val(wqe, 0, buf->mem.pa);
set_64bit_val(wqe, 8,
LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
- set_64bit_val(wqe, 24, offset24);
+ i40iw_insert_wqe_hdr(wqe, offset24);
}
/**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
set_64bit_val(wqe, 16, header[0]);
- /* Ensure all data is written before writing valid bit */
- wmb();
- set_64bit_val(wqe, 24, header[1]);
+ i40iw_insert_wqe_hdr(wqe, header[1]);
i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ i40iw_insert_wqe_hdr(wqe, header);
i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ i40iw_insert_wqe_hdr(wqe, header);
i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
wqe, I40IW_CQP_WQE_SIZE * 8);
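Note on the i40iw PUDA hunks above: the open-coded "wmb(); write header" sequences are replaced by i40iw_insert_wqe_hdr(), which orders the payload stores before the header word carrying the valid/polarity bit, so the hardware can never see a valid WQE whose data is still in flight. A hedged userspace analogue using a C11 release store in place of wmb() (the WQE layout here is invented for the example):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct wqe {
            uint64_t frag;                 /* payload, written first */
            _Atomic uint64_t header;       /* carries the "valid" bit, written last */
    };

    #define WQE_VALID (1ull << 63)

    /* Fill the work-queue entry, then publish the header with release
     * ordering so the valid bit is never observed before the payload. */
    static void post_wqe(struct wqe *wqe, uint64_t frag, uint64_t hdr)
    {
            wqe->frag = frag;
            atomic_store_explicit(&wqe->header, hdr | WQE_VALID,
                                  memory_order_release);
    }

    int main(void)
    {
            struct wqe w = { 0 };

            post_wqe(&w, 0xdeadbeef, 0x1);
            printf("header=%#llx\n",
                   (unsigned long long)atomic_load(&w.header));
            return 0;
    }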
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 28b3d02d511b..62be0a41ad0b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ attr->port_num = 1;
init_attr->event_handler = iwqp->ibqp.event_handler;
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
init_attr->recv_cq = iwqp->ibqp.recv_cq;
init_attr->srq = iwqp->ibqp.srq;
init_attr->cap = attr->cap;
+ init_attr->port_num = 1;
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d6fbad8f34aa..552f7bd4ecc3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4174,9 +4174,9 @@ err_bfreg:
err_uar_page:
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-err_cnt:
- mlx5_ib_cleanup_cong_debugfs(dev);
err_cong:
+ mlx5_ib_cleanup_cong_debugfs(dev);
+err_cnt:
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index b2bb42e2805d..254083b524bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -387,7 +387,7 @@ struct qedr_qp {
u8 wqe_size;
u8 smac[ETH_ALEN];
- u16 vlan_id;
+ u16 vlan;
int rc;
} *rqe_wr_id;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 4689e802b332..ad8965397cf7 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
-EINVAL : 0;
- qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
/* note: length stands for data length i.e. GRH is excluded */
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct qedr_cq *cq = get_qedr_cq(ibcq);
struct qedr_qp *qp = dev->gsi_qp;
unsigned long flags;
+ u16 vlan_id;
int i = 0;
spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
wc[i].wc_flags |= IB_WC_WITH_SMAC;
- if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+
+ vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_VID_MASK;
+ if (vlan_id) {
wc[i].wc_flags |= IB_WC_WITH_VLAN;
- wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+ wc[i].vlan_id = vlan_id;
+ wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
qedr_inc_sw_cons(&qp->rq);
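Note on the qedr hunks above: the poll-CQ path now splits the raw VLAN field into its 802.1Q parts, reporting the low 12 bits as the VLAN ID and the top 3 bits as the priority via wc->sl, and only flags IB_WC_WITH_VLAN when a non-zero ID is present. A hedged standalone sketch of that TCI decode (the mask and shift values follow the 802.1Q layout; the variable names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define VID_MASK   0x0fff        /* bits 0..11: VLAN identifier */
    #define PRIO_MASK  0xe000        /* bits 13..15: priority code point */
    #define PRIO_SHIFT 13

    int main(void)
    {
            uint16_t tci = 0x6064;   /* example tag: priority 3, VID 0x064 */
            uint16_t vid = tci & VID_MASK;
            uint8_t  pcp = (tci & PRIO_MASK) >> PRIO_SHIFT;

            /* Only report the VLAN if a non-zero ID is present, as the
             * hunk above does before setting the completion flags. */
            if (vid)
                    printf("vid=%u prio=%u\n", vid, pcp);
            return 0;
    }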
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 51f8215877f5..8e8874d23717 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
- swiotlb = iommu_pass_through ? 1 : 0;
+ swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
iommu_detected = 1;
/*
* In case we don't initialize SWIOTLB (actually the common case
- * when AMD IOMMU is enabled), make sure there are global
- * dma_ops set as a fall-back for devices not handled by this
- * driver (for example non-PCI devices).
+ * when AMD IOMMU is enabled and SME is not active), make sure there
+ * are global dma_ops set as a fall-back for devices not handled by
+ * this driver (for example non-PCI devices). When SME is active,
+ * make sure that swiotlb variable remains set so the global dma_ops
+ * continue to be SWIOTLB.
*/
if (!swiotlb)
dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
mutex_unlock(&domain->api_lock);
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
return unmap_size;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index f596fcc32898..25c2c75f5332 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id sysmmu_of_match[] __initconst = {
+static const struct of_device_id sysmmu_of_match[] = {
{ .compatible = "samsung,exynos-sysmmu", },
{ },
};
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 7d5286b05036..1841d0359bac 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
void __closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
- struct closure *cl;
+ struct closure *cl, *t;
struct llist_node *reverse = NULL;
list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
reverse = llist_reverse_order(list);
/* Then do the wakeups */
- llist_for_each_entry(cl, reverse, list) {
+ llist_for_each_entry_safe(cl, t, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 24eddbdf2ab4..203144762f36 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a55ffd4f5933..96ab46512e1f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
kfree(cipher_api);
return ret;
}
+ kfree(cipher_api);
return 0;
bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
ti->error = "Invalid feature value for sector_size";
return -EINVAL;
}
+ if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+ ti->error = "Device size is not multiple of sector_size feature";
+ return -EINVAL;
+ }
cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
} else if (!strcasecmp(opt_string, "iv_large_sectors"))
set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
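Note on the dm-crypt hunk above: the constructor now rejects a target whose length is not a whole multiple of the configured sector_size; since sector_size is a power of two, the divisibility test is a mask against (sectors_per_block - 1). A hedged standalone sketch of that mask trick (the constants in main are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9                  /* 512-byte units, as in the kernel */

    /* true if len (in 512-byte sectors) is a whole number of crypto blocks
     * of sector_size bytes; sector_size must be a power of two. */
    static bool len_is_aligned(uint64_t len, unsigned int sector_size)
    {
            uint64_t sectors_per_block = sector_size >> SECTOR_SHIFT;

            return (len & (sectors_per_block - 1)) == 0;
    }

    int main(void)
    {
            /* 4096-byte blocks are 8 sectors: 1024 sectors is fine, 1027 is not. */
            printf("%d %d\n",
                   len_is_aligned(1024, 4096),
                   len_is_aligned(1027, 4096));
            return 0;
    }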
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8756a6850431..e52676fa9832 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
* Round up the ptr to an 8-byte boundary.
*/
#define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+ return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
static inline void *align_ptr(void *ptr)
{
- return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+ return (void *)align_val((size_t)ptr);
}
/*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
struct hash_cell *hc;
size_t len, needed = 0;
struct gendisk *disk;
- struct dm_name_list *nl, *old_nl = NULL;
+ struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
uint32_t *event_nr;
down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
*/
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_entry (hc, _name_buckets + i, name_list) {
- needed += sizeof(struct dm_name_list);
- needed += strlen(hc->name) + 1;
- needed += ALIGN_MASK;
- needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+ needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+ needed += align_val(sizeof(uint32_t));
}
}
/*
* Grab our output buffer.
*/
- nl = get_result_buffer(param, param_size, &len);
+ nl = orig_nl = get_result_buffer(param, param_size, &len);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
strcpy(nl->name, hc->name);
old_nl = nl;
- event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+ event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
*event_nr = dm_get_event_nr(hc->md);
nl = align_ptr(event_nr + 1);
}
}
+ /*
+ * If mismatch happens, security may be compromised due to buffer
+ * overflow, so it's better to crash.
+ */
+ BUG_ON((char *)nl - (char *)orig_nl != needed);
out:
up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
* which has a variable size, is not used by the function processing
* the ioctl.
*/
-#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
/*-----------------------------------------------------------------
* Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
ioctl_fn fn;
} _ioctls[] = {
{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
- {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
{DM_LIST_DEVICES_CMD, 0, list_devices},
- {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
- {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
- {DM_DEV_RENAME_CMD, 0, dev_rename},
+ {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+ {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+ {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
{DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+ if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+ dm_issue_global_event();
+
/*
* Copy the results back to userland.
*/
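The align_val()/align_ptr() pair is the usual round-up-to-8-bytes idiom; routing both the size accounting and the pointer bumping through the same helper is what lets the later BUG_ON() compare bytes written against bytes reserved. A standalone restatement of the helpers:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGN_MASK 7

static inline size_t align_val(size_t val)
{
	/* round val up to the next multiple of 8 */
	return (val + ALIGN_MASK) & ~(size_t)ALIGN_MASK;
}

static inline void *align_ptr(void *ptr)
{
	/* same rounding applied to a pointer value */
	return (void *)(uintptr_t)align_val((size_t)(uintptr_t)ptr);
}

int main(void)
{
	printf("%zu %zu %zu\n", align_val(1), align_val(8), align_val(9));	/* 8 8 16 */
	return 0;
}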
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1ac58c5651b7..2245d06d2045 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
static sector_t rs_get_progress(struct raid_set *rs,
sector_t resync_max_sectors, bool *array_in_sync)
{
- sector_t r, recovery_cp, curr_resync_completed;
+ sector_t r, curr_resync_completed;
struct mddev *mddev = &rs->md;
curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- recovery_cp = mddev->recovery_cp;
*array_in_sync = false;
if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
r = curr_resync_completed;
else
- r = recovery_cp;
+ r = mddev->recovery_cp;
- if (r == MaxSector) {
+ if ((r == MaxSector) ||
+ (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+ (mddev->curr_resync_completed == resync_max_sectors))) {
/*
* Sync complete.
*/
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 12, 1},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e54145969c5..4be85324f44d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
+void dm_issue_global_event(void)
+{
+ atomic_inc(&dm_global_event_nr);
+ wake_up(&dm_global_eventq);
+}
+
/*
* One of these is allocated per bio.
*/
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
- atomic_inc(&dm_global_event_nr);
wake_up(&md->eventq);
- wake_up(&dm_global_eventq);
+ dm_issue_global_event();
}
/*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
}
map = __bind(md, table, &limits);
+ dm_issue_global_event();
out:
mutex_unlock(&md->suspend_lock);
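dm_issue_global_event() amounts to "bump a global counter and wake every waiter", so a listener can notice that some mapped device changed and re-scan. A userspace analogue using pthreads (the kernel uses an atomic counter and a waitqueue instead):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned long global_event_nr;

static void issue_global_event(void)
{
	pthread_mutex_lock(&lock);
	global_event_nr++;			/* generation counter */
	pthread_cond_broadcast(&cond);		/* wake all listeners */
	pthread_mutex_unlock(&lock);
}

static void wait_for_event(unsigned long last_seen)
{
	pthread_mutex_lock(&lock);
	while (global_event_nr == last_seen)	/* sleep until the counter moves */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	issue_global_event();
	wait_for_event(0);	/* returns immediately: counter is already 1 */
	printf("seen event %lu\n", global_event_nr);
	return 0;
}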
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index ed43a4212479..129b558acc92 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init);
module_exit(ir_sharp_decode_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
MODULE_DESCRIPTION("Sharp IR protocol decoder");
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 5dba23ca2e5f..dc9bc1807fdf 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
down_read(&mm->mmap_sem);
- for (dar = addr; dar < addr + size; dar += page_size) {
- if (!vma || dar < vma->vm_start || dar > vma->vm_end) {
+ vma = find_vma(mm, addr);
+ if (!vma) {
+ pr_err("Can't find vma for addr %016llx\n", addr);
+ rc = -EFAULT;
+ goto out;
+ }
+ /* get the size of the pages allocated */
+ page_size = vma_kernel_pagesize(vma);
+
+ for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+ if (dar < vma->vm_start || dar >= vma->vm_end) {
vma = find_vma(mm, addr);
if (!vma) {
pr_err("Can't find vma for addr %016llx\n", addr);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c8307e8b4c16..0ccccbaf530d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
+
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 4ff40d319676..78b3172c8e6e 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
@@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
/*
- * For not wake-able HW runtime pm framework
- * can't be used on pci device level.
- * Use domain runtime pm callbacks instead.
- */
- if (!pci_dev_run_wake(pdev))
- mei_me_set_pm_domain(dev);
+ * ME maps runtime suspend/resume to D0i states,
+ * hence we need to go around native PCI runtime service which
+ * eventually brings the device into D3cold/hot state,
+ * but the mei device cannot wake up from D3 unlike from D0i3.
+ * To get around the PCI device native runtime pm,
+ * ME uses runtime pm domain handlers which take precedence
+ * over the driver's pm handlers.
+ */
+ mei_me_set_pm_domain(dev);
if (mei_pg_is_enabled(dev))
pm_runtime_put_noidle(&pdev->dev);
@@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_me_unset_pm_domain(dev);
+ mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_me_unset_pm_domain(dev);
+ mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev);
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index e38a5f144373..0566f9bfa7de 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
/*
- * For not wake-able HW runtime pm framework
- * can't be used on pci device level.
- * Use domain runtime pm callbacks instead.
- */
- if (!pci_dev_run_wake(pdev))
- mei_txe_set_pm_domain(dev);
+ * TXE maps runtime suspend/resume to own power gating states,
+ * hence we need to go around native PCI runtime service which
+ * eventually brings the device into D3cold/hot state.
+ * But the TXE device cannot wake up from D3 unlike from own
+ * power gating. To get around PCI device native runtime pm,
+ * TXE uses runtime pm domain handlers which take precedence.
+ */
+ mei_txe_set_pm_domain(dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_txe_unset_pm_domain(dev);
+ mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev)
mei_stop(dev);
- if (!pci_dev_run_wake(pdev))
- mei_txe_unset_pm_domain(dev);
+ mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
else
ret = -EAGAIN;
- /*
- * If everything is okay we're about to enter PCI low
- * power state (D3) therefor we need to disable the
- * interrupts towards host.
- * However if device is not wakeable we do not enter
- * D-low state and we need to keep the interrupt kicking
- */
- if (!ret && pci_dev_run_wake(pdev))
- mei_disable_interrupts(dev);
+ /* keep irq on we are staying in D0 */
dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 29fc1e662891..2ad7b5c69156 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
-
- mmc_queue_bounce_pre(mqrq);
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
brq = &mq_rq->brq;
old_req = mmc_queue_req_to_req(mq_rq);
type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
- mmc_queue_bounce_post(mq_rq);
switch (status) {
case MMC_BLK_SUCCESS:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a7eb623f8daa..36217ad5e9b1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1286,6 +1286,23 @@ out_err:
return err;
}
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+ int card_drv_type, drive_strength, drv_type;
+
+ card_drv_type = card->ext_csd.raw_driver_strength |
+ mmc_driver_type_mask(0);
+
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
+
+ card->drive_strength = drive_strength;
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
+}
+
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ mmc_select_driver_type(card);
+
/* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
return err;
}
-static void mmc_select_driver_type(struct mmc_card *card)
-{
- int card_drv_type, drive_strength, drv_type;
-
- card_drv_type = card->ext_csd.raw_driver_strength |
- mmc_driver_type_mask(0);
-
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
-
- card->drive_strength = drive_strength;
-
- if (drv_type)
- mmc_set_driver_type(card->host, drv_type);
-}
-
/*
* For device supporting HS200 mode, the following sequence
* should be done before executing the tuning process.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 74c663b1c0a7..0a4e77a5ba33 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -23,8 +23,6 @@
#include "core.h"
#include "card.h"
-#define MMC_QUEUE_BOUNCESZ 65536
-
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
- unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
- if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
- return 0;
-
- if (bouncesz > host->max_req_size)
- bouncesz = host->max_req_size;
- if (bouncesz > host->max_seg_size)
- bouncesz = host->max_seg_size;
- if (bouncesz > host->max_blk_count * 512)
- bouncesz = host->max_blk_count * 512;
-
- if (bouncesz <= 512)
- return 0;
-
- return bouncesz;
-}
-
/**
* mmc_init_request() - initialize the MMC-specific per-request data
* @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
- if (card->bouncesz) {
- mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
- if (!mq_rq->bounce_buf)
- return -ENOMEM;
- if (card->bouncesz > 512) {
- mq_rq->sg = mmc_alloc_sg(1, gfp);
- if (!mq_rq->sg)
- return -ENOMEM;
- mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
- gfp);
- if (!mq_rq->bounce_sg)
- return -ENOMEM;
- }
- } else {
- mq_rq->bounce_buf = NULL;
- mq_rq->bounce_sg = NULL;
- mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
- if (!mq_rq->sg)
- return -ENOMEM;
- }
+ mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+ if (!mq_rq->sg)
+ return -ENOMEM;
return 0;
}
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- /* It is OK to kfree(NULL) so this will be smooth */
- kfree(mq_rq->bounce_sg);
- mq_rq->bounce_sg = NULL;
-
- kfree(mq_rq->bounce_buf);
- mq_rq->bounce_buf = NULL;
-
kfree(mq_rq->sg);
mq_rq->sg = NULL;
}
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
- /*
- * mmc_init_request() depends on card->bouncesz so it must be calculated
- * before blk_init_allocated_queue() starts allocating requests.
- */
- card->bouncesz = mmc_queue_calc_bouncesz(host);
-
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
- if (card->bouncesz) {
- blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
- blk_queue_max_segments(mq->queue, card->bouncesz / 512);
- blk_queue_max_segment_size(mq->queue, card->bouncesz);
- } else {
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- }
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
sema_init(&mq->thread_sem, 1);
@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
*/
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
- unsigned int sg_len;
- size_t buflen;
- struct scatterlist *sg;
struct request *req = mmc_queue_req_to_req(mqrq);
- int i;
-
- if (!mqrq->bounce_buf)
- return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
- sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
- mqrq->bounce_sg_len = sg_len;
-
- buflen = 0;
- for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
- buflen += sg->length;
-
- sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
- return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
- if (!mqrq->bounce_buf)
- return;
-
- if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
- return;
-
- sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
- mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
- if (!mqrq->bounce_buf)
- return;
-
- if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
- return;
- sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
- mqrq->bounce_buf, mqrq->sg[0].length);
+ return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 04fc89360a7a..f18d3f656baa 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -49,9 +49,6 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern int mmc_access_rpmb(struct mmc_queue *);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 27fb625cbcf3..fbd29f00fca0 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
*/
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+ MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c885c2d4b904..85745ef179e2 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
div->shift = __ffs(CLK_DIV_MASK);
div->width = __builtin_popcountl(CLK_DIV_MASK);
div->hw.init = &init;
- div->flags = (CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ROUND_CLOSEST);
+ div->flags = CLK_DIVIDER_ONE_BASED;
clk = devm_clk_register(host->dev, &div->hw);
if (WARN_ON(IS_ERR(clk)))
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct meson_host *host = mmc_priv(mmc);
+ int ret;
+
+ /*
+ * If this is the initial tuning, try to get a sane Rx starting
+ * phase before doing the actual tuning.
+ */
+ if (!mmc->doing_retune) {
+ ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+ if (ret)
+ return ret;
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+ /* Reset phases */
+ clk_set_phase(host->rx_clk, 0);
+ clk_set_phase(host->tx_clk, 270);
+
break;
case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->vqmmc_enabled = true;
}
- /* Reset rx phase */
- clk_set_phase(host->rx_clk, 0);
break;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 59ab194cb009..c763b404510f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
pxamci_init_ocr(host);
- /*
- * This architecture used to disable bounce buffers through its
- * defconfig, now it is done at runtime as a host property.
- */
- mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+ mmc->caps = 0;
host->cmdat = 0;
if (!cpu_is_pxa25x()) {
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 2eec2e652c53..0842bbc2d7ad 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
+ struct xenon_priv *priv;
int err;
host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
/*
* Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
if (err)
goto free_pltfm;
+ priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(priv->axi_clk)) {
+ err = PTR_ERR(priv->axi_clk);
+ if (err == -EPROBE_DEFER)
+ goto err_clk;
+ } else {
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err)
+ goto err_clk;
+ }
+
err = mmc_of_parse(host->mmc);
if (err)
- goto err_clk;
+ goto err_clk_axi;
sdhci_get_of_property(pdev);
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
/* Xenon specific dt parse */
err = xenon_probe_dt(pdev);
if (err)
- goto err_clk;
+ goto err_clk_axi;
err = xenon_sdhc_prepare(host);
if (err)
- goto err_clk;
+ goto err_clk_axi;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
xenon_sdhc_unprepare(host);
+err_clk_axi:
+ clk_disable_unprepare(priv->axi_clk);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
free_pltfm:
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
sdhci_remove_host(host, 0);
xenon_sdhc_unprepare(host);
-
+ clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
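A hedged kernel-style sketch of the optional-clock handling added above (the helper name is illustrative, not the xenon code): a missing "axi" clock is tolerated, but -EPROBE_DEFER is still propagated so the probe is retried once the clock provider appears. Later kernels added devm_clk_get_optional() for the same pattern.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_get_optional_axi_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "axi");

	if (IS_ERR(clk)) {
		*out = NULL;
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready yet: retry probe later */
		return 0;			/* clock simply not described: carry on without it */
	}

	*out = clk;
	return clk_prepare_enable(clk);
}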
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 2bc0510c0769..9994995c7c56 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -83,6 +83,7 @@ struct xenon_priv {
unsigned char bus_width;
unsigned char timing;
unsigned int clock;
+ struct clk *axi_clk;
int phy_type;
/*
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c6678aa9b4ef..d74c7335c512 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
};
int i, err;
+ /* DSA and CPU ports have to be members of multiple vlans */
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ return 0;
+
if (!vid_begin)
return -EOPNOTSUPP;
@@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
if (chip->irq > 0) {
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
+ mutex_lock(&chip->reg_lock);
mv88e6xxx_g1_irq_free(chip);
+ mutex_unlock(&chip->reg_lock);
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 214986436ece..0fdaaa643073 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -51,6 +51,10 @@
#define AQ_CFG_SKB_FRAGS_MAX 32U
+/* Number of descriptors available in one ring to resume this ring queue
+ */
+#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
+
#define AQ_CFG_NAPI_WEIGHT 64U
#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6ac9e2602d6d..0a5bb4114eb4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
return 0;
}
+static int aq_nic_update_link_status(struct aq_nic_s *self)
+{
+ int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+
+ if (err)
+ return err;
+
+ if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
+ pr_info("%s: link change old %d new %d\n",
+ AQ_CFG_DRV_NAME, self->link_status.mbps,
+ self->aq_hw->aq_link_status.mbps);
+
+ self->link_status = self->aq_hw->aq_link_status;
+ if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
+ aq_utils_obj_set(&self->header.flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->header.flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+ netif_tx_wake_all_queues(self->ndev);
+ }
+ if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
+ netif_carrier_off(self->ndev);
+ netif_tx_disable(self->ndev);
+ aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ }
+ return 0;
+}
+
static void aq_nic_service_timer_cb(unsigned long param)
{
struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param)
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
- err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
- if (err < 0)
+ err = aq_nic_update_link_status(self);
+ if (err)
goto err_exit;
- self->link_status = self->aq_hw->aq_link_status;
-
self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
self->aq_nic_cfg.is_interrupt_moderation);
- if (self->link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
- AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
- AQ_NIC_LINK_DOWN);
- netif_carrier_on(self->ndev);
- } else {
- netif_carrier_off(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
- }
-
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
for (i = AQ_DIMOF(self->aq_vec); i--;) {
@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
SET_NETDEV_DEV(ndev, dev);
ndev->if_port = port;
- ndev->min_mtu = ETH_MIN_MTU;
self->ndev = ndev;
self->aq_pci_func = aq_pci_func;
@@ -241,7 +256,6 @@ err_exit:
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
- unsigned int i = 0U;
if (!self->ndev) {
err = -EINVAL;
@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
netif_carrier_off(self->ndev);
- for (i = AQ_CFG_VECS_MAX; i--;)
- aq_nic_ndev_queue_stop(self, i);
+ netif_tx_disable(self->ndev);
err = register_netdev(self->ndev);
if (err < 0)
@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+ self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
return 0;
}
@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
err = -EINVAL;
goto err_exit;
}
- if (netif_running(ndev)) {
- unsigned int i;
-
- for (i = AQ_CFG_VECS_MAX; i--;)
- netif_stop_subqueue(ndev, i);
- }
+ if (netif_running(ndev))
+ netif_tx_disable(ndev);
for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
self->aq_vecs++) {
@@ -383,16 +393,6 @@ err_exit:
return err;
}
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
-{
- netif_start_subqueue(self->ndev, idx);
-}
-
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
-{
- netif_stop_subqueue(self->ndev, idx);
-}
-
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_nic_ndev_queue_start(self, i);
-
err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
if (err < 0)
goto err_exit;
@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
+ netif_tx_start_all_queues(self->ndev);
+
err_exit:
return err;
}
@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int frag_count = 0U;
unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *first = NULL;
struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
dx_buff->len_l4 = tcp_hdrlen(skb);
dx_buff->mss = skb_shinfo(skb)->gso_size;
dx_buff->is_txc = 1U;
+ dx_buff->eop_index = 0xffffU;
dx_buff->is_ipv6 =
(ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
goto exit;
+ first = dx_buff;
dx_buff->len_pkt = skb->len;
dx_buff->is_sop = 1U;
dx_buff->is_mapped = 1U;
@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
for (; nr_frags--; ++frag_count) {
unsigned int frag_len = 0U;
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
dma_addr_t frag_pa;
skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
frag_len = skb_frag_size(frag);
- frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
- frag_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
- goto mapping_error;
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
+ frag,
+ buff_offset,
+ buff_size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
+ frag_pa)))
+ goto mapping_error;
- while (frag_len > AQ_CFG_TX_FRAME_MAX) {
dx = aq_ring_next_dx(ring, dx);
dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U;
- dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+ dx_buff->len = buff_size;
dx_buff->pa = frag_pa;
dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
- frag_len -= AQ_CFG_TX_FRAME_MAX;
- frag_pa += AQ_CFG_TX_FRAME_MAX;
++ret;
}
-
- dx = aq_ring_next_dx(ring, dx);
- dx_buff = &ring->buff_ring[dx];
-
- dx_buff->flags = 0U;
- dx_buff->len = frag_len;
- dx_buff->pa = frag_pa;
- dx_buff->is_mapped = 1U;
- ++ret;
}
+ first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
goto exit;
@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U;
int err = NETDEV_TX_OK;
- bool is_nic_in_bad_state;
frags = skb_shinfo(skb)->nr_frags + 1;
@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
goto err_exit;
}
- is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
- AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
- (aq_ring_avail_dx(ring) <
- AQ_CFG_SKB_FRAGS_MAX);
+ aq_ring_update_queue_state(ring);
- if (is_nic_in_bad_state) {
- aq_nic_ndev_queue_stop(self, ring->idx);
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
ring,
frags);
if (err >= 0) {
- if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
- aq_nic_ndev_queue_stop(self, ring->idx);
-
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
}
@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
- int err = 0;
-
- if (new_mtu > self->aq_hw_caps.mtu) {
- err = -EINVAL;
- goto err_exit;
- }
self->aq_nic_cfg.mtu = new_mtu;
-err_exit:
- return err;
+ return 0;
}
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self)
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_nic_ndev_queue_stop(self, i);
+ netif_tx_disable(self->ndev);
del_timer_sync(&self->service_timer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 7fc2a5ecb2b7..0ddd556ff901 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
int aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4eee1996a825..0654e0c76bc2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
return 0;
}
+static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
+ unsigned int t)
+{
+ return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
+
+void aq_ring_update_queue_state(struct aq_ring_s *ring)
+{
+ if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
+ aq_ring_queue_stop(ring);
+ else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
+ aq_ring_queue_wake(ring);
+}
+
+void aq_ring_queue_wake(struct aq_ring_s *ring)
+{
+ struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+ if (__netif_subqueue_stopped(ndev, ring->idx)) {
+ netif_wake_subqueue(ndev, ring->idx);
+ ring->stats.tx.queue_restarts++;
+ }
+}
+
+void aq_ring_queue_stop(struct aq_ring_s *ring)
+{
+ struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+ if (!__netif_subqueue_stopped(ndev, ring->idx))
+ netif_stop_subqueue(ndev, ring->idx);
+}
+
void aq_ring_tx_clean(struct aq_ring_s *self)
{
struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
if (likely(buff->is_mapped)) {
- if (unlikely(buff->is_sop))
+ if (unlikely(buff->is_sop)) {
+ if (!buff->is_eop &&
+ buff->eop_index != 0xffffU &&
+ (!aq_ring_dx_in_range(self->sw_head,
+ buff->eop_index,
+ self->hw_head)))
+ break;
+
dma_unmap_single(dev, buff->pa, buff->len,
DMA_TO_DEVICE);
- else
+ } else {
dma_unmap_page(dev, buff->pa, buff->len,
DMA_TO_DEVICE);
+ }
}
if (unlikely(buff->is_eop))
dev_kfree_skb_any(buff->skb);
- }
-}
-static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
- unsigned int t)
-{
- return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+ buff->pa = 0U;
+ buff->eop_index = 0xffffU;
+ }
}
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
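aq_ring_dx_in_range() is the usual wrap-aware "does index i lie between head and tail" test for a circular descriptor ring: when the live region does not wrap, i must sit strictly between h and t; when it wraps past the end of the ring, either half counts. A standalone illustration:

#include <stdio.h>
#include <stdbool.h>

static bool dx_in_range(unsigned int h, unsigned int i, unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

int main(void)
{
	printf("%d\n", dx_in_range(2, 5, 8));	/* 1: no wrap, 2 < 5 < 8 */
	printf("%d\n", dx_in_range(8, 1, 3));	/* 1: wrapped region, 1 sits after the wrap */
	printf("%d\n", dx_in_range(8, 5, 3));	/* 0: 5 is in the free part of the ring */
	return 0;
}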
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 782176c5f4f8..5844078764bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
};
union {
struct {
- u32 len:16;
+ u16 len;
u32 is_ip_cso:1;
u32 is_udp_cso:1;
u32 is_tcp_cso:1;
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
u32 is_cleaned:1;
u32 is_error:1;
u32 rsvd3:6;
+ u16 eop_index;
+ u16 rsvd4;
};
- u32 flags;
+ u64 flags;
};
};
@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
u64 errors;
u64 packets;
u64 bytes;
+ u64 queue_restarts;
};
union aq_ring_stats_s {
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
int aq_ring_init(struct aq_ring_s *self);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_update_queue_state(struct aq_ring_s *ring);
+void aq_ring_queue_wake(struct aq_ring_s *ring);
+void aq_ring_queue_stop(struct aq_ring_s *ring);
void aq_ring_tx_clean(struct aq_ring_s *self);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ebf588004c46..305ff8ffac2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (ring[AQ_VEC_TX_ID].sw_head !=
ring[AQ_VEC_TX_ID].hw_head) {
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
-
- if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
- AQ_CFG_SKB_FRAGS_MAX) {
- aq_nic_ndev_queue_start(self->aq_nic,
- ring[AQ_VEC_TX_ID].idx);
- }
+ aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
was_tx_cleaned = true;
}
@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
stats_tx->errors += tx->errors;
+ stats_tx->queue_restarts += tx->queue_restarts;
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index f3957e930340..fcf89e25a773 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -16,7 +16,7 @@
#include "../aq_common.h"
-#define HW_ATL_B0_MTU_JUMBO (16000U)
+#define HW_ATL_B0_MTU_JUMBO 16352U
#define HW_ATL_B0_MTU 1514U
#define HW_ATL_B0_TX_RINGS 4U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 4f5ec9a0fbfb..bf734b32e44b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
break;
default:
- link_status->mbps = 0U;
- break;
+ return -EBUSY;
}
}
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index cec94bbb2ea5..8bc126a156e8 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
- return -ENOMEM;
+ goto error;
n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 49b80da51ba7..805ab45e9b5a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
return true;
default:
bpf_warn_invalid_xdp_action(action);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
+ /* fall through */
case XDP_DROP:
/* Check if it's a recycled page, if not
* unmap the DMA mapping.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e0685e630afe..c1cdbfd83bdb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2652,7 +2652,8 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
dev_err(&hdev->pdev->dev,
"Configure rss tc size failed, invalid TC_SIZE = %d\n",
rss_size);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
roundup_size = roundup_pow_of_two(rss_size);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 523f9d05a810..8a32eb7d47b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
**/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
-#ifndef CONFIG_SPARC
- u32 regval;
- u32 i;
-#endif
s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw);
-
-#ifndef CONFIG_SPARC
- /* Disable relaxed ordering */
- for (i = 0; ((i < hw->mac.max_tx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
- }
-
- for (i = 0; ((i < hw->mac.max_rx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2c19070d2a0b..6e6ab6f6875e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
- /* Disable relaxed ordering */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
- }
-
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 72c565712a5f..c3e7a8191128 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
- int i, err = 0;
+ int i, j, err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
/* allocate temporary buffer to store rings in */
- i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
- i = max_t(int, i, adapter->num_xdp_queues);
+ i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+ adapter->num_rx_queues);
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- memcpy(&temp_ring[i], adapter->xdp_ring[i],
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ memcpy(&temp_ring[i], adapter->xdp_ring[j],
sizeof(struct ixgbe_ring));
temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(adapter->tx_ring[i], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ ixgbe_free_tx_resources(adapter->xdp_ring[j]);
- memcpy(adapter->xdp_ring[i], &temp_ring[i],
+ memcpy(adapter->xdp_ring[j], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d962368d08d0..4d76afd13868 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
return;
- vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+ vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return ixgbe_ptp_set_ts_config(adapter, req);
case SIOCGHWTSTAMP:
return ixgbe_ptp_get_ts_config(adapter, req);
+ case SIOCGMIIPHY:
+ if (!adapter->hw.phy.ops.read_reg)
+ return -EOPNOTSUPP;
+ /* fall through */
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
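The VXLANCTRL change is a one-character fix; a tiny standalone illustration of why "reg && ~mask" (logical AND) yields a boolean rather than clearing bits, while "reg & ~mask" does what was intended:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0x0000ffff, mask = 0x000000f0;

	printf("logical: 0x%08x\n", (unsigned int)(reg && ~mask));	/* 0x00000001 - a truth value, not a register */
	printf("bitwise: 0x%08x\n", (unsigned int)(reg & ~mask));	/* 0x0000ff0f - the masked bits cleared */
	return 0;
}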
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index dd0ee2691c86..9c86cb7cb988 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -333,7 +333,7 @@
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
-#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
+#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
@@ -676,6 +676,7 @@ enum mvpp2_tag_type {
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
+#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
@@ -792,6 +793,7 @@ struct mvpp2 {
struct clk *pp_clk;
struct clk *gop_clk;
struct clk *mg_clk;
+ struct clk *axi_clk;
/* List of pointers to port structures */
struct mvpp2_port **port_list;
@@ -2315,7 +2317,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
(proto != IPPROTO_IGMP))
return -EINVAL;
- /* Fragmented packet */
+ /* Not fragmented packet */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
@@ -2334,8 +2336,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
- ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK_L);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
@@ -2346,7 +2352,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
mvpp2_prs_hw_write(priv, &pe);
- /* Not fragmented packet */
+ /* Fragmented packet */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
@@ -2358,8 +2364,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
- mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
- mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
+ ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
@@ -4591,7 +4600,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
- val |= MVPP2_GMAC_PORT_RGMII_MASK;
}
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
@@ -7496,7 +7504,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node,
- struct mvpp2 *priv)
+ struct mvpp2 *priv, int index)
{
struct device_node *phy_node;
struct phy *comphy;
@@ -7670,7 +7678,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- priv->port_list[id] = port;
+ priv->port_list[index] = port;
return 0;
err_free_port_pcpu:
@@ -7963,6 +7971,18 @@ static int mvpp2_probe(struct platform_device *pdev)
err = clk_prepare_enable(priv->mg_clk);
if (err < 0)
goto err_gop_clk;
+
+ priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+ if (IS_ERR(priv->axi_clk)) {
+ err = PTR_ERR(priv->axi_clk);
+ if (err == -EPROBE_DEFER)
+ goto err_gop_clk;
+ priv->axi_clk = NULL;
+ } else {
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
}
/* Get system's tclk rate */
@@ -8005,16 +8025,19 @@ static int mvpp2_probe(struct platform_device *pdev)
}
/* Initialize ports */
+ i = 0;
for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv);
+ err = mvpp2_port_probe(pdev, port_node, priv, i);
if (err < 0)
goto err_mg_clk;
+ i++;
}
platform_set_drvdata(pdev, priv);
return 0;
err_mg_clk:
+ clk_disable_unprepare(priv->axi_clk);
if (priv->hw_version == MVPP22)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
@@ -8052,6 +8075,7 @@ static int mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
+ clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 1e3a6c3e4132..80eef4163f52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,
- TP_PROTO(const struct fs_fte *fte, bool new_fte),
+ TP_PROTO(const struct fs_fte *fte, int new_fte),
TP_ARGS(fte, new_fte),
TP_STRUCT__entry(
__field(const struct fs_fte *, fte)
@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
__field(u32, action)
__field(u32, flow_tag)
__field(u8, mask_enable)
- __field(bool, new_fte)
+ __field(int, new_fte)
__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f11fd07ac4dd..850cdc980ab5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_any_vid_rules(priv);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_any_vid_rules(priv);
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dfc29720ab77..cc11bbbd0309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
struct mlx5e_sw_stats temp, *s = &temp;
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_sq_stats *sq_stats;
- u64 tx_offload_none = 0;
int i, j;
memset(s, 0, sizeof(*s));
@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_xdp_drop += rq_stats->xdp_drop;
s->rx_xdp_tx += rq_stats->xdp_tx;
@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_queue_dropped += sq_stats->dropped;
s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
- tx_offload_none += sq_stats->csum_none;
+ s->tx_csum_none += sq_stats->csum_none;
+ s->tx_csum_partial += sq_stats->csum_partial;
}
}
- /* Update calculated offload counters */
- s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
- s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
-
s->link_down_events_phy = MLX5_GET(ppcnt_reg,
priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev,
err = feature_handler(netdev, enable);
if (err) {
- netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
- enable ? "Enable" : "Disable", feature, err);
+ netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f1dd638384d3..15a1687483cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rq->stats.csum_unnecessary++;
return;
}
@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->csum_level = 1;
skb->encapsulation = 1;
rq->stats.csum_unnecessary_inner++;
+ return;
}
+ rq->stats.csum_unnecessary++;
return;
}
csum_none:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 6d199ffb1c0b..f8637213afc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -68,6 +68,7 @@ struct mlx5e_sw_stats {
u64 rx_xdp_drop;
u64 rx_xdp_tx;
u64 rx_xdp_tx_full;
+ u64 tx_csum_none;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -339,6 +341,7 @@ struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
u64 csum_complete;
+ u64 csum_unnecessary;
u64 csum_unnecessary_inner;
u64 csum_none;
u64 lro_packets;
@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
@@ -392,6 +396,7 @@ struct mlx5e_sq_stats {
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
+ u64 csum_partial;
u64 csum_partial_inner;
u64 nop;
/* less likely accessed in data path */
@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index da503e6411da..1aa2028ed995 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
return true;
}
+static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+ struct tcf_exts *exts)
+{
+ const struct tc_action *a;
+ bool modify_ip_header;
+ LIST_HEAD(actions);
+ u8 htype, ip_proto;
+ void *headers_v;
+ u16 ethertype;
+ int nkeys, i;
+
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+ ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+
+ /* for non-IP we only re-write MACs, so we're okay */
+ if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
+ goto out_ok;
+
+ modify_ip_header = false;
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (!is_tcf_pedit(a))
+ continue;
+
+ nkeys = tcf_pedit_nkeys(a);
+ for (i = 0; i < nkeys; i++) {
+ htype = tcf_pedit_htype(a, i);
+ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
+ htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
+ modify_ip_header = true;
+ break;
+ }
+ }
+ }
+
+ ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+ if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+ return false;
+ }
+
+out_ok:
+ return true;
+}
+
+static bool actions_match_supported(struct mlx5e_priv *priv,
+ struct tcf_exts *exts,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
+{
+ u32 actions;
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ actions = flow->esw_attr->action;
+ else
+ actions = flow->nic_attr->action;
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ return modify_header_match_supported(&parse_attr->spec, exts);
+
+ return true;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+ if (!actions_match_supported(priv, exts, parse_attr, flow))
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto free_encap;
}
fl4.flowi4_tos = tun_key->tos;
fl4.daddr = tun_key->u.ipv4.dst;
@@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
&fl4, &n, &ttl);
if (err)
- goto out;
+ goto free_encap;
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
*/
err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
if (err)
- goto out;
+ goto free_encap;
read_lock_bh(&n->lock);
nud_state = n->nud_state;
@@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
kfree(encap_header);
+out:
if (n)
neigh_release(n);
return err;
@@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto free_encap;
}
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
@@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
&fl6, &n, &ttl);
if (err)
- goto out;
+ goto free_encap;
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
*/
err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
if (err)
- goto out;
+ goto free_encap;
read_lock_bh(&n->lock);
nud_state = n->nud_state;
@@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
kfree(encap_header);
+out:
if (n)
neigh_release(n);
return err;
@@ -1791,6 +1859,7 @@ vxlan_encap_offload_err:
}
}
+ /* must verify if encap is valid or not */
if (found)
goto attach_flow;
@@ -1817,6 +1886,8 @@ attach_flow:
*encap_dev = e->out_dev;
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
attr->encap_id = e->encap_id;
+ else
+ err = -EAGAIN;
return err;
@@ -1934,6 +2005,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+
+ if (!actions_match_supported(priv, exts, parse_attr, flow))
+ return -EOPNOTSUPP;
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index fee43e40fa16..1d6925d4369a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ sq->stats.csum_partial++;
}
} else
sq->stats.csum_none++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index e37453d838db..c0fd2212e890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
return 0;
}
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+int mlx5_fpga_caps(struct mlx5_core_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
- return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+ return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga,
MLX5_ST_SZ_BYTES(fpga_cap),
MLX5_REG_FPGA_CAP, 0, 0);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index 94bdfd47c3f0..d05233c9b4f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters {
u64 rx_total_drop;
};
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_caps(struct mlx5_core_dev *dev);
int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 9034e9960a76..dc8970346521 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
if (err)
goto out;
- err = mlx5_fpga_caps(fdev->mdev,
- fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+ err = mlx5_fpga_caps(fdev->mdev);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index e0d0efd903bc..36ecc2b2e187 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
}
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+ log_max_flow_counter,
+ ft->type));
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
list_size++;
}
+ if (list_size > max_list_size) {
+ err = -EINVAL;
+ goto err_out;
+ }
MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
list_size);
}
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
kvfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 5509a752f98e..48dd78975062 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -52,6 +52,7 @@ enum fs_flow_table_type {
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
};
enum fs_flow_table_op_mod {
@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
#define fs_for_each_dst(pos, fte) \
fs_list_for_each_entry(pos, &(fte)->node.children)
+#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
+ (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
+ (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
+ (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
+ (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
+ (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+ )
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 85298051a3e4..145e392ab849 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
const struct mlx5e_profile *profile = priv->profile;
+ struct mlx5_core_dev *mdev = priv->mdev;
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
destroy_workqueue(priv->wq);
free_netdev(netdev);
- mlx5e_destroy_mdev_resources(priv->mdev);
+ mlx5e_destroy_mdev_resources(mdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 6c48e9959b65..2a8b529ce6dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
mlx5_core_warn(dev,
"failed to restore VF %d settings, err %d\n",
vf, err);
- continue;
+ continue;
}
}
mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2cfb3f5d092d..c16718d296d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_rif_fini(nh);
break;
case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ mlxsw_sp_nexthop_rif_fini(nh);
mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
break;
}
@@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV4)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ if (err)
+ return err;
+ mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+ return 0;
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3500,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
- struct mlxsw_sp_lpm_tree *lpm_tree;
-
- /* Aggregate prefix lengths across all virtual routers to make
- * sure we only have used prefix lengths in the LPM tree.
- */
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- fib->proto);
- if (IS_ERR(lpm_tree))
- goto err_tree_get;
- mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
return;
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
@@ -4009,7 +4000,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV6)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ if (err)
+ return err;
+ mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+ return 0;
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -5068,6 +5063,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
if (IS_ERR(vr))
return ERR_CAST(vr);
+ vr->rif_count++;
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
if (err)
@@ -5099,7 +5095,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counters_alloc(rif);
mlxsw_sp->router->rifs[rif_index] = rif;
- vr->rif_count++;
return rif;
@@ -5110,6 +5105,7 @@ err_fid_get:
kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
+ vr->rif_count--;
mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
@@ -5124,7 +5120,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- vr->rif_count--;
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
ops->deconfigure(rif);
@@ -5132,6 +5127,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
kfree(rif);
+ vr->rif_count--;
mlxsw_sp_vr_put(vr);
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 0ea3ca09c689..3ed9033e56db 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -898,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
curr_rxbuf->dma_addr =
dma_map_single(adpt->netdev->dev.parent, skb->data,
- curr_rxbuf->length, DMA_FROM_DEVICE);
+ adpt->rxbuf_size, DMA_FROM_DEVICE);
+
ret = dma_mapping_error(adpt->netdev->dev.parent,
curr_rxbuf->dma_addr);
if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 98f22551eb45..1e33aea59f50 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -51,10 +51,7 @@ struct rmnet_walk_data {
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
- rx_handler_func_t *rx_handler;
-
- rx_handler = rcu_dereference(real_dev->rx_handler);
- return (rx_handler == rmnet_rx_handler);
+ return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
/* Needs rtnl lock */
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
index a63ef82e7c72..dfae3c9d57c6 100644
--- a/drivers/net/ethernet/rocker/rocker_tlv.h
+++ b/drivers/net/ethernet/rocker/rocker_tlv.h
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
int rocker_tlv_put(struct rocker_desc_info *desc_info,
int attrtype, int attrlen, const void *data);
-static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
- int attrtype, u8 value)
+static inline int
+rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+ u8 tmp = value; /* work around GCC PR81715 */
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
}
-static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
- int attrtype, u16 value)
+static inline int
+rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
}
-static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
- int attrtype, __be16 value)
+static inline int
+rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
}
-static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
- int attrtype, u32 value)
+static inline int
+rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
}
-static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
- int attrtype, __be32 value)
+static inline int
+rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
}
-static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
- int attrtype, u64 value)
+static inline int
+rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+ u64 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
}
static inline struct rocker_tlv *
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index dd6a2f9791cc..5efef8001edf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = {
.remove = dwc_eth_dwmac_remove,
.driver = {
.name = "dwc-eth-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = dwc_eth_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 99823f54696a..13133b30b575 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -83,6 +83,117 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK3128_GRF_MAC_CON0 0x0168
+#define RK3128_GRF_MAC_CON1 0x016c
+
+/* RK3128_GRF_MAC_CON0 */
+#define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3128_GRF_MAC_CON1 */
+#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
+#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
+#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
+#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
+#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
+#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
+#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
+#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
+
+static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_PHY_INTF_SEL_RGMII |
+ RK3128_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
+ DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
+ RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+}
+
+static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_RMII_CLK_2_5M |
+ RK3128_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_RMII_CLK_25M |
+ RK3128_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3128_ops = {
+ .set_to_rgmii = rk3128_set_to_rgmii,
+ .set_to_rmii = rk3128_set_to_rmii,
+ .set_rgmii_speed = rk3128_set_rgmii_speed,
+ .set_rmii_speed = rk3128_set_rmii_speed,
+};
+
#define RK3228_GRF_MAC_CON0 0x0900
#define RK3228_GRF_MAC_CON1 0x0904
@@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
{ .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c4407e8e39a3..2f7d7ec59962 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
void __iomem *ioaddr = hw->pcsr;
unsigned int pmt = 0;
+ u32 config;
if (mode & WAKE_MAGIC) {
pr_debug("GMAC: WOL Magic frame\n");
@@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
pmt |= power_down | global_unicast | wake_up_frame_en;
}
+ if (pmt) {
+ /* The receiver must be enabled for WOL before powering down */
+ config = readl(ioaddr + GMAC_CONFIG);
+ config |= GMAC_CONFIG_RE;
+ writel(config, ioaddr + GMAC_CONFIG);
+ }
writel(pmt, ioaddr + GMAC_PMT);
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a404552555d4..e365866600ba 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,7 +120,7 @@ struct ppp {
int n_channels; /* how many channels are attached 54 */
spinlock_t rlock; /* lock for receive side 58 */
spinlock_t wlock; /* lock for transmit side 5c */
- int *xmit_recursion __percpu; /* xmit recursion detect */
+ int __percpu *xmit_recursion; /* xmit recursion detect */
int mru; /* max receive unit 60 */
unsigned int flags; /* control bits 64 */
unsigned int xstate; /* transmit state bits 68 */
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
static int ppp_dev_init(struct net_device *dev)
{
+ struct ppp *ppp;
+
netdev_lockdep_set_classes(dev);
+
+ ppp = netdev_priv(dev);
+ /* Let the netdevice take a reference on the ppp file. This ensures
+ * that ppp_destroy_interface() won't run before the device gets
+ * unregistered.
+ */
+ atomic_inc(&ppp->file.refcnt);
+
return 0;
}
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
wake_up_interruptible(&ppp->file.rwait);
}
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ ppp = netdev_priv(dev);
+ if (atomic_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->priv_destructor = ppp_dev_priv_destructor;
netif_keep_dst(dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3c9985f29950..5ce580f413b9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1496,11 +1496,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
if (tun->flags & IFF_NO_PI) {
- switch (skb->data[0] & 0xf0) {
- case 0x40:
+ u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+ switch (ip_version) {
+ case 4:
pi.proto = htons(ETH_P_IP);
break;
- case 0x60:
+ case 6:
pi.proto = htons(ETH_P_IPV6);
break;
default:
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8ab281b478f2..52ea80bcd639 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
desc->bInterfaceProtocol == 3);
}
+static int is_novatel_rndis(struct usb_interface_descriptor *desc)
+{
+ return (desc->bInterfaceClass == USB_CLASS_MISC &&
+ desc->bInterfaceSubClass == 4 &&
+ desc->bInterfaceProtocol == 1);
+}
+
#else
#define is_rndis(desc) 0
#define is_activesync(desc) 0
#define is_wireless_rndis(desc) 0
+#define is_novatel_rndis(desc) 0
#endif
@@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
*/
rndis = (is_rndis(&intf->cur_altsetting->desc) ||
is_activesync(&intf->cur_altsetting->desc) ||
- is_wireless_rndis(&intf->cur_altsetting->desc));
+ is_wireless_rndis(&intf->cur_altsetting->desc) ||
+ is_novatel_rndis(&intf->cur_altsetting->desc));
memset(info, 0, sizeof(*info));
info->control = intf;
@@ -547,9 +556,11 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
+#define UBLOX_VENDOR_ID 0x1546
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -737,6 +748,15 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
@@ -850,6 +870,18 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&zte_cdc_info,
}, {
+ /* U-blox TOBY-L2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
+ /* U-blox SARA-U2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ceb78e2ea4f0..941ece08ba78 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -613,6 +613,7 @@ enum rtl8152_flags {
#define VENDOR_ID_MICROSOFT 0x045e
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
@@ -5316,6 +5317,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a151f267aebb..b807c91abe1d 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -632,6 +632,10 @@ static const struct usb_device_id products [] = {
/* RNDIS for tethering */
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
.driver_info = (unsigned long) &rndis_info,
+}, {
+ /* Novatel Verizon USB730L */
+ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
+ .driver_info = (unsigned long) &rndis_info,
},
{ }, // END
};
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bc1633945a56..195dafb98131 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
-#ifdef CONFIG_PM
-
-static int ath10k_pci_pm_suspend(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret;
@@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev)
return ret;
}
-static int ath10k_pci_pm_resume(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret;
@@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
ath10k_pci_pm_suspend,
ath10k_pci_pm_resume);
-#endif
static struct pci_driver ath10k_pci_driver = {
.name = "ath10k_pci",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index aaed4ab503ad..4157c90ad973 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
eth_broadcast_addr(params_le->bssid);
params_le->bss_type = DOT11_BSSTYPE_ANY;
- params_le->scan_type = 0;
+ params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
params_le->channel_num = 0;
params_le->nprobes = cpu_to_le32(-1);
params_le->active_time = cpu_to_le32(-1);
@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
params_le->home_time = cpu_to_le32(-1);
memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
- /* if request is null exit so it will be all channel broadcast scan */
- if (!request)
- return;
-
n_ssids = request->n_ssids;
n_channels = request->n_channels;
+
/* Copy channel array if applicable */
brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
n_channels);
@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
ptr += sizeof(ssid_le);
}
} else {
- brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
- if ((request->ssids) && request->ssids->ssid_len) {
- brcmf_dbg(SCAN, "SSID %s len=%d\n",
- params_le->ssid_le.SSID,
- request->ssids->ssid_len);
- params_le->ssid_le.SSID_len =
- cpu_to_le32(request->ssids->ssid_len);
- memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
- request->ssids->ssid_len);
- }
+ brcmf_dbg(SCAN, "Performing passive scan\n");
+ params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
}
/* Adding mask to channel numbers */
params_le->channel_num =
@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 status;
struct brcmf_escan_result_le *escan_result_le;
+ u32 escan_buflen;
struct brcmf_bss_info_le *bss_info_le;
struct brcmf_bss_info_le *bss = NULL;
u32 bi_length;
@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
if (status == BRCMF_E_STATUS_PARTIAL) {
brcmf_dbg(SCAN, "ESCAN Partial result\n");
+ if (e->datalen < sizeof(*escan_result_le)) {
+ brcmf_err("invalid event data length\n");
+ goto exit;
+ }
escan_result_le = (struct brcmf_escan_result_le *) data;
if (!escan_result_le) {
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
+ escan_buflen = le32_to_cpu(escan_result_le->buflen);
+ if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
+ escan_buflen > e->datalen ||
+ escan_buflen < sizeof(*escan_result_le)) {
+ brcmf_err("Invalid escan buffer length: %d\n",
+ escan_buflen);
+ goto exit;
+ }
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
}
bi_length = le32_to_cpu(bss_info_le->length);
- if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
- WL_ESCAN_RESULTS_FIXED_SIZE)) {
- brcmf_err("Invalid bss_info length %d: ignoring\n",
+ if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+ brcmf_err("Ignoring invalid bss_info length: %d\n",
bi_length);
goto exit;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 8391989b1882..e0d22fedb2b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -45,6 +45,11 @@
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT 0xFF
+#define BRCMF_SCANTYPE_ACTIVE 0
+#define BRCMF_SCANTYPE_PASSIVE 1
+
#define BRCMF_WSEC_MAX_PSK_LEN 32
#define BRCMF_WSEC_PASSPHRASE BIT(0)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 5de19ea10575..b205a7bfb828 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2167,7 +2167,7 @@ out:
* 1. We are not using a unified image
* 2. We are using a unified image but had an error while exiting D3
*/
- set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
/*
* When switching images we return 1, which causes mac80211
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 15f2d826bb4b..3bcaa82f59b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1546,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
struct iwl_mvm_mc_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .flags = CMD_ASYNC,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
int ret, len;
/* if we don't have free ports, mcast frames will be dropped */
@@ -1560,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
@@ -1635,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
if (!cmd)
goto out;
+ if (changed_flags & FIF_ALLMULTI)
+ cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+ if (cmd->pass_all)
+ cmd->count = 0;
+
iwl_mvm_recalc_multicast(mvm);
out:
mutex_unlock(&mvm->mutex);
@@ -2563,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
* queues, so we should never get a second deferred
* frame for the RA/TID.
*/
- iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+ iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
ieee80211_free_txskb(mvm->hw, skb);
}
}
@@ -3975,6 +3989,43 @@ out_unlock:
return ret;
}
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+ if (drop) {
+ if (iwl_mvm_has_new_tx_api(mvm))
+ /* TODO new tx api */
+ WARN_ONCE(1,
+ "Need to implement flush TX queue\n");
+ else
+ iwl_mvm_flush_tx_path(mvm,
+ iwl_mvm_flushable_queues(mvm) & queues,
+ 0);
+ } else {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct ieee80211_sta *sta;
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ iwl_mvm_wait_sta_queues_empty(mvm,
+ iwl_mvm_sta_from_mac80211(sta));
+ }
+
+ mutex_unlock(&mvm->mutex);
+ } else {
+ iwl_trans_wait_tx_queues_empty(mvm->trans,
+ queues);
+ }
+ }
+}
+
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u32 queues, bool drop)
{
@@ -3985,7 +4036,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
int i;
u32 msk = 0;
- if (!vif || vif->type != NL80211_IFTYPE_STATION)
+ if (!vif) {
+ iwl_mvm_flush_no_vif(mvm, queues, drop);
+ return;
+ }
+
+ if (vif->type != NL80211_IFTYPE_STATION)
return;
/* Make sure we're done with the deferred traffic before flushing */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index ba7bd049d3d4..0fe723ca844e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
(lq_sta->tx_agg_tid_en & BIT(tid)) &&
(tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
- rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta);
+ if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+ tid_data->state = IWL_AGG_QUEUED;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 67ffd9774712..77f77bc5d083 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -672,11 +672,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into
* the reorder buffer, in which case we just release up to it and the
- * rest of the function will take of storing it and releasing up to the
- * nssn
+ * rest of the function will take care of storing it and releasing up to
+ * the nssn
*/
if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
- buffer->buf_size)) {
+ buffer->buf_size) ||
+ !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 50983615dce6..774122fed454 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_ABORT_CMD,
};
- u32 status;
+ u32 status = CAN_ABORT_STATUS;
ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 411a2055dc45..c4a343534c5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
{
struct iwl_mvm_add_sta_cmd cmd;
int ret;
- u32 status;
+ u32 status = ADD_STA_SUCCESS;
lockdep_assert_held(&mvm->mutex);
@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
- IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+ if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+ mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+ IWL_ERR(mvm,
+ "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
mvmsta->tid_data[tid].state);
return -ENXIO;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d13893806513..aedabe101cf0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -281,6 +281,7 @@ struct iwl_mvm_vif;
* These states relate to a specific RA / TID.
*
* @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
* @IWL_AGG_STARTING: aggregation are starting (between start and oper)
* @IWL_AGG_ON: aggregation session is up
* @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
@@ -290,6 +291,7 @@ struct iwl_mvm_vif;
*/
enum iwl_mvm_agg_state {
IWL_AGG_OFF = 0,
+ IWL_AGG_QUEUED,
IWL_AGG_STARTING,
IWL_AGG_ON,
IWL_EMPTYING_HW_QUEUE_ADDBA,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 8876c2abc440..4d907f60bce9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
lockdep_assert_held(&mvm->mutex);
+ status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
CTDP_CONFIG_CMD),
sizeof(cmd), &cmd, &status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 172b5e63d3fb..6f2e2af23219 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
/*
- * Handle legacy hostapd as well, where station will be added
- * only just before sending the association response.
+ * Non-bufferable frames use the broadcast station, thus they
+ * use the probe queue.
* Also take care of the case where we send a deauth to a
* station that we don't have, or similarly an association
* response (with non-success status) for a station we can't
@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
* Also, disassociate frames might happen, particular with
* reason 7 ("Class 3 frame received from nonassociated STA").
*/
- if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
- ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) ||
- ieee80211_is_disassoc(fc))
+ if (ieee80211_is_mgmt(fc) &&
+ (!ieee80211_is_bufferable_mmpdu(fc) ||
+ ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
return mvm->probe_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return mvmvif->cab_queue;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 856fa6e8327e..a450bc6bc774 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
vif = qtnf_netdev_get_priv(wdev->netdev);
+ qtnf_scan_done(vif->mac, true);
+
if (qtnf_cmd_send_del_intf(vif))
pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
vif->vifid);
@@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
+ qtnf_scan_done(vif->mac, true);
+
ret = qtnf_cmd_send_stop_ap(vif);
if (ret) {
pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
@@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
!qtnf_sta_list_lookup(&vif->sta_list, params->mac))
return 0;
- qtnf_scan_done(vif->mac, true);
-
ret = qtnf_cmd_send_del_sta(vif, params);
if (ret)
pr_err("VIF%u.%u: failed to delete STA %pM\n",
@@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
}
vif->sta_state = QTNF_STA_DISCONNECTED;
- qtnf_scan_done(mac, true);
}
+
+ qtnf_scan_done(mac, true);
}
void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index 6a4af52522b8..66db26613b1f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
.aborted = aborted,
};
+ if (timer_pending(&mac->scan_timeout))
+ del_timer_sync(&mac->scan_timeout);
+
mutex_lock(&mac->mac_lock);
if (mac->scan_req) {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 0fc2814eafad..43d2e7fd6e02 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac,
return -EINVAL;
}
- if (timer_pending(&mac->scan_timeout))
- del_timer_sync(&mac->scan_timeout);
qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED);
return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 502e72b7cdcc..69131965a298 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
dma_addr_t txbd_paddr, skb_paddr;
struct qtnf_tx_bd *txbd;
+ unsigned long flags;
int len, i;
u32 info;
int ret = 0;
+ spin_lock_irqsave(&priv->tx0_lock, flags);
+
if (!qtnf_tx_queue_ready(priv)) {
if (skb->dev)
netif_stop_queue(skb->dev);
+ spin_unlock_irqrestore(&priv->tx0_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -717,8 +721,10 @@ tx_done:
dev_kfree_skb_any(skb);
}
- qtnf_pcie_data_tx_reclaim(priv);
priv->tx_done_count++;
+ spin_unlock_irqrestore(&priv->tx0_lock, flags);
+
+ qtnf_pcie_data_tx_reclaim(priv);
return NETDEV_TX_OK;
}
@@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
init_completion(&bus->request_firmware_complete);
mutex_init(&bus->bus_lock);
+ spin_lock_init(&pcie_priv->tx0_lock);
spin_lock_init(&pcie_priv->irq_lock);
spin_lock_init(&pcie_priv->tx_reclaim_lock);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
index e76a23716ee0..86ac1ccedb52 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
@@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv {
/* lock for tx reclaim operations */
spinlock_t tx_reclaim_lock;
+ /* lock for tx0 operations */
+ spinlock_t tx0_lock;
u8 msi_enabled;
int mps;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bb2aad078637..5a14cc7f28ee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2136,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
if (a == &dev_attr_uuid.attr) {
- if (uuid_is_null(&ns->uuid) ||
+ if (uuid_is_null(&ns->uuid) &&
!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return 0;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cb73bc8cad3b..3f5a04c586ce 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -94,7 +94,7 @@ struct nvme_dev {
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
- dma_addr_t cmb_dma_addr;
+ pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
u32 cmbsz;
u32 cmbloc;
@@ -1226,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+ nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
nvmeq->sq_cmds_io = dev->cmb + offset;
} else {
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1527,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
- dma_addr_t dma_addr;
+ int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1540,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
offset = szu * NVME_CMB_OFST(dev->cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+ bar = NVME_CMB_BIR(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
return NULL;
@@ -1553,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
- cmb = ioremap_wc(dma_addr, size);
+ cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
if (!cmb)
return NULL;
- dev->cmb_dma_addr = dma_addr;
+ dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
return cmb;
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 260d33c0f26c..63897531cd75 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
{
if (!dn || dn != of_stdout || console_set_on_cmdline)
return false;
- return !add_preferred_console(name, index,
- kstrdup(of_stdout_options, GFP_KERNEL));
+
+ /*
+ * XXX: cast `options' to char pointer to suppress compilation
+ * warnings: printk, UART and console drivers expect char pointer.
+ */
+ return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index d507c3569a88..32771c2ced7b 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,7 +25,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
-#define MAX_RESERVED_REGIONS 16
+#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index fbb72116e9d4..264c355ba1ff 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
struct device_node *np;
/* Get the parent of the port */
- np = of_get_next_parent(to_of_node(fwnode));
+ np = of_get_parent(to_of_node(fwnode));
if (!np)
return NULL;
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 89f4e3d072d7..26ed0c08f209 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
bridge->sysdata = pcie;
bridge->busnr = 0;
bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0) {
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 9c40da54f88a..1987fec1f126 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,6 +233,7 @@ struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
+ unsigned long pages;
struct mutex lock;
u64 phys;
int irq;
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
goto err;
}
- /*
- * The PCI host bridge on Tegra contains some logic that intercepts
- * MSI writes, which means that the MSI target address doesn't have
- * to point to actual physical memory. Rather than allocating one 4
- * KiB page of system memory that's never used, we can simply pick
- * an arbitrary address within an area reserved for system memory
- * in the FPCI address map.
- *
- * However, in order to avoid confusion, we pick an address that
- * doesn't map to physical memory. The FPCI address map reserves a
- * 1012 GiB region for system memory and memory-mapped I/O. Since
- * none of the Tegra SoCs that contain this PCI host bridge can
- * address more than 16 GiB of system memory, the last 4 KiB of
- * these 1012 GiB is a good candidate.
- */
- msi->phys = 0xfcfffff000;
+ /* setup AFI/FPCI range */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ msi->phys = virt_to_phys((void *)msi->pages);
afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+ free_pages(msi->pages, 0);
+
if (msi->irq > 0)
free_irq(msi->irq, pcie);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1778cf4f81c7..82cd8b08d71f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -100,6 +100,7 @@ config PINCTRL_AMD
tristate "AMD GPIO pin control"
depends on GPIOLIB
select GPIOLIB_IRQCHIP
+ select PINMUX
select PINCONF
select GENERIC_PINCONF
help
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0944310225db..ff782445dfb7 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
unsigned long events;
unsigned offset;
unsigned gpio;
- unsigned int type;
events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
events &= mask;
events &= pc->enabled_irq_map[bank];
for_each_set_bit(offset, &events, 32) {
gpio = (32 * bank) + offset;
- /* FIXME: no clue why the code looks up the type here */
- type = pc->irq_type[gpio];
-
generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
gpio));
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 04e929fd0ffe..fadbca907c7c 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
struct gpio_chip *chip = &pctrl->chip;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
+ int irq_base;
*chip = chv_gpio_chip;
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
- ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+ if (!need_valid_mask) {
+ irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+ chip->ngpio, NUMA_NO_NODE);
+ if (irq_base < 0) {
+ dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+ return irq_base;
+ }
+ } else {
+ irq_base = 0;
+ }
+
+ ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 315a4be8dc1e..9a68914100ad 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO);
MODULE_PARM_DESC(mbox_sel,
"RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
+static DEFINE_SPINLOCK(tsi721_maint_lock);
+
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
@@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
struct tsi721_dma_desc *bd_ptr;
u32 rd_count, swr_ptr, ch_stat;
+ unsigned long flags;
int i, err = 0;
u32 op = do_wr ? MAINT_WR : MAINT_RD;
if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
return -EINVAL;
+ spin_lock_irqsave(&tsi721_maint_lock, flags);
+
bd_ptr = priv->mdma.bd_base;
rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
@@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
*/
swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
+
err_out:
+ spin_unlock_irqrestore(&tsi721_maint_lock, flags);
return err;
}
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index a3824baca2e5..3ee9af83b638 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -14,16 +14,8 @@
#include <linux/module.h>
/*
- * These interrupt-safe spinlocks protect all accesses to RIO
- * configuration space and doorbell access.
- */
-static DEFINE_SPINLOCK(rio_config_lock);
-static DEFINE_SPINLOCK(rio_doorbell_lock);
-
-/*
* Wrappers for all RIO configuration access functions. They just check
- * alignment, do locking and call the low-level functions pointed to
- * by rio_mport->ops.
+ * alignment and call the low-level functions pointed to by rio_mport->ops.
*/
#define RIO_8_BAD 0
@@ -44,13 +36,10 @@ int __rio_local_read_config_##size \
(struct rio_mport *mport, u32 offset, type *value) \
{ \
int res; \
- unsigned long flags; \
u32 data = 0; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
@@ -67,13 +56,8 @@ int __rio_local_read_config_##size \
int __rio_local_write_config_##size \
(struct rio_mport *mport, u32 offset, type value) \
{ \
- int res; \
- unsigned long flags; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
- res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
- spin_unlock_irqrestore(&rio_config_lock, flags); \
- return res; \
+ return mport->ops->lcwrite(mport, mport->id, offset, len, value);\
}
RIO_LOP_READ(8, u8, 1)
@@ -104,13 +88,10 @@ int rio_mport_read_config_##size \
(struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \
{ \
int res; \
- unsigned long flags; \
u32 data = 0; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
@@ -127,13 +108,9 @@ int rio_mport_read_config_##size \
int rio_mport_write_config_##size \
(struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \
{ \
- int res; \
- unsigned long flags; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
- spin_lock_irqsave(&rio_config_lock, flags); \
- res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \
- spin_unlock_irqrestore(&rio_config_lock, flags); \
- return res; \
+ return mport->ops->cwrite(mport, mport->id, destid, hopcount, \
+ offset, len, value); \
}
RIO_OP_READ(8, u8, 1)
@@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32);
*/
int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
{
- int res;
- unsigned long flags;
-
- spin_lock_irqsave(&rio_doorbell_lock, flags);
- res = mport->ops->dsend(mport, mport->id, destid, data);
- spin_unlock_irqrestore(&rio_doorbell_lock, flags);
-
- return res;
+ return mport->ops->dsend(mport, mport->id, destid, data);
}
EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
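
The rio-access change above drops the global config-space and doorbell spinlocks, leaving the macro-generated wrappers to validate the access width and delegate straight to the mport ops. Below is a minimal userspace sketch of that same macro-generation pattern, not the RapidIO code itself; demo_ops, dummy_read and demo_read_N are hypothetical names.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical ops table standing in for rio_mport->ops. */
struct demo_ops {
	int (*read)(uint32_t offset, int len, uint32_t *data);
};

static int dummy_read(uint32_t offset, int len, uint32_t *data)
{
	*data = offset + len;	/* fake register contents */
	return 0;
}

static struct demo_ops ops = { .read = dummy_read };

/* Generate one typed wrapper per access width, as the RIO_*_READ macros do. */
#define DEMO_READ(size, type, len)					\
static int demo_read_##size(uint32_t offset, type *value)		\
{									\
	uint32_t data = 0;						\
	int res = ops.read(offset, len, &data);				\
	*value = (type)data;						\
	return res;							\
}

DEMO_READ(8, uint8_t, 1)
DEMO_READ(16, uint16_t, 2)
DEMO_READ(32, uint32_t, 4)

int main(void)
{
	uint8_t b; uint16_t h; uint32_t w;

	demo_read_8(0x10, &b);
	demo_read_16(0x10, &h);
	demo_read_32(0x10, &w);
	printf("%u %u %u\n", (unsigned)b, (unsigned)h, (unsigned)w);
	return 0;
}
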
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index d0e5d6ee882c..e2c1988cd7c0 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
if (*str == '=')
str++;
- if (!strncmp(str, "cec_disable", 7))
+ if (!strcmp(str, "cec_disable"))
ce_arr.disabled = 1;
else
return 0;
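
The cec.c hunk replaces a length-limited compare with an exact one: strncmp(str, "cec_disable", 7) only inspects "cec_dis", so any parameter sharing that prefix would have disabled the collector. A small standalone sketch of the difference (the strings are illustrative, not taken from real command lines):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *good = "cec_disable";
	const char *bogus = "cec_disxyz";

	/* Length-limited compare with the wrong length: both "match" (0). */
	printf("strncmp(good, 7)  = %d\n", strncmp(good,  "cec_disable", 7));
	printf("strncmp(bogus, 7) = %d\n", strncmp(bogus, "cec_disable", 7));

	/* Exact compare: only the full keyword matches. */
	printf("strcmp(good)  = %d\n", strcmp(good,  "cec_disable"));
	printf("strcmp(bogus) = %d\n", strcmp(bogus, "cec_disable"));
	return 0;
}
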
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index df63e44526ac..bf04479456a0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
select MFD_SYSCON
select QCOM_RPROC_COMMON
select QCOM_SCM
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL
tristate "Qualcomm WCNSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SMEM
select QCOM_MDT_LOADER
select QCOM_RPROC_COMMON
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 612d91403341..633268e9d550 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
if (!(att->flags & ATT_OWN))
continue;
- if (b > IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX7D_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
att->sa, att->size);
- if (IS_ERR(priv->mem[b].cpu_addr)) {
+ if (!priv->mem[b].cpu_addr) {
dev_err(dev, "devm_ioremap_resource failed\n");
- err = PTR_ERR(priv->mem[b].cpu_addr);
- return err;
+ return -ENOMEM;
}
priv->mem[b].sys_addr = att->sa;
priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
return err;
}
- if (b > IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX7D_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
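
The imx_rproc hunks fix two classic mistakes: an off-by-one bound check (b > MAX still allows indexing mem[MAX]) and testing an ioremap-style return with IS_ERR() when the function actually reports failure with NULL. A hedged userspace analogue, where MEM_MAX, struct region and map_region() are hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>

#define MEM_MAX 4	/* stand-in for IMX7D_RPROC_MEM_MAX */

struct region { void *cpu_addr; size_t size; };

/* Stand-in for devm_ioremap(): returns NULL on failure, never an ERR_PTR. */
static void *map_region(size_t size)
{
	return malloc(size);
}

int main(void)
{
	struct region mem[MEM_MAX];
	unsigned int b;

	for (b = 0; ; b++) {
		/* '>= MEM_MAX' stops before touching one element past the
		 * array; the original '> MEM_MAX' let mem[MEM_MAX] be written. */
		if (b >= MEM_MAX)
			break;

		mem[b].cpu_addr = map_region(4096);
		if (!mem[b].cpu_addr) {	/* NULL check, not IS_ERR() */
			fprintf(stderr, "mapping region %u failed\n", b);
			return 1;
		}
		mem[b].size = 4096;
	}

	for (b = 0; b < MEM_MAX; b++)
		free(mem[b].cpu_addr);
	printf("mapped %d regions\n", MEM_MAX);
	return 0;
}
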
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index e0c393214264..e2baecbb9dd3 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -34,11 +34,12 @@ config RESET_BERLIN
help
This enables the reset controller driver for Marvell Berlin SoCs.
-config RESET_HSDK_V1
- bool "HSDK v1 Reset Driver"
- default n
+config RESET_HSDK
+ bool "Synopsys HSDK Reset Driver"
+ depends on HAS_IOMEM
+ depends on ARC_SOC_HSDK || COMPILE_TEST
help
- This enables the reset controller driver for HSDK v1.
+ This enables the reset controller driver for HSDK board.
config RESET_IMX7
bool "i.MX7 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d368367110e5..af1c15c330b3 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
-obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o
+obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/reset-hsdk-v1.c b/drivers/reset/reset-hsdk.c
index bca13e4bf622..8bce391c6943 100644
--- a/drivers/reset/reset-hsdk-v1.c
+++ b/drivers/reset/reset-hsdk.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2017 Synopsys.
*
- * Synopsys HSDKv1 SDP reset driver.
+ * Synopsys HSDK Development platform reset driver.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -18,9 +18,9 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define to_hsdkv1_rst(p) container_of((p), struct hsdkv1_rst, rcdev)
+#define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev)
-struct hsdkv1_rst {
+struct hsdk_rst {
void __iomem *regs_ctl;
void __iomem *regs_rst;
spinlock_t lock;
@@ -49,12 +49,12 @@ static const u32 rst_map[] = {
#define CGU_IP_SW_RESET_RESET BIT(0)
#define SW_RESET_TIMEOUT 10000
-static void hsdkv1_reset_config(struct hsdkv1_rst *rst, unsigned long id)
+static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id)
{
writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL);
}
-static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
+static int hsdk_reset_do(struct hsdk_rst *rst)
{
u32 reg;
@@ -69,28 +69,28 @@ static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
!(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT);
}
-static int hsdkv1_reset_reset(struct reset_controller_dev *rcdev,
+static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct hsdkv1_rst *rst = to_hsdkv1_rst(rcdev);
+ struct hsdk_rst *rst = to_hsdk_rst(rcdev);
unsigned long flags;
int ret;
spin_lock_irqsave(&rst->lock, flags);
- hsdkv1_reset_config(rst, id);
- ret = hsdkv1_reset_do(rst);
+ hsdk_reset_config(rst, id);
+ ret = hsdk_reset_do(rst);
spin_unlock_irqrestore(&rst->lock, flags);
return ret;
}
-static const struct reset_control_ops hsdkv1_reset_ops = {
- .reset = hsdkv1_reset_reset,
+static const struct reset_control_ops hsdk_reset_ops = {
+ .reset = hsdk_reset_reset,
};
-static int hsdkv1_reset_probe(struct platform_device *pdev)
+static int hsdk_reset_probe(struct platform_device *pdev)
{
- struct hsdkv1_rst *rst;
+ struct hsdk_rst *rst;
struct resource *mem;
rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
@@ -110,7 +110,7 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
spin_lock_init(&rst->lock);
rst->rcdev.owner = THIS_MODULE;
- rst->rcdev.ops = &hsdkv1_reset_ops;
+ rst->rcdev.ops = &hsdk_reset_ops;
rst->rcdev.of_node = pdev->dev.of_node;
rst->rcdev.nr_resets = HSDK_MAX_RESETS;
rst->rcdev.of_reset_n_cells = 1;
@@ -118,20 +118,20 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
return reset_controller_register(&rst->rcdev);
}
-static const struct of_device_id hsdkv1_reset_dt_match[] = {
- { .compatible = "snps,hsdk-v1.0-reset" },
+static const struct of_device_id hsdk_reset_dt_match[] = {
+ { .compatible = "snps,hsdk-reset" },
{ },
};
-static struct platform_driver hsdkv1_reset_driver = {
- .probe = hsdkv1_reset_probe,
+static struct platform_driver hsdk_reset_driver = {
+ .probe = hsdk_reset_probe,
.driver = {
- .name = "hsdk-v1.0-reset",
- .of_match_table = hsdkv1_reset_dt_match,
+ .name = "hsdk-reset",
+ .of_match_table = hsdk_reset_dt_match,
},
};
-builtin_platform_driver(hsdkv1_reset_driver);
+builtin_platform_driver(hsdk_reset_driver);
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys HSDKv1 SDP reset driver");
+MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5a5e927ea50f..5dcc9bf1c5bc 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
unsigned long flags;
intent = kzalloc(sizeof(*intent), GFP_KERNEL);
-
if (!intent)
return NULL;
intent->data = kzalloc(size, GFP_KERNEL);
if (!intent->data)
- return NULL;
+ goto free_intent;
spin_lock_irqsave(&channel->intent_lock, flags);
ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&channel->intent_lock, flags);
- return NULL;
+ goto free_data;
}
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
intent->reuse = reuseable;
return intent;
+
+free_data:
+ kfree(intent->data);
+free_intent:
+ kfree(intent);
+ return NULL;
}
static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (ret)
- return ret;
+ goto unlock;
ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
ret = channel->intent_req_result ? 0 : -ECANCELED;
}
+unlock:
mutex_unlock(&channel->intent_req_lock);
return ret;
}
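
The qcom_glink_alloc_intent fix converts early returns into goto-based unwinding so a failure after the second allocation frees both the intent and its data buffer. A minimal sketch of that cleanup pattern under assumed names (struct intent, alloc_intent, fail_step standing in for the idr_alloc_cyclic failure):

#include <stdio.h>
#include <stdlib.h>

struct intent {
	void *data;
	size_t size;
};

/* Allocate an intent plus its payload; unwind partial allocations on error. */
static struct intent *alloc_intent(size_t size, int fail_step)
{
	struct intent *in = calloc(1, sizeof(*in));
	if (!in)
		return NULL;

	in->data = calloc(1, size);
	if (!in->data)
		goto free_intent;

	if (fail_step)			/* stands in for idr_alloc_cyclic() failing */
		goto free_data;

	in->size = size;
	return in;

free_data:
	free(in->data);
free_intent:
	free(in);
	return NULL;
}

int main(void)
{
	struct intent *ok = alloc_intent(64, 0);
	struct intent *bad = alloc_intent(64, 1);

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	if (ok) {
		free(ok->data);
		free(ok);
	}
	return 0;
}
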
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 785fb42f6650..2799a6b08f73 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
pr_err("write_pending failed since: %d\n", vscsi->flags);
- return 0;
+ return -EIO;
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index bd4605a34f54..c62e8d111fd9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup);
/**
* iscsi_session_teardown - destroy session, host, and cls_session
* @cls_session: iscsi session
- *
- * The driver must have called iscsi_remove_session before
- * calling this.
*/
void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
iscsi_pool_free(&session->cmdpool);
+ iscsi_remove_session(cls_session);
+
kfree(session->password);
kfree(session->password_in);
kfree(session->username);
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->portal_type);
kfree(session->discovery_parent_type);
- iscsi_destroy_session(cls_session);
+ iscsi_free_session(cls_session);
+
iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e7818afeda2b..15590a063ad9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
+ if (*bflags & BLIST_UNMAP_LIMIT_WS)
+ sdev->unmap_limit_for_ws = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0190aeff5f7f..7404d26895f5 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2211,22 +2211,6 @@ void iscsi_free_session(struct iscsi_cls_session *session)
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
- * iscsi_destroy_session - destroy iscsi session
- * @session: iscsi_session
- *
- * Can be called by a LLD or iscsi_transport. There must not be
- * any running connections.
- */
-int iscsi_destroy_session(struct iscsi_cls_session *session)
-{
- iscsi_remove_session(session);
- ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
- iscsi_free_session(session);
- return 0;
-}
-EXPORT_SYMBOL_GPL(iscsi_destroy_session);
-
-/**
* iscsi_create_conn - create iscsi class connection
* @session: iscsi cls session
* @dd_size: private driver data size
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fb9f8b5f4673..d175c5c5ccf8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
break;
case SD_LBP_WS16:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS16_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS10:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS10_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
break;
case SD_LBP_ZERO:
@@ -3099,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_security(sdkp, buffer);
}
- sdkp->first_scan = 0;
-
/*
* We now have all cache related info, determine how we deal
* with flush requests.
@@ -3115,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
- * Use the device's preferred I/O size for reads and writes
+ * Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
@@ -3129,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- /* Combine with controller limits */
- q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ /* Do not exceed controller limit */
+ rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+ /*
+ * Only update max_sectors if previously unset or if the current value
+ * exceeds the capabilities of the hardware.
+ */
+ if (sdkp->first_scan ||
+ q->limits.max_sectors > q->limits.max_dev_sectors ||
+ q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors = rw_max;
+
+ sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
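
The sd.c discard hunks rely on min_not_zero() so that a limit the device never reported (still 0) does not clamp the result to zero, while a real, smaller limit still wins. A userspace rendition of that helper with an illustrative cap value (the actual SD_MAX_WS* constants differ):

#include <stdio.h>

/* Userspace rendition of the kernel's min_not_zero(): prefer the smaller
 * value, but treat 0 as "no limit reported" rather than as a real minimum. */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	/* Device did not report a WRITE SAME limit (0): fall back to the cap. */
	printf("%u\n", min_not_zero(0, 65535));	/* -> 65535 */
	/* Device reports 8192, which is tighter than the cap. */
	printf("%u\n", min_not_zero(8192, 65535));	/* -> 8192 */
	return 0;
}
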
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 2fe216b276e2..84a8ac2a779f 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, tty->ldisc);
if (retval) {
- if (!WARN_ON(disc == N_TTY)) {
- tty_ldisc_put(tty->ldisc);
- tty->ldisc = NULL;
- }
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
}
return retval;
}
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
if (tty->ldisc) {
if (reinit) {
- if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
- tty_ldisc_reinit(tty, N_TTY);
+ if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
+ tty_ldisc_reinit(tty, N_TTY) < 0)
+ WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
} else
tty_ldisc_kill(tty);
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dd74c99d6ce1..5d061b3d8224 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended);
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_gadget_strings *gstr = cdev->driver->strings[0];
+ struct usb_string *dev_str = gstr->strings;
/* composite_disconnect() must already have been called
* by the underlying peripheral controller driver!
@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
composite_dev_cleanup(cdev);
+ if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
+ dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
+
kfree(cdev->def_manufacturer);
kfree(cdev);
set_gadget_data(gadget, NULL);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index a22a892de7b7..aeb9f3c40521 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
NULL
};
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
- int n_interf,
- struct usb_os_desc **desc,
- char **names,
- struct module *owner)
+struct config_group *usb_os_desc_prepare_interf_dir(
+ struct config_group *parent,
+ int n_interf,
+ struct usb_os_desc **desc,
+ char **names,
+ struct module *owner)
{
struct config_group *os_desc_group;
struct config_item_type *os_desc_type, *interface_type;
@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
if (!vlabuf)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
configfs_add_default_group(&d->group, os_desc_group);
}
- return 0;
+ return os_desc_group;
}
EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
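
usb_os_desc_prepare_interf_dir now returns the config_group pointer and reports allocation failure with ERR_PTR(-ENOMEM), so callers such as f_rndis check it with IS_ERR()/ERR_CAST(). The sketch below is a simplified userspace rendition of that errno-in-pointer encoding, not the <linux/err.h> implementation; prepare_group is a hypothetical caller-facing function.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified rendition of the kernel's ERR_PTR/IS_ERR/PTR_ERR: small negative
 * errno values are stashed at the very top of the pointer range. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *prepare_group(int fail)
{
	void *group;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* error travels in the pointer */

	group = malloc(32);
	return group ? group : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *g = prepare_group(1);

	if (IS_ERR(g)) {
		printf("prepare_group failed: %ld\n", PTR_ERR(g));
		return 1;
	}
	free(g);
	return 0;
}
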
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index 36c468c4f5e9..540d5e92ed22 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -5,11 +5,12 @@
void unregister_gadget_item(struct config_item *item);
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
- int n_interf,
- struct usb_os_desc **desc,
- char **names,
- struct module *owner);
+struct config_group *usb_os_desc_prepare_interf_dir(
+ struct config_group *parent,
+ int n_interf,
+ struct usb_os_desc **desc,
+ char **names,
+ struct module *owner);
static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
{
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e1d5853ef1e4..c7c5b3ce1d98 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
free_netdev(opts->net);
}
+ kfree(opts->rndis_interf_group); /* single VLA chunk */
kfree(opts);
}
@@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
struct f_rndis_opts *opts;
struct usb_os_desc *descs[1];
char *names[1];
+ struct config_group *rndis_interf_group;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
@@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
names[0] = "rndis";
config_group_init_type_name(&opts->func_inst.group, "",
&rndis_func_type);
- usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
- names, THIS_MODULE);
+ rndis_interf_group =
+ usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+ names, THIS_MODULE);
+ if (IS_ERR(rndis_interf_group)) {
+ rndis_free_inst(&opts->func_inst);
+ return ERR_CAST(rndis_interf_group);
+ }
+ opts->rndis_interf_group = rndis_interf_group;
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index a35ee3c2545d..efdb7ac381d9 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -26,6 +26,7 @@ struct f_rndis_opts {
bool bound;
bool borrowed_net;
+ struct config_group *rndis_interf_group;
struct usb_os_desc rndis_os_desc;
char rndis_ext_compat_id[16];
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index b17618a55f1b..f04e91ef9e7c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -419,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
static void set_link_state(struct dummy_hcd *dum_hcd)
{
struct dummy *dum = dum_hcd->dum;
+ unsigned int power_bit;
dum_hcd->active = 0;
if (dum->pullup)
@@ -429,17 +430,19 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
return;
set_link_state_by_speed(dum_hcd);
+ power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+ USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
dum_hcd->active)
dum_hcd->resuming = 0;
/* Currently !connected or in reset */
- if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+ if ((dum_hcd->port_status & power_bit) == 0 ||
(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
- unsigned disconnect = USB_PORT_STAT_CONNECTION &
+ unsigned int disconnect = power_bit &
dum_hcd->old_status & (~dum_hcd->port_status);
- unsigned reset = USB_PORT_STAT_RESET &
+ unsigned int reset = USB_PORT_STAT_RESET &
(~dum_hcd->old_status) & dum_hcd->port_status;
/* Report reset and disconnect events to the driver */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index eee82ca55b7b..b3fc602b2e24 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,12 +202,13 @@ found:
return tmp;
}
- if (in) {
+ if (in)
dev->in_pipe = usb_rcvbulkpipe(udev,
in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ if (out)
dev->out_pipe = usb_sndbulkpipe(udev,
out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- }
+
if (iso_in) {
dev->iso_in = &iso_in->desc;
dev->in_iso_pipe = usb_rcvisocpipe(udev,
@@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
int status = 0;
struct urb *urbs[param->sglen];
+ if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
+ return -EINVAL;
+
memset(&context, 0, sizeof(context));
context.count = param->iterations * param->sglen;
context.dev = dev;
@@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
if (param->iterations <= 0)
return -EINVAL;
+ if (param->sglen > MAX_SGLEN)
+ return -EINVAL;
/*
* Just a bunch of test cases that every HCD is expected to handle.
*
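
The usbtest checks reject a zero scatter-gather length, cap it, and refuse iteration counts whose product with sglen would overflow an unsigned int; dividing UINT_MAX by sglen tests for overflow without performing the overflowing multiply. A small sketch of that guard, with MAX_SGLEN as an illustrative cap rather than the driver's exact value:

#include <limits.h>
#include <stdio.h>

#define MAX_SGLEN 128	/* hypothetical cap, mirrors the driver's bound check */

/* Return 0 when iterations * sglen fits in an unsigned int, -1 otherwise. */
static int check_counts(unsigned int iterations, unsigned int sglen)
{
	if (!sglen || sglen > MAX_SGLEN)
		return -1;
	if (iterations > UINT_MAX / sglen)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_counts(1000, 32));		/* fine */
	printf("%d\n", check_counts(UINT_MAX / 16, 32));	/* would overflow */
	return 0;
}
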
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 5fe4a5704bde..ccc2bf5274b4 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
+ /*
+ * The USB driver may have already initiated the PHY clock
+ * disable, so wait to see if the clock turns off; if it does
+ * not, proceed with gating the clock.
+ */
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0)
+ return;
+
if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
@@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
+ /*
+ * The USB driver may have already initiated the PHY clock
+ * enable, so wait to see if the clock turns on; if it does
+ * not, proceed with ungating the clock.
+ */
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ USB_PHY_CLK_VALID) == 0)
+ return;
+
if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 68f26904c316..50285b01da92 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -857,9 +857,9 @@ static void xfer_work(struct work_struct *work)
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
usbhs_pipe_running(pipe, 1);
- usbhsf_dma_start(pipe, fifo);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
+ usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe);
xfer_work_end:
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index fdf89800ebc3..43a862a90a77 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
+ info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
usb_serial_put(serial);
@@ -265,7 +266,7 @@ static struct console usbcons = {
void usb_serial_console_disconnect(struct usb_serial *serial)
{
- if (serial->port[0] == usbcons_info.port) {
+ if (serial->port[0] && serial->port[0] == usbcons_info.port) {
usb_serial_console_exit();
usb_serial_put(serial);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2d945c9f975c..412f812522ee 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CP210X_PARTNUM_CP2104 0x04
#define CP210X_PARTNUM_CP2105 0x05
#define CP210X_PARTNUM_CP2108 0x08
+#define CP210X_PARTNUM_UNKNOWN 0xFF
/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
struct cp210x_comm_status {
@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial)
result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_PARTNUM, &priv->partnum,
sizeof(priv->partnum));
- if (result < 0)
- goto err_free_priv;
+ if (result < 0) {
+ dev_warn(&serial->interface->dev,
+ "querying part number failed\n");
+ priv->partnum = CP210X_PARTNUM_UNKNOWN;
+ }
usb_set_serial_data(serial, priv);
@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial)
}
return 0;
-err_free_priv:
- kfree(priv);
-
- return result;
}
static void cp210x_disconnect(struct usb_serial *serial)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1cec03799cdf..49d1b2d4606d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4fcf1cecb6d7..f9d15bd62785 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -610,6 +610,13 @@
#define ADI_GNICEPLUS_PID 0xF001
/*
+ * Cypress WICED USB UART
+ */
+#define CYPRESS_VID 0x04B4
+#define CYPRESS_WICED_BT_USB_PID 0x009B
+#define CYPRESS_WICED_WL_USB_PID 0xF900
+
+/*
* Microchip Technology, Inc.
*
* MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 54bfef13966a..ba672cf4e888 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
+#define TPLINK_PRODUCT_LTE 0x000D
#define TPLINK_PRODUCT_MA180 0x0201
/* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index ebc0beea69d6..eb9928963a53 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adaf6f6dd858..e1cbdfdb7c68 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
- if (unlikely(copied < len && !PageUptodate(page))) {
- copied = 0;
- goto out;
+ if (!PageUptodate(page)) {
+ if (unlikely(copied < len)) {
+ copied = 0;
+ goto out;
+ } else if (len == PAGE_SIZE) {
+ SetPageUptodate(page);
+ }
}
/*
* No need to use i_size_read() here, the i_size
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index ce7181ea60fa..a7c5a9861bef 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -54,7 +54,7 @@ typedef struct {
int size; /* size of magic/mask */
char *magic; /* magic or filename extension */
char *mask; /* mask, NULL for exact match */
- char *interpreter; /* filename of interpreter */
+ const char *interpreter; /* filename of interpreter */
char *name;
struct dentry *dentry;
struct file *interp_file;
@@ -131,27 +131,26 @@ static int load_misc_binary(struct linux_binprm *bprm)
{
Node *fmt;
struct file *interp_file = NULL;
- char iname[BINPRM_BUF_SIZE];
- const char *iname_addr = iname;
int retval;
int fd_binary = -1;
retval = -ENOEXEC;
if (!enabled)
- goto ret;
+ return retval;
/* to keep locking time low, we copy the interpreter string */
read_lock(&entries_lock);
fmt = check_file(bprm);
if (fmt)
- strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE);
+ dget(fmt->dentry);
read_unlock(&entries_lock);
if (!fmt)
- goto ret;
+ return retval;
/* Need to be able to load the file after exec */
+ retval = -ENOENT;
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
- return -ENOENT;
+ goto ret;
if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
retval = remove_arg_zero(bprm);
@@ -195,22 +194,22 @@ static int load_misc_binary(struct linux_binprm *bprm)
bprm->argc++;
/* add the interp as argv[0] */
- retval = copy_strings_kernel(1, &iname_addr, bprm);
+ retval = copy_strings_kernel(1, &fmt->interpreter, bprm);
if (retval < 0)
goto error;
bprm->argc++;
/* Update interp in case binfmt_script needs it. */
- retval = bprm_change_interp(iname, bprm);
+ retval = bprm_change_interp(fmt->interpreter, bprm);
if (retval < 0)
goto error;
- if (fmt->flags & MISC_FMT_OPEN_FILE && fmt->interp_file) {
+ if (fmt->flags & MISC_FMT_OPEN_FILE) {
interp_file = filp_clone_open(fmt->interp_file);
if (!IS_ERR(interp_file))
deny_write_access(interp_file);
} else {
- interp_file = open_exec(iname);
+ interp_file = open_exec(fmt->interpreter);
}
retval = PTR_ERR(interp_file);
if (IS_ERR(interp_file))
@@ -238,6 +237,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
goto error;
ret:
+ dput(fmt->dentry);
return retval;
error:
if (fd_binary > 0)
@@ -594,8 +594,13 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
static void bm_evict_inode(struct inode *inode)
{
+ Node *e = inode->i_private;
+
+ if (e && e->flags & MISC_FMT_OPEN_FILE)
+ filp_close(e->interp_file, NULL);
+
clear_inode(inode);
- kfree(inode->i_private);
+ kfree(e);
}
static void kill_node(Node *e)
@@ -603,24 +608,14 @@ static void kill_node(Node *e)
struct dentry *dentry;
write_lock(&entries_lock);
- dentry = e->dentry;
- if (dentry) {
- list_del_init(&e->list);
- e->dentry = NULL;
- }
+ list_del_init(&e->list);
write_unlock(&entries_lock);
- if ((e->flags & MISC_FMT_OPEN_FILE) && e->interp_file) {
- filp_close(e->interp_file, NULL);
- e->interp_file = NULL;
- }
-
- if (dentry) {
- drop_nlink(d_inode(dentry));
- d_drop(dentry);
- dput(dentry);
- simple_release_fs(&bm_mnt, &entry_count);
- }
+ dentry = e->dentry;
+ drop_nlink(d_inode(dentry));
+ d_drop(dentry);
+ dput(dentry);
+ simple_release_fs(&bm_mnt, &entry_count);
}
/* /<entry> */
@@ -665,7 +660,8 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
root = file_inode(file)->i_sb->s_root;
inode_lock(d_inode(root));
- kill_node(e);
+ if (!list_empty(&e->list))
+ kill_node(e);
inode_unlock(d_inode(root));
break;
@@ -794,7 +790,7 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
inode_lock(d_inode(root));
while (!list_empty(&entries))
- kill_node(list_entry(entries.next, Node, list));
+ kill_node(list_first_entry(&entries, Node, list));
inode_unlock(d_inode(root));
break;
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index afdf4e3cafc2..7cde3f46ad26 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -19,7 +19,6 @@ static int load_script(struct linux_binprm *bprm)
const char *i_arg, *i_name;
char *cp;
struct file *file;
- char interp[BINPRM_BUF_SIZE];
int retval;
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
@@ -55,7 +54,7 @@ static int load_script(struct linux_binprm *bprm)
break;
}
for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
- if (*cp == '\0')
+ if (*cp == '\0')
return -ENOEXEC; /* No interpreter name found */
i_name = cp;
i_arg = NULL;
@@ -65,7 +64,6 @@ static int load_script(struct linux_binprm *bprm)
*cp++ = '\0';
if (*cp)
i_arg = cp;
- strcpy (interp, i_name);
/*
* OK, we've parsed out the interpreter name and
* (optional) argument.
@@ -80,24 +78,27 @@ static int load_script(struct linux_binprm *bprm)
if (retval)
return retval;
retval = copy_strings_kernel(1, &bprm->interp, bprm);
- if (retval < 0) return retval;
+ if (retval < 0)
+ return retval;
bprm->argc++;
if (i_arg) {
retval = copy_strings_kernel(1, &i_arg, bprm);
- if (retval < 0) return retval;
+ if (retval < 0)
+ return retval;
bprm->argc++;
}
retval = copy_strings_kernel(1, &i_name, bprm);
- if (retval) return retval;
+ if (retval)
+ return retval;
bprm->argc++;
- retval = bprm_change_interp(interp, bprm);
+ retval = bprm_change_interp(i_name, bprm);
if (retval < 0)
return retval;
/*
* OK, now restart the process with the interpreter's dentry.
*/
- file = open_exec(interp);
+ file = open_exec(i_name);
if (IS_ERR(file))
return PTR_ERR(file);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 93d088ffc05c..789f55e851ae 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
set_page_writeback(page);
result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
- if (result)
+ if (result) {
end_page_writeback(page);
- else
+ } else {
+ clean_page_buffers(page);
unlock_page(page);
+ }
blk_queue_exit(bdev->bd_queue);
return result;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 899ddaeeacec..8fc690384c58 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -722,7 +722,7 @@ struct btrfs_delayed_root;
* Indicate that a whole-filesystem exclusive operation is running
* (device replace, resize, device add/delete, balance)
*/
-#define BTRFS_FS_EXCL_OP 14
+#define BTRFS_FS_EXCL_OP 16
struct btrfs_fs_info {
u8 fsid[BTRFS_FSID_SIZE];
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 12ab19a4b93e..970190cd347e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2801,7 +2801,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
}
}
- bio = btrfs_bio_alloc(bdev, sector << 9);
+ bio = btrfs_bio_alloc(bdev, (u64)sector << 9);
bio_add_page(bio, page, page_size, offset);
bio->bi_end_io = end_io_func;
bio->bi_private = tree;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 84edfc60d87a..f23c820daaed 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -734,12 +734,13 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
inode = req->r_inode;
ihold(inode);
} else {
- /* req->r_dentry is non-null for LSSNAP request.
- * fall-thru */
- WARN_ON_ONCE(!req->r_dentry);
+ /* req->r_dentry is non-null for LSSNAP request */
+ rcu_read_lock();
+ inode = get_nonsnap_parent(req->r_dentry);
+ rcu_read_unlock();
+ dout("__choose_mds using snapdir's parent %p\n", inode);
}
- }
- if (!inode && req->r_dentry) {
+ } else if (req->r_dentry) {
/* ignore race with rename; old or new d_parent is okay */
struct dentry *parent;
struct inode *dir;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 1ffc8b426c1c..7fc0b850c352 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -374,12 +374,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
realm->ino, realm, snapc, snapc->seq,
(unsigned int) snapc->num_snaps);
- if (realm->cached_context) {
- ceph_put_snap_context(realm->cached_context);
- /* queue realm for cap_snap creation */
- list_add_tail(&realm->dirty_item, dirty_realms);
- }
+ ceph_put_snap_context(realm->cached_context);
realm->cached_context = snapc;
+ /* queue realm for cap_snap creation */
+ list_add_tail(&realm->dirty_item, dirty_realms);
return 0;
fail:
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62cf812ed0e5..96415c65bbdc 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -866,7 +866,8 @@ out:
*/
if (sdio->boundary) {
ret = dio_send_cur_page(dio, sdio, map_bh);
- dio_bio_submit(dio, sdio);
+ if (sdio->bio)
+ dio_bio_submit(dio, sdio);
put_page(sdio->cur_page);
sdio->cur_page = NULL;
}
diff --git a/fs/exec.c b/fs/exec.c
index ac34d9724684..5470d3c1892a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1410,7 +1410,7 @@ static void free_bprm(struct linux_binprm *bprm)
kfree(bprm);
}
-int bprm_change_interp(char *interp, struct linux_binprm *bprm)
+int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
/* If a binfmt changed the interp, free it first. */
if (bprm->interp != bprm->filename)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9a7c90386947..4b4a72f392be 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
void stop_discard_thread(struct f2fs_sb_info *sbi);
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void release_discard_addrs(struct f2fs_sb_info *sbi);
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 621b9b3d320b..c695ff462ee6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
}
/* This comes from f2fs_put_super and f2fs_trim_fs */
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
{
__issue_discard_cmd(sbi, false);
__drop_discard_cmd(sbi);
- __wait_discard_cmd(sbi, false);
+ __wait_discard_cmd(sbi, !umount);
}
static void mark_discard_range_all(struct f2fs_sb_info *sbi)
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
}
/* It's time to issue all the filed discards */
mark_discard_range_all(sbi);
- f2fs_wait_discard_bios(sbi);
+ f2fs_wait_discard_bios(sbi, false);
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
return err;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 89f61eb3d167..933c3d529e65 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb)
}
/* be sure to wait for any on-going discard commands */
- f2fs_wait_discard_bios(sbi);
+ f2fs_wait_discard_bios(sbi, true);
if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
struct cp_control cpc = {
diff --git a/fs/mpage.c b/fs/mpage.c
index 37bb77c1302c..c991faec70b9 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
try_to_free_buffers(page);
}
+/*
+ * For situations where we want to clean all buffers attached to a page.
+ * We don't need to calculate how many buffers are attached to the page,
+ * we just need to specify a number larger than the maximum number of buffers.
+ */
+void clean_page_buffers(struct page *page)
+{
+ clean_buffers(page, ~0U);
+}
+
static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
void *data)
{
@@ -605,10 +615,8 @@ alloc_new:
if (bio == NULL) {
if (first_unmapped == blocks_per_page) {
if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
- page, wbc)) {
- clean_buffers(page, first_unmapped);
+ page, wbc))
goto out;
- }
}
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
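
clean_page_buffers() reuses the existing per-page walk by passing ~0U, a count larger than any possible number of attached buffers, so every buffer is cleaned without computing how many there are. A toy standalone version of that sentinel trick; NR_BUFFERS, dirty[] and the function names are illustrative only.

#include <stdio.h>

#define NR_BUFFERS 4

static int dirty[NR_BUFFERS] = { 1, 1, 1, 1 };

/* Clean at most 'limit' buffers; the walk also stops at the end of the set. */
static void clean_buffers(unsigned int limit)
{
	unsigned int i;

	for (i = 0; i < NR_BUFFERS && limit--; i++)
		dirty[i] = 0;
}

/* Passing a limit larger than any possible buffer count cleans them all,
 * which is the idea behind clean_page_buffers() using ~0U. */
static void clean_all_buffers(void)
{
	clean_buffers(~0U);
}

int main(void)
{
	int i;

	clean_all_buffers();
	for (i = 0; i < NR_BUFFERS; i++)
		printf("%d", dirty[i]);
	printf("\n");
	return 0;
}
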
diff --git a/fs/namespace.c b/fs/namespace.c
index 54059b142d6b..3b601f115b6c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -468,7 +468,9 @@ static inline int may_write_real(struct file *file)
/* File refers to upper, writable layer? */
upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
- if (upperdentry && file_inode(file) == d_inode(upperdentry))
+ if (upperdentry &&
+ (file_inode(file) == d_inode(upperdentry) ||
+ file_inode(file) == d_inode(dentry)))
return 0;
/* Lower layer: can't write to real file, sorry... */
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index efebe6cf4378..22880ef6d8dd 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
static void pnfs_init_server(struct nfs_server *server)
{
rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
- rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
}
#else
@@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void)
ida_init(&server->openowner_id);
ida_init(&server->lockowner_id);
pnfs_init_server(server);
+ rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
return server;
}
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 44c638b7876c..508126eb49f9 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
dprintk("--> %s\n", __func__);
- nfs4_fl_put_deviceid(fl->dsaddr);
+ if (fl->dsaddr != NULL)
+ nfs4_fl_put_deviceid(fl->dsaddr);
/* This assumes a single RW lseg */
if (lseg->pls_range.iomode == IOMODE_RW) {
struct nfs4_filelayout *flo;
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index dd5d27da8c0c..30426c1a1bbd 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -274,7 +274,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen,
ssize_t ret;
ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
- if (ret <= 0)
+ if (ret < 0)
return ERR_PTR(ret);
rkey = request_key(&key_type_id_resolver, desc, "");
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6c61e2b99635..f90090e8c959 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -8399,8 +8399,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
lo = NFS_I(inode)->layout;
/* If the open stateid was bad, then recover it. */
if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
- nfs4_stateid_match_other(&lgp->args.stateid,
- &lgp->args.ctx->state->stateid)) {
+ !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
spin_unlock(&inode->i_lock);
exception->state = lgp->args.ctx->state;
exception->stateid = &lgp->args.stateid;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 37c8af003275..14ed9791ec9c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1842,8 +1842,8 @@ static void encode_create_session(struct xdr_stream *xdr,
* Assumes OPEN is the biggest non-idempotent compound.
* 2 is the verifier.
*/
- max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
- RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
+ max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2)
+ * XDR_UNIT + RPC_MAX_AUTH_SIZE;
encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3c69db7d4905..8487486ec496 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u)
exp_put(u->secinfo.si_exp);
}
+static void
+nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
+{
+ if (u->secinfo_no_name.sin_exp)
+ exp_put(u->secinfo_no_name.sin_exp);
+}
+
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_SECINFO_NO_NAME] = {
.op_func = nfsd4_secinfo_no_name,
- .op_release = nfsd4_secinfo_release,
+ .op_release = nfsd4_secinfo_no_name_release,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
.op_rsize_bop = nfsd4_secinfo_rsize,
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index aad97b30d5e6..c441f9387a1b 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
c->tmpfile = true;
err = ovl_copy_up_locked(c);
} else {
- err = -EIO;
- if (lock_rename(c->workdir, c->destdir) != NULL) {
- pr_err("overlayfs: failed to lock workdir+upperdir\n");
- } else {
+ err = ovl_lock_rename_workdir(c->workdir, c->destdir);
+ if (!err) {
err = ovl_copy_up_locked(c);
unlock_rename(c->workdir, c->destdir);
}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 3309b1912241..cc961a3bd3bd 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -216,26 +216,6 @@ out_unlock:
return err;
}
-static int ovl_lock_rename_workdir(struct dentry *workdir,
- struct dentry *upperdir)
-{
- /* Workdir should not be the same as upperdir */
- if (workdir == upperdir)
- goto err;
-
- /* Workdir should not be subdir of upperdir and vice versa */
- if (lock_rename(workdir, upperdir) != NULL)
- goto err_unlock;
-
- return 0;
-
-err_unlock:
- unlock_rename(workdir, upperdir);
-err:
- pr_err("overlayfs: failed to lock workdir+upperdir\n");
- return -EIO;
-}
-
static struct dentry *ovl_clear_empty(struct dentry *dentry,
struct list_head *list)
{
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index c3addd1114f1..654bea1a5ac9 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -506,6 +506,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
if (IS_ERR(index)) {
+ err = PTR_ERR(index);
pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
"overlayfs: mount with '-o index=off' to disable inodes index.\n",
d_inode(origin)->i_ino, name.len, name.name,
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index d4e8c1a08fb0..c706a6f99928 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -235,6 +235,7 @@ bool ovl_inuse_trylock(struct dentry *dentry);
void ovl_inuse_unlock(struct dentry *dentry);
int ovl_nlink_start(struct dentry *dentry, bool *locked);
void ovl_nlink_end(struct dentry *dentry, bool locked);
+int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
static inline bool ovl_is_impuredir(struct dentry *dentry)
{
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 878a750986dd..25d9b5adcd42 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -37,6 +37,9 @@ struct ovl_fs {
bool noxattr;
/* sb common to all layers */
struct super_block *same_sb;
+ /* Did we take the inuse lock? */
+ bool upperdir_locked;
+ bool workdir_locked;
};
/* private information held for every overlayfs dentry */
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 62e9b22a2077..0f85ee9c3268 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -988,6 +988,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
struct path *lowerstack, unsigned int numlower)
{
int err;
+ struct dentry *index = NULL;
struct inode *dir = dentry->d_inode;
struct path path = { .mnt = mnt, .dentry = dentry };
LIST_HEAD(list);
@@ -1007,8 +1008,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
inode_lock_nested(dir, I_MUTEX_PARENT);
list_for_each_entry(p, &list, l_node) {
- struct dentry *index;
-
if (p->name[0] == '.') {
if (p->len == 1)
continue;
@@ -1018,6 +1017,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
index = lookup_one_len(p->name, dentry, p->len);
if (IS_ERR(index)) {
err = PTR_ERR(index);
+ index = NULL;
break;
}
err = ovl_verify_index(index, lowerstack, numlower);
@@ -1029,7 +1029,9 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
break;
}
dput(index);
+ index = NULL;
}
+ dput(index);
inode_unlock(dir);
out:
ovl_cache_free(&list);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index fd5ea4facc62..092d150643c1 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -211,9 +211,10 @@ static void ovl_put_super(struct super_block *sb)
dput(ufs->indexdir);
dput(ufs->workdir);
- ovl_inuse_unlock(ufs->workbasedir);
+ if (ufs->workdir_locked)
+ ovl_inuse_unlock(ufs->workbasedir);
dput(ufs->workbasedir);
- if (ufs->upper_mnt)
+ if (ufs->upper_mnt && ufs->upperdir_locked)
ovl_inuse_unlock(ufs->upper_mnt->mnt_root);
mntput(ufs->upper_mnt);
for (i = 0; i < ufs->numlower; i++)
@@ -881,9 +882,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_put_upperpath;
err = -EBUSY;
- if (!ovl_inuse_trylock(upperpath.dentry)) {
- pr_err("overlayfs: upperdir is in-use by another mount\n");
+ if (ovl_inuse_trylock(upperpath.dentry)) {
+ ufs->upperdir_locked = true;
+ } else if (ufs->config.index) {
+ pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
goto out_put_upperpath;
+ } else {
+ pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
}
err = ovl_mount_dir(ufs->config.workdir, &workpath);
@@ -901,9 +906,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
}
err = -EBUSY;
- if (!ovl_inuse_trylock(workpath.dentry)) {
- pr_err("overlayfs: workdir is in-use by another mount\n");
+ if (ovl_inuse_trylock(workpath.dentry)) {
+ ufs->workdir_locked = true;
+ } else if (ufs->config.index) {
+ pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
goto out_put_workpath;
+ } else {
+ pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
}
ufs->workbasedir = workpath.dentry;
@@ -1156,11 +1165,13 @@ out_put_lowerpath:
out_free_lowertmp:
kfree(lowertmp);
out_unlock_workdentry:
- ovl_inuse_unlock(workpath.dentry);
+ if (ufs->workdir_locked)
+ ovl_inuse_unlock(workpath.dentry);
out_put_workpath:
path_put(&workpath);
out_unlock_upperdentry:
- ovl_inuse_unlock(upperpath.dentry);
+ if (ufs->upperdir_locked)
+ ovl_inuse_unlock(upperpath.dentry);
out_put_upperpath:
path_put(&upperpath);
out_free_config:
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 117794582f9f..b9b239fa5cfd 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -430,7 +430,7 @@ void ovl_inuse_unlock(struct dentry *dentry)
}
}
-/* Called must hold OVL_I(inode)->oi_lock */
+/* Caller must hold OVL_I(inode)->lock */
static void ovl_cleanup_index(struct dentry *dentry)
{
struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode;
@@ -469,6 +469,9 @@ static void ovl_cleanup_index(struct dentry *dentry)
err = PTR_ERR(index);
if (!IS_ERR(index))
err = ovl_cleanup(dir, index);
+ else
+ index = NULL;
+
inode_unlock(dir);
if (err)
goto fail;
@@ -557,3 +560,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked)
mutex_unlock(&OVL_I(d_inode(dentry))->lock);
}
}
+
+int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
+{
+ /* Workdir should not be the same as upperdir */
+ if (workdir == upperdir)
+ goto err;
+
+ /* Workdir should not be subdir of upperdir and vice versa */
+ if (lock_rename(workdir, upperdir) != NULL)
+ goto err_unlock;
+
+ return 0;
+
+err_unlock:
+ unlock_rename(workdir, upperdir);
+err:
+ pr_err("overlayfs: failed to lock workdir+upperdir\n");
+ return -EIO;
+}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 50b0556a124f..52ad15192e72 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
spin_lock(&dquot->dq_dqb_lock);
if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
- goto add;
+ goto finish;
tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ space + rsv_space;
- if (flags & DQUOT_SPACE_NOFAIL)
- goto add;
-
if (dquot->dq_dqb.dqb_bhardlimit &&
tspace > dquot->dq_dqb.dqb_bhardlimit &&
!ignore_hardlimit(dquot)) {
if (flags & DQUOT_SPACE_WARN)
prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
ret = -EDQUOT;
- goto out;
+ goto finish;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
if (flags & DQUOT_SPACE_WARN)
prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
ret = -EDQUOT;
- goto out;
+ goto finish;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
* be always printed
*/
ret = -EDQUOT;
- goto out;
+ goto finish;
}
}
-add:
- dquot->dq_dqb.dqb_rsvspace += rsv_space;
- dquot->dq_dqb.dqb_curspace += space;
-out:
+finish:
+ /*
+ * We have to be careful and go through warning generation & grace time
+ * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
+ * only here...
+ */
+ if (flags & DQUOT_SPACE_NOFAIL)
+ ret = 0;
+ if (!ret) {
+ dquot->dq_dqb.dqb_rsvspace += rsv_space;
+ dquot->dq_dqb.dqb_curspace += space;
+ }
spin_unlock(&dquot->dq_dqb_lock);
return ret;
}
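A userspace-style sketch of the reworked flow, with simplified names and types (an assumption, not the kernel code): the limit checks always run, so warning and grace-time handling is never skipped, and the accounting is applied whenever the check passed or the NOFAIL flag forces success:

	#include <stdbool.h>

	static int add_space_sketch(long long cur, long long hard,
				    long long add, bool nofail, long long *out)
	{
		int ret = 0;

		if (hard && cur + add > hard)
			ret = -1;		/* stands in for -EDQUOT */
		if (nofail)			/* forced writes always account */
			ret = 0;
		if (!ret)
			*out = cur + add;	/* accounting happens last */
		return ret;
	}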
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ef4b48d1ea42..1c713fd5b3e6 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -588,6 +588,12 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
break;
if (ACCESS_ONCE(ctx->released) ||
fatal_signal_pending(current)) {
+ /*
+ * &ewq->wq may be queued in fork_event, but
+ * __remove_wait_queue ignores the head
+ * parameter. It would be a problem if it
+ * didn't.
+ */
__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
if (ewq->msg.event == UFFD_EVENT_FORK) {
struct userfaultfd_ctx *new;
@@ -1061,6 +1067,12 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
(unsigned long)
uwq->msg.arg.reserved.reserved1;
list_move(&uwq->wq.entry, &fork_event);
+ /*
+ * fork_nctx can be freed as soon as
+ * we drop the lock, unless we take a
+ * reference on it.
+ */
+ userfaultfd_ctx_get(fork_nctx);
spin_unlock(&ctx->event_wqh.lock);
ret = 0;
break;
@@ -1091,19 +1103,53 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
if (!ret && msg->event == UFFD_EVENT_FORK) {
ret = resolve_userfault_fork(ctx, fork_nctx, msg);
+ spin_lock(&ctx->event_wqh.lock);
+ if (!list_empty(&fork_event)) {
+ /*
+ * The fork thread didn't abort, so we can
+ * drop the temporary refcount.
+ */
+ userfaultfd_ctx_put(fork_nctx);
+
+ uwq = list_first_entry(&fork_event,
+ typeof(*uwq),
+ wq.entry);
+ /*
+ * If fork_event list wasn't empty and in turn
+ * the event wasn't already released by fork
+ * (the event is allocated on fork kernel
+ * stack), put the event back to its place in
+ * the event_wq. fork_event head will be freed
+ * as soon as we return so the event cannot
+ * stay queued there no matter the current
+ * "ret" value.
+ */
+ list_del(&uwq->wq.entry);
+ __add_wait_queue(&ctx->event_wqh, &uwq->wq);
- if (!ret) {
- spin_lock(&ctx->event_wqh.lock);
- if (!list_empty(&fork_event)) {
- uwq = list_first_entry(&fork_event,
- typeof(*uwq),
- wq.entry);
- list_del(&uwq->wq.entry);
- __add_wait_queue(&ctx->event_wqh, &uwq->wq);
+ /*
+ * Leave the event in the waitqueue and report
+ * error to userland if we failed to resolve
+ * the userfault fork.
+ */
+ if (likely(!ret))
userfaultfd_event_complete(ctx, uwq);
- }
- spin_unlock(&ctx->event_wqh.lock);
+ } else {
+ /*
+ * Here the fork thread aborted and the
+ * refcount from the fork thread on fork_nctx
+ * has already been released. We still hold
+ * the reference we took before releasing the
+ * lock above. If resolve_userfault_fork
+ * failed we've to drop it because the
+ * fork_nctx has to be freed in such case. If
+ * it succeeded we'll hold it because the new
+ * uffd references it.
+ */
+ if (ret)
+ userfaultfd_ctx_put(fork_nctx);
}
+ spin_unlock(&ctx->event_wqh.lock);
}
return ret;
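A schematic sketch of the pinning pattern this hunk relies on (simplified; release_to_userspace() is a hypothetical stand-in for resolve_userfault_fork() and the fork_event list handling is omitted): take a reference before dropping event_wqh.lock, then drop it after re-taking the lock if the context is no longer needed:

	static int fork_event_sketch(struct userfaultfd_ctx *ctx,
				     struct userfaultfd_ctx *fork_nctx)
	{
		int ret;

		spin_lock(&ctx->event_wqh.lock);
		userfaultfd_ctx_get(fork_nctx);		/* pin across the unlock */
		spin_unlock(&ctx->event_wqh.lock);

		ret = release_to_userspace(fork_nctx);	/* may sleep */

		spin_lock(&ctx->event_wqh.lock);
		if (ret)
			userfaultfd_ctx_put(fork_nctx);	/* drop temporary pin */
		spin_unlock(&ctx->event_wqh.lock);
		return ret;
	}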
diff --git a/fs/xattr.c b/fs/xattr.c
index 4424f7fecf14..61cd28ba25f3 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -250,7 +250,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value,
}
memcpy(value, buffer, len);
out:
- security_release_secctx(buffer, len);
+ kfree(buffer);
out_noalloc:
return len;
}
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 744dcaec34cc..f965ce832bc0 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small(
bp = xfs_btree_get_bufs(args->mp, args->tp,
args->agno, fbno, 0);
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
xfs_trans_binval(args->tp, bp);
}
args->len = 1;
@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto out_agbp_relse;
+ }
xfs_trans_binval(tp, bp);
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 044a363119be..def32fa1c225 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1477,14 +1477,14 @@ xfs_bmap_isaeof(
int is_empty;
int error;
- bma->aeof = 0;
+ bma->aeof = false;
error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
&is_empty);
if (error)
return error;
if (is_empty) {
- bma->aeof = 1;
+ bma->aeof = true;
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 988bb3f31446..dfd643909f85 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1962,7 +1962,7 @@ xfs_difree_inobt(
if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
rec.ir_free == XFS_INOBT_ALL_FREE &&
mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
- xic->deleted = 1;
+ xic->deleted = true;
xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
@@ -1989,7 +1989,7 @@ xfs_difree_inobt(
xfs_difree_inode_chunk(mp, agno, &rec, dfops);
} else {
- xic->deleted = 0;
+ xic->deleted = false;
error = xfs_inobt_update(cur, &rec);
if (error) {
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 8372e9bcd7b6..71de185735e0 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
uint32_t ilf_fields; /* flags for fields logged */
uint16_t ilf_asize; /* size of attr d/ext/root */
uint16_t ilf_dsize; /* size of data/ext/root */
+ uint32_t ilf_pad; /* pad for 64 bit boundary */
uint64_t ilf_ino; /* inode number */
union {
uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format {
int32_t ilf_boffset; /* off of inode in buffer */
} xfs_inode_log_format_t;
-typedef struct xfs_inode_log_format_32 {
- uint16_t ilf_type; /* inode log item type */
- uint16_t ilf_size; /* size of this item */
- uint32_t ilf_fields; /* flags for fields logged */
- uint16_t ilf_asize; /* size of attr d/ext/root */
- uint16_t ilf_dsize; /* size of data/ext/root */
- uint64_t ilf_ino; /* inode number */
- union {
- uint32_t ilfu_rdev; /* rdev value for dev inode*/
- uuid_t ilfu_uuid; /* mount point value */
- } ilf_u;
- int64_t ilf_blkno; /* blkno of inode buffer */
- int32_t ilf_len; /* len of inode buffer */
- int32_t ilf_boffset; /* off of inode in buffer */
-} __attribute__((packed)) xfs_inode_log_format_32_t;
-
-typedef struct xfs_inode_log_format_64 {
+/*
+ * Old 32 bit systems will log in this format without the 64 bit
+ * alignment padding. Recovery will detect this and convert it to the
+ * correct format.
+ */
+struct xfs_inode_log_format_32 {
uint16_t ilf_type; /* inode log item type */
uint16_t ilf_size; /* size of this item */
uint32_t ilf_fields; /* flags for fields logged */
uint16_t ilf_asize; /* size of attr d/ext/root */
uint16_t ilf_dsize; /* size of data/ext/root */
- uint32_t ilf_pad; /* pad for 64 bit boundary */
uint64_t ilf_ino; /* inode number */
union {
uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 {
int64_t ilf_blkno; /* blkno of inode buffer */
int32_t ilf_len; /* len of inode buffer */
int32_t ilf_boffset; /* off of inode in buffer */
-} xfs_inode_log_format_64_t;
+} __attribute__((packed));
/*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 7034e17535de..3354140de07e 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
int
xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
+ umode_t mode;
+ bool set_mode = false;
int error = 0;
if (!acl)
@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
return error;
if (type == ACL_TYPE_ACCESS) {
- umode_t mode;
-
error = posix_acl_update_mode(inode, &mode, &acl);
if (error)
return error;
- error = xfs_set_mode(inode, mode);
- if (error)
- return error;
+ set_mode = true;
}
set_acl:
- return __xfs_set_acl(inode, acl, type);
+ error = __xfs_set_acl(inode, acl, type);
+ if (error)
+ return error;
+
+ /*
+ * We set the mode after successfully updating the ACL xattr because the
+ * xattr update can fail at ENOSPC and we don't want to change the mode
+ * if the ACL update hasn't been applied.
+ */
+ if (set_mode)
+ error = xfs_set_mode(inode, mode);
+
+ return error;
}
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index ebd66b19fbfc..e3a950ed35a8 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
&bp, XFS_ATTR_FORK);
if (error)
return error;
+ node = bp->b_addr;
+ btree = dp->d_ops->node_tree_p(node);
child_fsb = be32_to_cpu(btree[i + 1].before);
xfs_trans_brelse(*trans, bp);
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index bc6c6e10a969..6503cfa44262 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -84,6 +84,7 @@ xfs_zero_extent(
GFP_NOFS, 0);
}
+#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -190,6 +191,7 @@ xfs_bmap_rtalloc(
}
return 0;
}
+#endif /* CONFIG_XFS_RT */
/*
* Check if the endoff is outside the last extent. If so the caller will grow
@@ -2122,11 +2124,31 @@ xfs_swap_extents(
ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
+ }
+
+ /* Swap the cow forks. */
+ if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ xfs_extnum_t extnum;
+
+ ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+ ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+
+ extnum = ip->i_cnextents;
+ ip->i_cnextents = tip->i_cnextents;
+ tip->i_cnextents = extnum;
+
cowfp = ip->i_cowfp;
ip->i_cowfp = tip->i_cowfp;
tip->i_cowfp = cowfp;
- xfs_inode_set_cowblocks_tag(ip);
- xfs_inode_set_cowblocks_tag(tip);
+
+ if (ip->i_cowfp && ip->i_cnextents)
+ xfs_inode_set_cowblocks_tag(ip);
+ else
+ xfs_inode_clear_cowblocks_tag(ip);
+ if (tip->i_cowfp && tip->i_cnextents)
+ xfs_inode_set_cowblocks_tag(tip);
+ else
+ xfs_inode_clear_cowblocks_tag(tip);
}
xfs_trans_log_inode(tp, ip, src_log_flags);
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 0eaa81dc49be..7d330b3c77c3 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -28,7 +28,20 @@ struct xfs_mount;
struct xfs_trans;
struct xfs_bmalloca;
+#ifdef CONFIG_XFS_RT
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
+#else /* !CONFIG_XFS_RT */
+/*
+ * Attempts to allocate RT extents when RT is disabled indicate corruption and
+ * should trigger a shutdown.
+ */
+static inline int
+xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
+{
+ return -EFSCORRUPTED;
+}
+#endif /* CONFIG_XFS_RT */
+
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 309e26c9dddb..56d0e526870c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -764,7 +764,7 @@ xfs_file_fallocate(
enum xfs_prealloc_flags flags = 0;
uint iolock = XFS_IOLOCK_EXCL;
loff_t new_size = 0;
- bool do_file_insert = 0;
+ bool do_file_insert = false;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
@@ -825,7 +825,7 @@ xfs_file_fallocate(
error = -EINVAL;
goto out_unlock;
}
- do_file_insert = 1;
+ do_file_insert = true;
} else {
flags |= XFS_PREALLOC_SET;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 814ed729881d..560e0b40ac1b 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -521,6 +521,7 @@ __xfs_getfsmap_rtdev(
return query_fn(tp, info);
}
+#ifdef CONFIG_XFS_RT
/* Actually query the realtime bitmap. */
STATIC int
xfs_getfsmap_rtdev_rtbitmap_query(
@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap(
return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
info);
}
+#endif /* CONFIG_XFS_RT */
/* Execute a getfsmap query against the regular data device. */
STATIC int
@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys(
return false;
}
+/*
+ * There are only two devices if we didn't configure RT devices at build time.
+ */
+#ifdef CONFIG_XFS_RT
#define XFS_GETFSMAP_DEVS 3
+#else
+#define XFS_GETFSMAP_DEVS 2
+#endif /* CONFIG_XFS_RT */
+
/*
* Get filesystem's extents as described in head, and format for
* output. Calls formatter to fill the user's buffer until all
@@ -853,10 +863,12 @@ xfs_getfsmap(
handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
handlers[1].fn = xfs_getfsmap_logdev;
}
+#ifdef CONFIG_XFS_RT
if (mp->m_rtdev_targp) {
handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
}
+#endif /* CONFIG_XFS_RT */
xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
xfs_getfsmap_dev_compare);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index a705f34b58fa..9bbc2d7cc8cb 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
to->di_dmstate = from->di_dmstate;
to->di_flags = from->di_flags;
+ /* log a dummy value to ensure log structure is fully initialised */
+ to->di_next_unlinked = NULLAGINO;
+
if (from->di_version == 3) {
to->di_changecount = inode->i_version;
to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
* the second with the on-disk inode structure, and a possible third and/or
* fourth with the inode data/extents/b-tree root and inode attributes
* data/extents/b-tree root.
+ *
+ * Note: Always use the 64 bit inode log format structure so we don't
+ * leave an uninitialised hole in the format item on 64 bit systems. Log
+ * recovery on 32 bit systems handles this just fine, so there's no reason
+ * for not using and initialising the properly padded structure all the time.
*/
STATIC void
xfs_inode_item_format(
@@ -412,8 +420,8 @@ xfs_inode_item_format(
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
- struct xfs_inode_log_format *ilf;
struct xfs_log_iovec *vecp = NULL;
+ struct xfs_inode_log_format *ilf;
ASSERT(ip->i_d.di_version > 1);
@@ -425,7 +433,17 @@ xfs_inode_item_format(
ilf->ilf_boffset = ip->i_imap.im_boffset;
ilf->ilf_fields = XFS_ILOG_CORE;
ilf->ilf_size = 2; /* format + core */
- xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
+
+ /*
+ * make sure we don't leak uninitialised data into the log in the case
+ * when we don't log every field in the inode.
+ */
+ ilf->ilf_dsize = 0;
+ ilf->ilf_asize = 0;
+ ilf->ilf_pad = 0;
+ uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
+
+ xlog_finish_iovec(lv, vecp, sizeof(*ilf));
xfs_inode_item_format_core(ip, lv, &vecp);
xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -855,44 +873,29 @@ xfs_istale_done(
}
/*
- * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
- * (which can have different field alignments) to the native version
+ * convert an xfs_inode_log_format struct from the old 32 bit version
+ * (which can have different field alignments) to the native 64 bit version
*/
int
xfs_inode_item_format_convert(
- xfs_log_iovec_t *buf,
- xfs_inode_log_format_t *in_f)
+ struct xfs_log_iovec *buf,
+ struct xfs_inode_log_format *in_f)
{
- if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
- xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
-
- in_f->ilf_type = in_f32->ilf_type;
- in_f->ilf_size = in_f32->ilf_size;
- in_f->ilf_fields = in_f32->ilf_fields;
- in_f->ilf_asize = in_f32->ilf_asize;
- in_f->ilf_dsize = in_f32->ilf_dsize;
- in_f->ilf_ino = in_f32->ilf_ino;
- /* copy biggest field of ilf_u */
- uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
- in_f->ilf_blkno = in_f32->ilf_blkno;
- in_f->ilf_len = in_f32->ilf_len;
- in_f->ilf_boffset = in_f32->ilf_boffset;
- return 0;
- } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
- xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
-
- in_f->ilf_type = in_f64->ilf_type;
- in_f->ilf_size = in_f64->ilf_size;
- in_f->ilf_fields = in_f64->ilf_fields;
- in_f->ilf_asize = in_f64->ilf_asize;
- in_f->ilf_dsize = in_f64->ilf_dsize;
- in_f->ilf_ino = in_f64->ilf_ino;
- /* copy biggest field of ilf_u */
- uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
- in_f->ilf_blkno = in_f64->ilf_blkno;
- in_f->ilf_len = in_f64->ilf_len;
- in_f->ilf_boffset = in_f64->ilf_boffset;
- return 0;
- }
- return -EFSCORRUPTED;
+ struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
+
+ if (buf->i_len != sizeof(*in_f32))
+ return -EFSCORRUPTED;
+
+ in_f->ilf_type = in_f32->ilf_type;
+ in_f->ilf_size = in_f32->ilf_size;
+ in_f->ilf_fields = in_f32->ilf_fields;
+ in_f->ilf_asize = in_f32->ilf_asize;
+ in_f->ilf_dsize = in_f32->ilf_dsize;
+ in_f->ilf_ino = in_f32->ilf_ino;
+ /* copy biggest field of ilf_u */
+ uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
+ in_f->ilf_blkno = in_f32->ilf_blkno;
+ in_f->ilf_len = in_f32->ilf_len;
+ in_f->ilf_boffset = in_f32->ilf_boffset;
+ return 0;
}
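A standalone C sketch, not kernel code, of the hole that the explicit ilf_pad field and the zeroing of unused fields above guard against; the assertions assume an LP64 ABI where a 64-bit integer is 8-byte aligned:

	#include <stddef.h>
	#include <stdint.h>

	struct no_pad {			/* same field order, no explicit pad */
		uint16_t type, size;
		uint32_t fields;
		uint16_t asize, dsize;
		uint64_t ino;		/* 8-byte aligned on LP64 */
	};

	/* The compiler leaves an invisible 4-byte hole at offsets 12..15;
	 * copying such a struct into the log would leak uninitialised bytes. */
	_Static_assert(offsetof(struct no_pad, ino) == 16, "implicit hole");
	_Static_assert(sizeof(struct no_pad) == 24, "4 of the 24 bytes are padding");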
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c5107c7bc4bf..dc95a49d62e7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2515,7 +2515,7 @@ next_lv:
if (lv)
vecp = lv->lv_iovecp;
}
- if (record_cnt == 0 && ordered == false) {
+ if (record_cnt == 0 && !ordered) {
if (!lv)
return 0;
break;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index ea7d4b4e50d0..e9727d0a541a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -704,7 +704,7 @@ xfs_mountfs(
xfs_set_maxicount(mp);
/* enable fail_at_unmount as default */
- mp->m_fail_unmount = 1;
+ mp->m_fail_unmount = true;
error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
if (error)
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0c381d71b242..0492436a053f 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28);
XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8);
XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52);
- XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
}
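The 52- and 56-byte values asserted here follow directly from the xfs_inode_log_format fields changed in fs/xfs/libxfs/xfs_log_format.h above: 2+2+4+2+2 (type, size, fields, asize, dsize) + 8 (ino) + 16 (uuid union) + 8 (blkno) + 4+4 (len, boffset) = 52 bytes for the packed 32-bit layout, plus the 4-byte ilf_pad = 56 bytes for the padded native structure.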
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 3246815c24d6..37e603bf1591 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -736,7 +736,13 @@ xfs_reflink_end_cow(
/* If there is a hole at end_fsb - 1 go to the previous extent */
if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
got.br_startoff > end_fsb) {
- ASSERT(idx > 0);
+ /*
+ * In case of racing, overlapping AIO writes, no COW extents
+ * might be left by the time I/O completes for the loser of
+ * the race. In that case we are done.
+ */
+ if (idx <= 0)
+ goto out_cancel;
xfs_iext_get_extent(ifp, --idx, &got);
}
@@ -809,6 +815,7 @@ next_extent:
out_defer:
xfs_defer_cancel(&dfops);
+out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
diff --git a/include/dt-bindings/reset/snps,hsdk-reset.h b/include/dt-bindings/reset/snps,hsdk-reset.h
new file mode 100644
index 000000000000..e1a643e4bc91
--- /dev/null
+++ b/include/dt-bindings/reset/snps,hsdk-reset.h
@@ -0,0 +1,17 @@
+/**
+ * This header provides indices for the HSDK reset controller.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
+#define _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
+
+#define HSDK_APB_RESET 0
+#define HSDK_AXI_RESET 1
+#define HSDK_ETH_RESET 2
+#define HSDK_USB_RESET 3
+#define HSDK_SDIO_RESET 4
+#define HSDK_HDMI_RESET 5
+#define HSDK_GFX_RESET 6
+#define HSDK_DMAC_RESET 7
+#define HSDK_EBI_RESET 8
+
+#endif /*_DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK*/
diff --git a/include/dt-bindings/reset/snps,hsdk-v1-reset.h b/include/dt-bindings/reset/snps,hsdk-v1-reset.h
deleted file mode 100644
index d898c89b7123..000000000000
--- a/include/dt-bindings/reset/snps,hsdk-v1-reset.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
- * This header provides index for the HSDK v1 reset controller.
- */
-#ifndef _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1
-#define _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1
-
-#define HSDK_V1_APB_RESET 0
-#define HSDK_V1_AXI_RESET 1
-#define HSDK_V1_ETH_RESET 2
-#define HSDK_V1_USB_RESET 3
-#define HSDK_V1_SDIO_RESET 4
-#define HSDK_V1_HDMI_RESET 5
-#define HSDK_V1_GFX_RESET 6
-#define HSDK_V1_DMAC_RESET 7
-#define HSDK_V1_EBI_RESET 8
-
-#endif /*_DT_BINDINGS_RESET_CONTROLLER_HSDK_V1*/
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fb44d6180ca0..18d05b5491f3 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -131,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
int executable_stack);
extern int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location);
-extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
extern int prepare_bprm_creds(struct linux_binprm *bprm);
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 8b9d6fff002d..f2deb71958b2 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -92,7 +92,7 @@
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
- * @_reg: 32bit value of entire bitfield
+ * @_reg: value of entire bitfield
*
* FIELD_GET() extracts the field specified by @_mask from the
* bitfield passed in as @_reg by masking and shifting it down.
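For illustration, a small field accessor using FIELD_GET(); the register layout and names are made up for the example:

	#include <linux/bitfield.h>
	#include <linux/types.h>

	#define STATUS_UNIT	0xF0U	/* 4-bit field at bits 7:4 */

	static u8 status_unit(u32 reg)
	{
		return FIELD_GET(STATUS_UNIT, reg);	/* 0xAB -> 0xA */
	}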
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8390859e79e7..f1af7d63d678 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
u32 key)
{
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555eccf..446b24cac67d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 339e73742e73..13dab191a23e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -403,7 +403,7 @@ struct address_space {
unsigned long flags; /* error bits */
spinlock_t private_lock; /* for use by the address_space */
gfp_t gfp_mask; /* implicit gfp mask for allocations */
- struct list_head private_list; /* ditto */
+ struct list_head private_list; /* for use by the address_space */
void *private_data; /* ditto */
errseq_t wb_err;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index c458d7b7ad19..6431087816ba 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void hv_process_channel_removal(u32 relid);
void vmbus_setevent(struct vmbus_channel *channel);
/*
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0ad4c3044cf9..91189bb0c818 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,12 @@
#define STACK_MAGIC 0xdeadbeef
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
/* @a is a power of 2 value */
@@ -57,6 +63,10 @@
#define READ 0
#define WRITE 1
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#define u64_to_user_ptr(x) ( \
@@ -76,7 +86,15 @@
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
+/**
+ * FIELD_SIZEOF - get the size of a struct's field
+ * @t: the target struct
+ * @f: the target struct's field
+ * Return: the size of @f in the struct definition without having a
+ * declared instance of @t.
+ */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \
@@ -107,7 +125,7 @@
/*
* Divide positive or negative dividend by positive or negative divisor
* and round to closest integer. Result is undefined for negative
- * divisors if he dividend variable type is unsigned and for negative
+ * divisors if the dividend variable type is unsigned and for negative
* dividends if the divisor variable type is unsigned.
*/
#define DIV_ROUND_CLOSEST(x, divisor)( \
@@ -247,13 +265,13 @@ extern int _cond_resched(void);
* @ep_ro: right open interval endpoint
*
* Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, ep_ro), where the upper interval endpoint is right-open.
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
* This is useful, e.g. for accessing an index of an array containing
- * ep_ro elements, for example. Think of it as sort of modulus, only that
+ * @ep_ro elements, for example. Think of it as sort of modulus, only that
* the result isn't that of modulo. ;) Note that if initial input is a
* small value, then result will return 0.
*
- * Return: a result based on val in interval [0, ep_ro).
+ * Return: a result based on @val in interval [0, @ep_ro).
*/
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
@@ -618,8 +636,8 @@ do { \
* trace_printk - printf formatting in the ftrace buffer
* @fmt: the printf format for printing
*
- * Note: __trace_printk is an internal function for trace_printk and
- * the @ip is passed in via the trace_printk macro.
+ * Note: __trace_printk is an internal function for trace_printk() and
+ * the @ip is passed in via the trace_printk() macro.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering in various
@@ -629,7 +647,7 @@ do { \
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used)
+ * allocated when trace_printk() is used.)
*
* A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
@@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* the @ip is passed in via the trace_puts macro.
*
* This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" affects,
+ * paths that a developer wants the least amount of "Heisenbug" effects,
* where the processing of the print format is still too much.
*
* This function allows a kernel developer to debug fast path sections
@@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_puts scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used)
+ * allocated when trace_puts() is used.)
*
* Returns: 0 if nothing was written, positive # if string was.
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
t2 min2 = (y); \
(void) (&min1 == &min2); \
min1 < min2 ? min1 : min2; })
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
#define min(x, y) \
__min(typeof(x), typeof(y), \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
t2 max2 = (y); \
(void) (&max1 == &max2); \
max1 > max2 ? max1 : max2; })
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
#define max(x, y) \
__max(typeof(x), typeof(y), \
__UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
x, y)
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
#define max3(x, y, z) max((typeof(x))max(x, y), z)
/**
@@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @lo: lowest allowable value
* @hi: highest allowable value
*
- * This macro does strict typechecking of lo/hi to make sure they are of the
- * same type as val. See the unnecessary pointer comparisons.
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val. See the unnecessary pointer comparisons.
*/
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
@@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
*
* Or not use min/max/clamp at all, of course.
*/
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
#define min_t(type, x, y) \
__min(type, type, \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
x, y)
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
#define max_t(type, x, y) \
__max(type, type, \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
- * 'type' to make all the comparisons.
+ * @type to make all the comparisons.
*/
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
@@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
- * type the input argument 'val' is. This is useful when val is an unsigned
- * type and min and max are literals that will otherwise be assigned a signed
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
* integer type.
*/
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-/*
- * swap - swap value of @a and @b
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
*/
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
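A short sketch of the clamp_val() case the kerneldoc above describes (the function and values are illustrative):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static u8 scale_brightness(u8 raw)
	{
		/*
		 * clamp(raw, 32, 224) would compare a u8 against int literals
		 * and trip clamp()'s strict type check; clamp_val() does the
		 * comparison in typeof(raw) instead.
		 */
		return clamp_val(raw, 32, 224);
	}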
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index eaf4ad209c8f..e32dbc4934db 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -980,7 +980,6 @@ enum mlx5_cap_type {
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
- MLX5_CAP_FPGA,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
#define MLX5_CAP_FPGA(mdev, cap) \
- MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP64_FPGA(mdev, cap) \
- MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 02ff700e4f30..401c8972cc3a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -774,6 +774,7 @@ struct mlx5_core_dev {
u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
} caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a528b35a022e..69772347f866 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_80[0x18];
u8 log_max_destination[0x8];
- u8 reserved_at_a0[0x18];
+ u8 log_max_flow_counter[0x8];
+ u8 reserved_at_a8[0x10];
u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f8c10d336e42..065d99deb847 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -240,7 +240,7 @@ extern unsigned int kobjsize(const void *objp);
#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
-# define VM_MPX VM_HIGH_ARCH_BIT_4
+# define VM_MPX VM_HIGH_ARCH_4
#else
# define VM_MPX VM_NONE
#endif
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f3f2d07feb2a..9a43763a68ad 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -316,7 +316,7 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF (1 << 21) /* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 7b2e31b1745a..6866e8126982 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
#else /* CONFIG_MMU_NOTIFIER */
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+ return 0;
+}
+
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 356a814e7c8e..c8f89417740b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1094,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+static inline unsigned long pfn_to_section_nr(unsigned long pfn)
+{
+ return pfn >> PFN_SECTION_SHIFT;
+}
+static inline unsigned long section_nr_to_pfn(unsigned long sec)
+{
+ return sec << PFN_SECTION_SHIFT;
+}
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
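One plausible motivation for switching these to inline functions (an assumption, the hunk itself does not state it): the argument is now converted to unsigned long before the shift, so a narrower caller-side type cannot overflow:

	static unsigned long first_pfn_of_section(int sec)
	{
		/*
		 * With the old macro the shift happened in the caller's type
		 * (a 32-bit int here) and could overflow for large section
		 * numbers; the inline helper widens to unsigned long first.
		 */
		return section_nr_to_pfn(sec);
	}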
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2c2a5514b0df..528b24c78308 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -108,9 +108,10 @@ struct ebt_table {
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
- const struct ebt_table *table,
- const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+ const struct ebt_table *table,
+ const struct nf_hook_ops *ops,
+ struct ebt_table **res);
extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
const struct nf_hook_ops *);
extern unsigned int ebt_do_table(struct sk_buff *skb,
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a36abe2da13e..27e249ed7c5c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -12,11 +12,31 @@
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
+void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
+bool is_hardlockup(void);
+
+extern int watchdog_user_enabled;
+extern int nmi_watchdog_user_enabled;
+extern int soft_watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
+
+extern struct cpumask watchdog_cpumask;
+extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
#else
-static inline void lockup_detector_init(void)
-{
-}
-#endif
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif /* !CONFIG_SMP */
+
+#else /* CONFIG_LOCKUP_DETECTOR */
+static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
+#endif /* !CONFIG_LOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
-extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
#else
-static inline void touch_softlockup_watchdog_sched(void)
-{
-}
-static inline void touch_softlockup_watchdog(void)
-{
-}
-static inline void touch_softlockup_watchdog_sync(void)
-{
-}
-static inline void touch_all_softlockup_watchdogs(void)
-{
-}
+static inline void touch_softlockup_watchdog_sched(void) { }
+static inline void touch_softlockup_watchdog(void) { }
+static inline void touch_softlockup_watchdog_sync(void) { }
+static inline void touch_all_softlockup_watchdogs(void) { }
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
-static inline void reset_hung_task_detector(void)
-{
-}
+static inline void reset_hung_task_detector(void) { }
#endif
/*
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void)
* 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
* bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
*
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
+ * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
+ * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'interface' between the parameters in /proc/sys/kernel and the internal
+ * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
+ * handled differently because its value is not boolean, and the lockup
+ * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
*/
#define NMI_WATCHDOG_ENABLED_BIT 0
#define SOFT_WATCHDOG_ENABLED_BIT 1
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic;
static inline void hardlockup_detector_disable(void) {}
#endif
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+# define NMI_WATCHDOG_SYSCTL_PERM 0644
+#else
+# define NMI_WATCHDOG_SYSCTL_PERM 0444
+#endif
+
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
+extern void hardlockup_detector_perf_stop(void);
+extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_enable(void);
+extern void hardlockup_detector_perf_cleanup(void);
+extern int hardlockup_detector_perf_init(void);
#else
-#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void hardlockup_detector_perf_stop(void) { }
+static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_enable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
+# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
+# else
+static inline int hardlockup_detector_perf_init(void) { return 0; }
+# endif
#endif
-#endif
+
+void watchdog_nmi_stop(void);
+void watchdog_nmi_start(void);
+int watchdog_nmi_probe(void);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
- *
+ *
* If the architecture supports the NMI watchdog, touch_nmi_watchdog()
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif
-#ifdef CONFIG_LOCKUP_DETECTOR
-extern int nmi_watchdog_enabled;
-extern int watchdog_user_enabled;
-extern int watchdog_thresh;
-extern unsigned long watchdog_enabled;
-extern struct cpumask watchdog_cpumask;
-extern unsigned long *watchdog_cpumask_bits;
-extern int __read_mostly watchdog_suspended;
-#ifdef CONFIG_SMP
-extern int sysctl_softlockup_all_cpu_backtrace;
-extern int sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
-#endif
-
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif
-extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int lockup_detector_suspend(void);
-extern void lockup_detector_resume(void);
-#else
-static inline int lockup_detector_suspend(void)
-{
- return 0;
-}
-
-static inline void lockup_detector_resume(void)
-{
-}
-#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
diff --git a/include/linux/of.h b/include/linux/of.h
index cfc34117fc92..b240ed69dc96 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
return NULL;
}
+static inline int of_n_addr_cells(struct device_node *np)
+{
+ return 0;
+}
+static inline int of_n_size_cells(struct device_node *np)
+{
+ return 0;
+}
+
static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3a19c253bdb1..ae53e413fb13 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* Same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well.
+ */
+void mmput_async(struct mm_struct *);
+#endif
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index d7b6dab956ec..7d065abc7a47 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -71,14 +71,6 @@ struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
-
- /*
- * Some variables from the most recent sd_lb_stats for this domain,
- * used by wake_affine().
- */
- unsigned long nr_running;
- unsigned long load;
- unsigned long capacity;
};
struct sched_domain {
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 12910cf19869..c149aa7bedf3 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
}
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *);
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 905d769d8ddc..5f7eeab990fe 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -42,7 +42,7 @@ enum {
#define THREAD_ALIGN THREAD_SIZE
#endif
-#ifdef CONFIG_DEBUG_STACK_USAGE
+#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
__GFP_ZERO)
#else
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e51cf5f81597..14c289393071 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -773,7 +773,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
*/
static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{
- return nla_put(skb, attrtype, sizeof(u8), &value);
+ /* temporary variables to work around GCC PR81715 with asan-stack=1 */
+ u8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u8), &tmp);
}
/**
@@ -784,7 +787,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
*/
static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
- return nla_put(skb, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u16), &tmp);
}
/**
@@ -795,7 +800,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
*/
static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put(skb, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be16), &tmp);
}
/**
@@ -806,7 +813,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be16 tmp = value;
+
+ return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -817,7 +826,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
{
- return nla_put(skb, attrtype, sizeof(__le16), &value);
+ __le16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le16), &tmp);
}
/**
@@ -828,7 +839,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
*/
static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
- return nla_put(skb, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u32), &tmp);
}
/**
@@ -839,7 +852,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
*/
static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put(skb, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be32), &tmp);
}
/**
@@ -850,7 +865,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be32 tmp = value;
+
+ return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -861,7 +878,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
{
- return nla_put(skb, attrtype, sizeof(__le32), &value);
+ __le32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le32), &tmp);
}
/**
@@ -874,7 +893,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
u64 value, int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
+ u64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
/**
@@ -887,7 +908,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
+ __be64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
}
/**
@@ -900,7 +923,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
- return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+ __be64 tmp = value;
+
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
padattr);
}
@@ -914,7 +939,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
+ __le64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
}
/**
@@ -925,7 +952,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
*/
static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
{
- return nla_put(skb, attrtype, sizeof(s8), &value);
+ s8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s8), &tmp);
}
/**
@@ -936,7 +965,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
*/
static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
{
- return nla_put(skb, attrtype, sizeof(s16), &value);
+ s16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s16), &tmp);
}
/**
@@ -947,7 +978,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
*/
static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
{
- return nla_put(skb, attrtype, sizeof(s32), &value);
+ s32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s32), &tmp);
}
/**
@@ -960,7 +993,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
+ s64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
}
/**
@@ -1010,7 +1045,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
__be32 addr)
{
- return nla_put_be32(skb, attrtype, addr);
+ __be32 tmp = addr;
+
+ return nla_put_be32(skb, attrtype, tmp);
}
/**
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 65ba335b0e7e..4fc75f7ae23b 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -39,8 +39,8 @@
/* This is used to register protocols. */
struct net_protocol {
- void (*early_demux)(struct sk_buff *skb);
- void (*early_demux_handler)(struct sk_buff *skb);
+ int (*early_demux)(struct sk_buff *skb);
+ int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
unsigned int no_policy:1,
diff --git a/include/net/route.h b/include/net/route.h
index 57dfc6850d37..d538e6db1afe 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
fl4->fl4_gre_key = gre_key;
return ip_route_output_key(net, fl4);
}
-
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag);
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin);
int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3bc910a9bfc6..89974c5286d8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -345,7 +345,7 @@ void tcp_v4_err(struct sk_buff *skb, u32);
void tcp_shutdown(struct sock *sk, int how);
-void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
diff --git a/include/net/udp.h b/include/net/udp.h
index 12dfbfe2e2d7..6c759c8594e2 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
}
-void udp_v4_early_demux(struct sk_buff *skb);
+int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 82e93ee94708..67c5a9f223f7 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -192,6 +192,7 @@ struct scsi_device {
unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
unsigned broken_fua:1; /* Don't set FUA bit */
unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
+ unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 9592570e092a..36b03013d629 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -29,5 +29,6 @@
#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
+#define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */
#endif
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 6183d20a01fb..b266d2a3bcb1 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -434,7 +434,6 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
unsigned int target_id);
extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
-extern int iscsi_destroy_session(struct iscsi_cls_session *session);
extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
int dd_size, uint32_t cid);
extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h
index d0509db6d0ec..f89cd5ee1c7a 100644
--- a/include/sound/hda_verbs.h
+++ b/include/sound/hda_verbs.h
@@ -95,6 +95,7 @@ enum {
#define AC_VERB_SET_EAPD_BTLENABLE 0x70c
#define AC_VERB_SET_DIGI_CONVERT_1 0x70d
#define AC_VERB_SET_DIGI_CONVERT_2 0x70e
+#define AC_VERB_SET_DIGI_CONVERT_3 0x73e
#define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f
#define AC_VERB_SET_GPIO_DATA 0x715
#define AC_VERB_SET_GPIO_MASK 0x716
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index a03acd0d398a..695257ae64ac 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
int port; /* created/attached port */
unsigned int flags; /* SNDRV_VIRMIDI_* */
rwlock_t filelist_lock;
+ struct rw_semaphore filelist_sem;
struct list_head filelist;
};
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 43ab5c402f98..f90860d1f897 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -312,7 +312,7 @@ union bpf_attr {
* jump into another BPF program
* @ctx: context pointer passed to next program
* @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- * @index: index inside array that selects specific program to run
+ * @index: 32-bit index inside array that selects specific program to run
* Return: 0 on success or negative error
*
* int bpf_clone_redirect(skb, ifindex, flags)
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 412c06a624c8..ccaea525340b 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -269,9 +269,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 36
+#define DM_VERSION_MINOR 37
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2017-06-09)"
+#define DM_VERSION_EXTRA "-ioctl (2017-09-20)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
index b97725af2ac0..da161b56c79e 100644
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
XT_BPF_MODE_FD_PINNED,
XT_BPF_MODE_FD_ELF,
};
+#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
struct xt_bpf_info_v1 {
__u16 mode;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 917cc04a0a94..7b62df86be1d 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1022,7 +1022,7 @@ select_insn:
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct bpf_prog *prog;
- u64 index = BPF_R3;
+ u32 index = BPF_R3;
if (unlikely(index >= array->map.max_entries))
goto out;
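
The hunk above makes the interpreter use only the low 32 bits of R3 as the tail-call index, matching what the JITs already do. A standalone, illustrative C sketch of the truncation (not kernel code; the value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r3 = (1ULL << 32) | 7;	/* upper bits set, low 32 bits = 7 */
	uint64_t as_u64 = r3;		/* old interpreter view: exceeds any u32 max_entries, call skipped */
	uint32_t as_u32 = (uint32_t)r3;	/* new interpreter view: index 7, same as JITed code */

	printf("u64 index: %llu, u32 index: %u\n",
	       (unsigned long long)as_u64, as_u32);
	return 0;
}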
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index e833ed914358..be1dde967208 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -363,6 +363,7 @@ out:
putname(pname);
return ret;
}
+EXPORT_SYMBOL_GPL(bpf_obj_get_user);
static void bpf_evict_inode(struct inode *inode)
{
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b914fbe1383e..8b8d6ba39e23 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
{
struct bpf_verifier_state *parent = state->parent;
+ if (regno == BPF_REG_FP)
+ /* We don't need to worry about FP liveness because it's read-only */
+ return;
+
while (parent) {
/* if read wasn't screened by an earlier write ... */
if (state->regs[regno].live & REG_LIVE_WRITTEN)
@@ -2345,6 +2349,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
* copy register state to dest reg
*/
regs[insn->dst_reg] = regs[insn->src_reg];
+ regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8de11a29e495..d851df22f5c5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -24,6 +24,7 @@
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
+#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
@@ -897,6 +898,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
out:
cpus_write_unlock();
+ /*
+ * Do post unplug cleanup. This is still protected against
+ * concurrent CPU hotplug via cpu_add_remove_lock.
+ */
+ lockup_detector_cleanup();
return ret;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6bc21e202ae4..9d93db81fa36 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -662,7 +662,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
/*
* Do not update time when cgroup is not active
*/
- if (cgrp == event->cgrp)
+ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
__update_cgrp_time(event->cgrp);
}
@@ -8955,6 +8955,14 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
static void free_pmu_context(struct pmu *pmu)
{
+ /*
+ * Static contexts such as perf_sw_context have a global lifetime
+ * and may be shared between different PMUs. Avoid freeing them
+ * when a single PMU is going away.
+ */
+ if (pmu->task_ctx_nr > perf_invalid_context)
+ return;
+
mutex_lock(&pmus_lock);
free_percpu(pmu->pmu_cpu_context);
mutex_unlock(&pmus_lock);
diff --git a/kernel/exit.c b/kernel/exit.c
index f2cd53e92147..cf28528842bc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1610,6 +1610,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
if (!infop)
return err;
+ if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+ goto Efault;
+
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
@@ -1735,6 +1738,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
if (!infop)
return err;
+ if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+ goto Efault;
+
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
diff --git a/kernel/fork.c b/kernel/fork.c
index 10646182440f..07cc743698d3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ /* Clear stale pointers from reused stack. */
+ memset(s->addr, 0, THREAD_SIZE);
+#endif
tsk->stack_vm_area = s;
return s->addr;
}
@@ -946,6 +950,24 @@ void mmput(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(mmput);
+#ifdef CONFIG_MMU
+static void mmput_async_fn(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct,
+ async_put_work);
+
+ __mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+ if (atomic_dec_and_test(&mm->mm_users)) {
+ INIT_WORK(&mm->async_put_work, mmput_async_fn);
+ schedule_work(&mm->async_put_work);
+ }
+}
+#endif
+
/**
* set_mm_exe_file - change a reference to the mm's executable file
*
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6fc89fd93824..5a2ef92c2782 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
irq_setup_affinity(desc);
break;
case IRQ_STARTUP_MANAGED:
+ irq_do_set_affinity(d, aff, false);
ret = __irq_startup(desc);
- irq_set_affinity_locked(d, aff, false);
break;
case IRQ_STARTUP_ABORT:
return 0;
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 638eb9c83d9f..9eb09aef0313 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -18,8 +18,34 @@
static inline bool irq_needs_fixup(struct irq_data *d)
{
const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
+ unsigned int cpu = smp_processor_id();
- return cpumask_test_cpu(smp_processor_id(), m);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ /*
+ * The cpumask_empty() check is a workaround for interrupt chips,
+ * which do not implement effective affinity, but the architecture has
+ * enabled the config switch. Use the general affinity mask instead.
+ */
+ if (cpumask_empty(m))
+ m = irq_data_get_affinity_mask(d);
+
+ /*
+ * Sanity check. If the mask is not empty when excluding the outgoing
+ * CPU then it must contain at least one online CPU. The outgoing CPU
+ * has been removed from the online mask already.
+ */
+ if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
+ cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
+ /*
+ * If this happens then there was a missed IRQ fixup at some
+ * point. Warn about it and enforce fixup.
+ */
+ pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
+ cpumask_pr_args(m), d->irq, cpu);
+ return true;
+ }
+#endif
+ return cpumask_test_cpu(cpu, m);
}
static bool migrate_one_irq(struct irq_desc *desc)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d00132b5c325..4bff6a10ae8e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc)
set_bit(IRQTF_AFFINITY, &action->thread_flags);
}
+static void irq_validate_effective_affinity(struct irq_data *data)
+{
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+ if (!cpumask_empty(m))
+ return;
+ pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
+ chip->name, data->irq);
+#endif
+}
+
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
@@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_chip *chip = irq_data_get_irq_chip(data);
int ret;
+ if (!chip || !chip->irq_set_affinity)
+ return -EINVAL;
+
ret = chip->irq_set_affinity(data, mask, force);
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
cpumask_copy(desc->irq_common_data.affinity, mask);
case IRQ_SET_MASK_OK_NOCOPY:
+ irq_validate_effective_affinity(data);
irq_set_thread_affinity(desc);
ret = 0;
}
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index ea34ed8bb952..055bb2962a0b 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -131,7 +131,7 @@ static int kcmp_epoll_target(struct task_struct *task1,
if (filp_epoll) {
filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
fput(filp_epoll);
- } else
+ }
if (IS_ERR(filp_tgt))
return PTR_ERR(filp_tgt);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b9628e43c78f..bf8c8fd72589 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch)
}
EXPORT_SYMBOL_GPL(klp_register_patch);
+/*
+ * Remove parts of patches that touch a given kernel module. The list of
+ * patches processed might be limited. When limit is NULL, all patches
+ * will be handled.
+ */
+static void klp_cleanup_module_patches_limited(struct module *mod,
+ struct klp_patch *limit)
+{
+ struct klp_patch *patch;
+ struct klp_object *obj;
+
+ list_for_each_entry(patch, &klp_patches, list) {
+ if (patch == limit)
+ break;
+
+ klp_for_each_object(patch, obj) {
+ if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+ continue;
+
+ /*
+ * Only unpatch the module if the patch is enabled or
+ * is in transition.
+ */
+ if (patch->enabled || patch == klp_transition_patch) {
+ pr_notice("reverting patch '%s' on unloading module '%s'\n",
+ patch->mod->name, obj->mod->name);
+ klp_unpatch_object(obj);
+ }
+
+ klp_free_object_loaded(obj);
+ break;
+ }
+ }
+}
+
int klp_module_coming(struct module *mod)
{
int ret;
@@ -894,7 +929,7 @@ err:
pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
patch->mod->name, obj->mod->name, obj->mod->name);
mod->klp_alive = false;
- klp_free_object_loaded(obj);
+ klp_cleanup_module_patches_limited(mod, patch);
mutex_unlock(&klp_mutex);
return ret;
@@ -902,9 +937,6 @@ err:
void klp_module_going(struct module *mod)
{
- struct klp_patch *patch;
- struct klp_object *obj;
-
if (WARN_ON(mod->state != MODULE_STATE_GOING &&
mod->state != MODULE_STATE_COMING))
return;
@@ -917,25 +949,7 @@ void klp_module_going(struct module *mod)
*/
mod->klp_alive = false;
- list_for_each_entry(patch, &klp_patches, list) {
- klp_for_each_object(patch, obj) {
- if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
- continue;
-
- /*
- * Only unpatch the module if the patch is enabled or
- * is in transition.
- */
- if (patch->enabled || patch == klp_transition_patch) {
- pr_notice("reverting patch '%s' on unloading module '%s'\n",
- patch->mod->name, obj->mod->name);
- klp_unpatch_object(obj);
- }
-
- klp_free_object_loaded(obj);
- break;
- }
- }
+ klp_cleanup_module_patches_limited(mod, NULL);
mutex_unlock(&klp_mutex);
}
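
The new helper above walks the patch list and stops at 'limit' (NULL means process every patch), so the error path in klp_module_coming() can undo only the patches handled before the failure. A toy userspace sketch of that convention, with invented names:

#include <stdio.h>

struct patch { const char *name; struct patch *next; };

static void cleanup_limited(struct patch *head, struct patch *limit)
{
	for (struct patch *p = head; p; p = p->next) {
		if (p == limit)
			break;
		printf("cleaning up %s\n", p->name);
	}
}

int main(void)
{
	struct patch c = { "patch-c", NULL };
	struct patch b = { "patch-b", &c };
	struct patch a = { "patch-a", &b };

	cleanup_limited(&a, &c);	/* error path: stop before the failing patch */
	cleanup_limited(&a, NULL);	/* module going away: clean up everything */
	return 0;
}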
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 44c8d0d17170..e36e652d996f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance, struct stack_trace *trace,
int (*save)(struct stack_trace *trace))
{
+ struct lock_list *uninitialized_var(target_entry);
struct lock_list *entry;
- int ret;
struct lock_list this;
- struct lock_list *uninitialized_var(target_entry);
+ int ret;
/*
* Prove that the new <prev> -> <next> dependency would not
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
this.class = hlock_class(next);
this.parent = NULL;
ret = check_noncircular(&this, hlock_class(prev), &target_entry);
- if (unlikely(!ret))
+ if (unlikely(!ret)) {
+ if (!trace->entries) {
+ /*
+ * If @save fails here, the printing might trigger
+ * a WARN but because of the !nr_entries it should
+ * not do bad things.
+ */
+ save(trace);
+ }
return print_circular_bug(&this, target_entry, next, prev, trace);
+ }
else if (unlikely(ret < 0))
return print_bfs_bug(ret);
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
return print_bfs_bug(ret);
- if (save && !save(trace))
+ if (!trace->entries && !save(trace))
return 0;
/*
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
if (!ret)
return 0;
- /*
- * Debugging printouts:
- */
- if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
- graph_unlock();
- printk("\n new dependency: ");
- print_lock_name(hlock_class(prev));
- printk(KERN_CONT " => ");
- print_lock_name(hlock_class(next));
- printk(KERN_CONT "\n");
- dump_stack();
- if (!graph_lock())
- return 0;
- }
return 2;
}
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
struct held_lock *hlock;
- struct stack_trace trace;
- int (*save)(struct stack_trace *trace) = save_trace;
+ struct stack_trace trace = {
+ .nr_entries = 0,
+ .max_entries = 0,
+ .entries = NULL,
+ .skip = 0,
+ };
/*
* Debugging checks.
@@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
*/
if (hlock->read != 2 && hlock->check) {
int ret = check_prev_add(curr, hlock, next,
- distance, &trace, save);
+ distance, &trace, save_trace);
if (!ret)
return 0;
/*
- * Stop saving stack_trace if save_trace() was
- * called at least once:
- */
- if (save && ret == 2)
- save = NULL;
-
- /*
* Stop after the first non-trylock entry,
* as non-trylock entries have added their
* own direct dependencies already, so this
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 6bcbfbf1a8fd..403ab9cdb949 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -350,7 +350,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
pgprot_t pgprot = PAGE_KERNEL;
struct dev_pagemap *pgmap;
struct page_map *page_map;
- int error, nid, is_ram;
+ int error, nid, is_ram, i = 0;
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -448,6 +448,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
list_del(&page->lru);
page->pgmap = pgmap;
percpu_ref_get(ref);
+ if (!(++i % 1024))
+ cond_resched();
}
devres_add(dev, page_map);
return __va(res->start);
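
The cond_resched() every 1024 pages bounds how long devm_memremap_pages() runs without giving up the CPU while initializing a large region. A rough userspace analogue of the pattern, with sched_yield() standing in for cond_resched() and an arbitrary page count:

#include <sched.h>
#include <stdio.h>

int main(void)
{
	long npages = 1L << 20;
	long i = 0;

	for (long p = 0; p < npages; p++) {
		/* ... per-page setup would go here ... */
		if (!(++i % 1024))
			sched_yield();	/* yield periodically inside a long loop */
	}
	printf("initialized %ld pages\n", npages);
	return 0;
}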
diff --git a/kernel/params.c b/kernel/params.c
index 60b2d8101355..cc9108c2a1fd 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -224,7 +224,7 @@ char *parse_args(const char *doing,
} \
int param_get_##name(char *buffer, const struct kernel_param *kp) \
{ \
- return scnprintf(buffer, PAGE_SIZE, format, \
+ return scnprintf(buffer, PAGE_SIZE, format "\n", \
*((type *)kp->arg)); \
} \
const struct kernel_param_ops param_ops_##name = { \
@@ -236,14 +236,14 @@ char *parse_args(const char *doing,
EXPORT_SYMBOL(param_ops_##name)
-STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8);
-STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16);
-STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16);
-STANDARD_PARAM_DEF(int, int, "%i", kstrtoint);
-STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint);
-STANDARD_PARAM_DEF(long, long, "%li", kstrtol);
-STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul);
-STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull);
+STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8);
+STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16);
+STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16);
+STANDARD_PARAM_DEF(int, int, "%i", kstrtoint);
+STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint);
+STANDARD_PARAM_DEF(long, long, "%li", kstrtol);
+STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul);
+STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull);
int param_set_charp(const char *val, const struct kernel_param *kp)
{
@@ -270,7 +270,7 @@ EXPORT_SYMBOL(param_set_charp);
int param_get_charp(char *buffer, const struct kernel_param *kp)
{
- return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg));
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg));
}
EXPORT_SYMBOL(param_get_charp);
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(param_set_bool);
int param_get_bool(char *buffer, const struct kernel_param *kp)
{
/* Y and N chosen as being relatively non-coder friendly */
- return sprintf(buffer, "%c", *(bool *)kp->arg ? 'Y' : 'N');
+ return sprintf(buffer, "%c\n", *(bool *)kp->arg ? 'Y' : 'N');
}
EXPORT_SYMBOL(param_get_bool);
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(param_set_invbool);
int param_get_invbool(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');
+ return sprintf(buffer, "%c\n", (*(bool *)kp->arg) ? 'N' : 'Y');
}
EXPORT_SYMBOL(param_get_invbool);
@@ -460,8 +460,9 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
struct kernel_param p = *kp;
for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
+ /* Replace \n with comma */
if (i)
- buffer[off++] = ',';
+ buffer[off - 1] = ',';
p.arg = arr->elem + arr->elemsize * i;
check_kparam_locked(p.mod);
ret = arr->ops->get(buffer + off, &p);
@@ -507,7 +508,7 @@ EXPORT_SYMBOL(param_set_copystring);
int param_get_string(char *buffer, const struct kernel_param *kp)
{
const struct kparam_string *kps = kp->str;
- return strlcpy(buffer, kps->string, kps->maxlen);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string);
}
EXPORT_SYMBOL(param_get_string);
@@ -549,10 +550,6 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
kernel_param_lock(mk->mod);
count = attribute->param->ops->get(buf, attribute->param);
kernel_param_unlock(mk->mod);
- if (count > 0) {
- strcat(buf, "\n");
- ++count;
- }
return count;
}
@@ -600,7 +597,7 @@ EXPORT_SYMBOL(kernel_param_unlock);
/*
* add_sysfs_param - add a parameter to sysfs
* @mk: struct module_kobject
- * @kparam: the actual parameter definition to add to sysfs
+ * @kp: the actual parameter definition to add to sysfs
* @name: name of parameter
*
* Create a kobject if for a (per-module) parameter if mp NULL, and
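
With the change above, each param_get_*() helper terminates its own output with '\n' and the sysfs show path no longer appends one; for arrays, the separator logic overwrites the previous element's newline with a comma. A small userspace sketch of the array formatting (function names here are illustrative, not the kernel API):

#include <stdio.h>

static int get_int(char *buf, size_t len, int v)
{
	return snprintf(buf, len, "%i\n", v);
}

int main(void)
{
	int vals[] = { 1, 2, 3 };
	char buf[64];
	int off = 0;

	for (int i = 0; i < 3; i++) {
		if (i)
			buf[off - 1] = ',';	/* replace the previous '\n' with ',' */
		off += get_int(buf + off, sizeof(buf) - off, vals[i]);
	}
	fputs(buf, stdout);	/* prints "1,2,3" followed by a newline */
	return 0;
}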
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 3e2b4f519009..ccd2d20e6b06 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -120,22 +120,26 @@ static void s2idle_loop(void)
* frozen processes + suspended devices + idle processors.
* Thus s2idle_enter() should be called right after
* all devices have been suspended.
+ *
+ * Wakeups during the noirq suspend of devices may be spurious,
+ * so prevent them from terminating the loop right away.
*/
error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
if (!error)
s2idle_enter();
+ else if (error == -EBUSY && pm_wakeup_pending())
+ error = 0;
- dpm_noirq_resume_devices(PMSG_RESUME);
- if (error && (error != -EBUSY || !pm_wakeup_pending())) {
- dpm_noirq_end();
- break;
- }
-
- if (s2idle_ops && s2idle_ops->wake)
+ if (!error && s2idle_ops && s2idle_ops->wake)
s2idle_ops->wake();
+ dpm_noirq_resume_devices(PMSG_RESUME);
+
dpm_noirq_end();
+ if (error)
+ break;
+
if (s2idle_ops && s2idle_ops->sync)
s2idle_ops->sync();
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0c44c7b42e6d..b0ad62b0e7b8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -884,7 +884,7 @@ void rcu_irq_exit(void)
rdtp = this_cpu_ptr(&rcu_dynticks);
/* Page faults can happen in NMI handlers, so check... */
- if (READ_ONCE(rdtp->dynticks_nmi_nesting))
+ if (rdtp->dynticks_nmi_nesting)
return;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
@@ -1022,7 +1022,7 @@ void rcu_irq_enter(void)
rdtp = this_cpu_ptr(&rcu_dynticks);
/* Page faults can happen in NMI handlers, so check... */
- if (READ_ONCE(rdtp->dynticks_nmi_nesting))
+ if (rdtp->dynticks_nmi_nesting)
return;
oldval = rdtp->dynticks_nesting;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 70ba32e08a23..d3f3094856fe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5356,91 +5356,62 @@ static int wake_wide(struct task_struct *p)
return 1;
}
-struct llc_stats {
- unsigned long nr_running;
- unsigned long load;
- unsigned long capacity;
- int has_capacity;
-};
+/*
+ * The purpose of wake_affine() is to quickly determine on which CPU we can run
+ * soonest. For the purpose of speed we only consider the waking and previous
+ * CPU.
+ *
+ * wake_affine_idle() - only considers 'now', it checks if the waking CPU is (or
+ * will be) idle.
+ *
+ * wake_affine_weight() - considers the weight to reflect the average
+ * scheduling latency of the CPUs. This seems to work
+ * for the overloaded case.
+ */
-static bool get_llc_stats(struct llc_stats *stats, int cpu)
+static bool
+wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
+ int this_cpu, int prev_cpu, int sync)
{
- struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-
- if (!sds)
- return false;
+ if (idle_cpu(this_cpu))
+ return true;
- stats->nr_running = READ_ONCE(sds->nr_running);
- stats->load = READ_ONCE(sds->load);
- stats->capacity = READ_ONCE(sds->capacity);
- stats->has_capacity = stats->nr_running < per_cpu(sd_llc_size, cpu);
+ if (sync && cpu_rq(this_cpu)->nr_running == 1)
+ return true;
- return true;
+ return false;
}
-/*
- * Can a task be moved from prev_cpu to this_cpu without causing a load
- * imbalance that would trigger the load balancer?
- *
- * Since we're running on 'stale' values, we might in fact create an imbalance
- * but recomputing these values is expensive, as that'd mean iteration 2 cache
- * domains worth of CPUs.
- */
static bool
-wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
- int this_cpu, int prev_cpu, int sync)
+wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
+ int this_cpu, int prev_cpu, int sync)
{
- struct llc_stats prev_stats, this_stats;
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- if (!get_llc_stats(&prev_stats, prev_cpu) ||
- !get_llc_stats(&this_stats, this_cpu))
- return false;
+ this_eff_load = target_load(this_cpu, sd->wake_idx);
+ prev_eff_load = source_load(prev_cpu, sd->wake_idx);
- /*
- * If sync wakeup then subtract the (maximum possible)
- * effect of the currently running task from the load
- * of the current LLC.
- */
if (sync) {
unsigned long current_load = task_h_load(current);
- /* in this case load hits 0 and this LLC is considered 'idle' */
- if (current_load > this_stats.load)
+ if (current_load > this_eff_load)
return true;
- this_stats.load -= current_load;
+ this_eff_load -= current_load;
}
- /*
- * The has_capacity stuff is not SMT aware, but by trying to balance
- * the nr_running on both ends we try and fill the domain at equal
- * rates, thereby first consuming cores before siblings.
- */
-
- /* if the old cache has capacity, stay there */
- if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1)
- return false;
-
- /* if this cache has capacity, come here */
- if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
- return true;
-
- /*
- * Check to see if we can move the load without causing too much
- * imbalance.
- */
task_load = task_h_load(p);
- this_eff_load = 100;
- this_eff_load *= prev_stats.capacity;
-
- prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
- prev_eff_load *= this_stats.capacity;
+ this_eff_load += task_load;
+ if (sched_feat(WA_BIAS))
+ this_eff_load *= 100;
+ this_eff_load *= capacity_of(prev_cpu);
- this_eff_load *= this_stats.load + task_load;
- prev_eff_load *= prev_stats.load - task_load;
+ prev_eff_load -= task_load;
+ if (sched_feat(WA_BIAS))
+ prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
+ prev_eff_load *= capacity_of(this_cpu);
return this_eff_load <= prev_eff_load;
}
@@ -5449,22 +5420,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
int prev_cpu, int sync)
{
int this_cpu = smp_processor_id();
- bool affine;
+ bool affine = false;
- /*
- * Default to no affine wakeups; wake_affine() should not effect a task
- * placement the load-balancer feels inclined to undo. The conservative
- * option is therefore to not move tasks when they wake up.
- */
- affine = false;
+ if (sched_feat(WA_IDLE) && !affine)
+ affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);
- /*
- * If the wakeup is across cache domains, try to evaluate if movement
- * makes sense, otherwise rely on select_idle_siblings() to do
- * placement inside the cache domain.
- */
- if (!cpus_share_cache(prev_cpu, this_cpu))
- affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync);
+ if (sched_feat(WA_WEIGHT) && !affine)
+ affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
if (affine) {
@@ -7600,7 +7562,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
*/
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
- struct sched_domain_shared *shared = env->sd->shared;
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
@@ -7672,22 +7633,6 @@ next_group:
if (env->dst_rq->rd->overload != overload)
env->dst_rq->rd->overload = overload;
}
-
- if (!shared)
- return;
-
- /*
- * Since these are sums over groups they can contain some CPUs
- * multiple times for the NUMA domains.
- *
- * Currently only wake_affine_llc() and find_busiest_group()
- * uses these numbers, only the last is affected by this problem.
- *
- * XXX fix that.
- */
- WRITE_ONCE(shared->nr_running, sds->total_running);
- WRITE_ONCE(shared->load, sds->total_load);
- WRITE_ONCE(shared->capacity, sds->total_capacity);
}
/**
@@ -8098,6 +8043,13 @@ static int should_we_balance(struct lb_env *env)
int cpu, balance_cpu = -1;
/*
+ * Ensure the balancing environment is consistent; can happen
+ * when the softirq triggers 'during' hotplug.
+ */
+ if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
+ return 0;
+
+ /*
* In the newly idle case, we will allow all the cpu's
* to do the newly idle load balance.
*/
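
wake_affine_weight() above compares an effective load for the waking CPU (scaled by the previous CPU's capacity and, with WA_BIAS, by 100) against the previous CPU's effective load (scaled by imbalance_pct and the waking CPU's capacity). A standalone sketch of that comparison with invented numbers, assuming WA_BIAS is enabled:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool wake_affine_weight(int64_t this_load, int64_t prev_load,
			       int64_t this_capacity, int64_t prev_capacity,
			       int64_t task_load, int64_t current_load,
			       int imbalance_pct, bool sync)
{
	int64_t this_eff_load = this_load;
	int64_t prev_eff_load = prev_load;

	if (sync) {
		/* a sync wakeup discounts the waker's own load */
		if (current_load > this_eff_load)
			return true;
		this_eff_load -= current_load;
	}

	this_eff_load += task_load;
	this_eff_load *= 100;					/* WA_BIAS */
	this_eff_load *= prev_capacity;

	prev_eff_load -= task_load;
	prev_eff_load *= 100 + (imbalance_pct - 100) / 2;	/* WA_BIAS */
	prev_eff_load *= this_capacity;

	return this_eff_load <= prev_eff_load;
}

int main(void)
{
	/* lightly loaded waking CPU, busier previous CPU, equal capacities */
	printf("pull to waking CPU: %d\n",
	       wake_affine_weight(512, 2048, 1024, 1024, 256, 0, 117, false));
	return 0;
}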
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index d3fb15555291..319ed0e8a347 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -81,3 +81,6 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)
+SCHED_FEAT(WA_IDLE, true)
+SCHED_FEAT(WA_WEIGHT, true)
+SCHED_FEAT(WA_BIAS, true)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index bb3a38005b9c..0ae832e13b97 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -473,7 +473,7 @@ static long seccomp_attach_filter(unsigned int flags,
return 0;
}
-void __get_seccomp_filter(struct seccomp_filter *filter)
+static void __get_seccomp_filter(struct seccomp_filter *filter)
{
/* Reference count is bounded by the number of total processes. */
refcount_inc(&filter->usage);
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 1d71c051a951..5043e7433f4b 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -344,39 +344,30 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
* by the client, but only by calling this function.
* This function can only be called on a registered smp_hotplug_thread.
*/
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *new)
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *new)
{
struct cpumask *old = plug_thread->cpumask;
- cpumask_var_t tmp;
+ static struct cpumask tmp;
unsigned int cpu;
- if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
- return -ENOMEM;
-
- get_online_cpus();
+ lockdep_assert_cpus_held();
mutex_lock(&smpboot_threads_lock);
/* Park threads that were exclusively enabled on the old mask. */
- cpumask_andnot(tmp, old, new);
- for_each_cpu_and(cpu, tmp, cpu_online_mask)
+ cpumask_andnot(&tmp, old, new);
+ for_each_cpu_and(cpu, &tmp, cpu_online_mask)
smpboot_park_thread(plug_thread, cpu);
/* Unpark threads that are exclusively enabled on the new mask. */
- cpumask_andnot(tmp, new, old);
- for_each_cpu_and(cpu, tmp, cpu_online_mask)
+ cpumask_andnot(&tmp, new, old);
+ for_each_cpu_and(cpu, &tmp, cpu_online_mask)
smpboot_unpark_thread(plug_thread, cpu);
cpumask_copy(old, new);
mutex_unlock(&smpboot_threads_lock);
- put_online_cpus();
-
- free_cpumask_var(tmp);
-
- return 0;
}
-EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 423554ad3610..d9c31bc2eaea 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -872,9 +872,9 @@ static struct ctl_table kern_table[] = {
#if defined(CONFIG_LOCKUP_DETECTOR)
{
.procname = "watchdog",
- .data = &watchdog_user_enabled,
- .maxlen = sizeof (int),
- .mode = 0644,
+ .data = &watchdog_user_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = proc_watchdog,
.extra1 = &zero,
.extra2 = &one,
@@ -890,16 +890,12 @@ static struct ctl_table kern_table[] = {
},
{
.procname = "nmi_watchdog",
- .data = &nmi_watchdog_enabled,
- .maxlen = sizeof (int),
- .mode = 0644,
+ .data = &nmi_watchdog_user_enabled,
+ .maxlen = sizeof(int),
+ .mode = NMI_WATCHDOG_SYSCTL_PERM,
.proc_handler = proc_nmi_watchdog,
.extra1 = &zero,
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
.extra2 = &one,
-#else
- .extra2 = &zero,
-#endif
},
{
.procname = "watchdog_cpumask",
@@ -911,9 +907,9 @@ static struct ctl_table kern_table[] = {
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
{
.procname = "soft_watchdog",
- .data = &soft_watchdog_enabled,
- .maxlen = sizeof (int),
- .mode = 0644,
+ .data = &soft_watchdog_user_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = proc_soft_watchdog,
.extra1 = &zero,
.extra2 = &one,
@@ -2188,8 +2184,6 @@ static int do_proc_douintvec_conv(unsigned long *lvalp,
if (write) {
if (*lvalp > UINT_MAX)
return -EINVAL;
- if (*lvalp > UINT_MAX)
- return -EINVAL;
*valp = *lvalp;
} else {
unsigned int val = *valp;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6abfafd7f173..8319e09e15b9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4954,9 +4954,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
-static unsigned long save_global_trampoline;
-static unsigned long save_global_flags;
-
static int __init set_graph_function(char *str)
{
strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -6808,17 +6805,6 @@ void unregister_ftrace_graph(void)
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-#ifdef CONFIG_DYNAMIC_FTRACE
- /*
- * Function graph does not allocate the trampoline, but
- * other global_ops do. We need to reset the ALLOC_TRAMP flag
- * if one was used.
- */
- global_ops.trampoline = save_global_trampoline;
- if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
- global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
-#endif
-
out:
mutex_unlock(&ftrace_lock);
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index f5d52024f6b7..6bcb854909c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -29,20 +29,29 @@
#include <linux/kvm_para.h>
#include <linux/kthread.h>
-/* Watchdog configuration */
-static DEFINE_MUTEX(watchdog_proc_mutex);
-
-int __read_mostly nmi_watchdog_enabled;
+static DEFINE_MUTEX(watchdog_mutex);
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
- NMI_WATCHDOG_ENABLED;
+# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
+# define NMI_WATCHDOG_DEFAULT 1
#else
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED)
+# define NMI_WATCHDOG_DEFAULT 0
#endif
+unsigned long __read_mostly watchdog_enabled;
+int __read_mostly watchdog_user_enabled = 1;
+int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
+int __read_mostly soft_watchdog_user_enabled = 1;
+int __read_mostly watchdog_thresh = 10;
+int __read_mostly nmi_watchdog_available;
+
+struct cpumask watchdog_allowed_mask __read_mostly;
+
+struct cpumask watchdog_cpumask __read_mostly;
+unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
+
#ifdef CONFIG_HARDLOCKUP_DETECTOR
-/* boot commands */
/*
* Should we panic when a soft-lockup or hard-lockup occurs:
*/
@@ -56,9 +65,9 @@ unsigned int __read_mostly hardlockup_panic =
* kernel command line parameters are parsed, because otherwise it is not
* possible to override this in hardlockup_panic_setup().
*/
-void hardlockup_detector_disable(void)
+void __init hardlockup_detector_disable(void)
{
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+ nmi_watchdog_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
@@ -68,48 +77,24 @@ static int __init hardlockup_panic_setup(char *str)
else if (!strncmp(str, "nopanic", 7))
hardlockup_panic = 0;
else if (!strncmp(str, "0", 1))
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+ nmi_watchdog_user_enabled = 0;
else if (!strncmp(str, "1", 1))
- watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+ nmi_watchdog_user_enabled = 1;
return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
-#endif
-
-#ifdef CONFIG_SOFTLOCKUP_DETECTOR
-int __read_mostly soft_watchdog_enabled;
-#endif
-
-int __read_mostly watchdog_user_enabled;
-int __read_mostly watchdog_thresh = 10;
-
-#ifdef CONFIG_SMP
-int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#endif
-struct cpumask watchdog_cpumask __read_mostly;
-unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
-/*
- * The 'watchdog_running' variable is set to 1 when the watchdog threads
- * are registered/started and is set to 0 when the watchdog threads are
- * unregistered/stopped, so it is an indicator whether the threads exist.
- */
-static int __read_mostly watchdog_running;
-/*
- * If a subsystem has a need to deactivate the watchdog temporarily, it
- * can use the suspend/resume interface to achieve this. The content of
- * the 'watchdog_suspended' variable reflects this state. Existing threads
- * are parked/unparked by the lockup_detector_{suspend|resume} functions
- * (see comment blocks pertaining to those functions for further details).
- *
- * 'watchdog_suspended' also prevents threads from being registered/started
- * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
- * of 'watchdog_running' cannot change while the watchdog is deactivated
- * temporarily (see related code in 'proc' handlers).
- */
-int __read_mostly watchdog_suspended;
+static int __init hardlockup_all_cpu_backtrace_setup(char *str)
+{
+ sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
+ return 1;
+}
+__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
+# endif /* CONFIG_SMP */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/*
* These functions can be overridden if an architecture implements its
@@ -121,36 +106,68 @@ int __read_mostly watchdog_suspended;
*/
int __weak watchdog_nmi_enable(unsigned int cpu)
{
+ hardlockup_detector_perf_enable();
return 0;
}
+
void __weak watchdog_nmi_disable(unsigned int cpu)
{
+ hardlockup_detector_perf_disable();
}
-/*
- * watchdog_nmi_reconfigure can be implemented to be notified after any
- * watchdog configuration change. The arch hardlockup watchdog should
- * respond to the following variables:
- * - nmi_watchdog_enabled
+/* Return 0, if a NMI watchdog is available. Error code otherwise */
+int __weak __init watchdog_nmi_probe(void)
+{
+ return hardlockup_detector_perf_init();
+}
+
+/**
+ * watchdog_nmi_stop - Stop the watchdog for reconfiguration
+ *
+ * The reconfiguration steps are:
+ * watchdog_nmi_stop();
+ * update_variables();
+ * watchdog_nmi_start();
+ */
+void __weak watchdog_nmi_stop(void) { }
+
+/**
+ * watchdog_nmi_start - Start the watchdog after reconfiguration
+ *
+ * Counterpart to watchdog_nmi_stop().
+ *
+ * The following variables have been updated in update_variables() and
+ * contain the currently valid configuration:
+ * - watchdog_enabled
* - watchdog_thresh
* - watchdog_cpumask
- * - sysctl_hardlockup_all_cpu_backtrace
- * - hardlockup_panic
- * - watchdog_suspended
*/
-void __weak watchdog_nmi_reconfigure(void)
+void __weak watchdog_nmi_start(void) { }
+
+/**
+ * lockup_detector_update_enable - Update the sysctl enable bit
+ *
+ * Caller needs to make sure that the NMI/perf watchdogs are off, so this
+ * can't race with watchdog_nmi_disable().
+ */
+static void lockup_detector_update_enable(void)
{
+ watchdog_enabled = 0;
+ if (!watchdog_user_enabled)
+ return;
+ if (nmi_watchdog_available && nmi_watchdog_user_enabled)
+ watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+ if (soft_watchdog_user_enabled)
+ watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
-
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
-/* Helper for online, unparked cpus. */
-#define for_each_watchdog_cpu(cpu) \
- for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
-
-atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+/* Global variables, exported for sysctl */
+unsigned int __read_mostly softlockup_panic =
+ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+static bool softlockup_threads_initialized __read_mostly;
static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -164,50 +181,40 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
-unsigned int __read_mostly softlockup_panic =
- CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
-
static int __init softlockup_panic_setup(char *str)
{
softlockup_panic = simple_strtoul(str, NULL, 0);
-
return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
- watchdog_enabled = 0;
+ watchdog_user_enabled = 0;
return 1;
}
__setup("nowatchdog", nowatchdog_setup);
static int __init nosoftlockup_setup(char *str)
{
- watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
+ soft_watchdog_user_enabled = 0;
return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
#ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
- sysctl_softlockup_all_cpu_backtrace =
- !!simple_strtol(str, NULL, 0);
+ sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static int __init hardlockup_all_cpu_backtrace_setup(char *str)
-{
- sysctl_hardlockup_all_cpu_backtrace =
- !!simple_strtol(str, NULL, 0);
- return 1;
-}
-__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
-#endif
#endif
+static void __lockup_detector_cleanup(void);
+
/*
* Hard-lockup warnings should be triggered after just a few seconds. Soft-
* lockups can have false positives under extreme conditions. So we generally
@@ -278,11 +285,15 @@ void touch_all_softlockup_watchdogs(void)
int cpu;
/*
- * this is done lockless
- * do we care if a 0 races with a timestamp?
- * all it means is the softlock check starts one cycle later
+ * watchdog_mutex cannot be taken here, as this might be called
+ * from (soft)interrupt context, so the access to
+ * watchdog_allowed_mask might race with a concurrent update.
+ *
+ * The watchdog time stamp can race against a concurrent real
+ * update as well, the only side effect might be a cycle delay for
+ * the softlockup check.
*/
- for_each_watchdog_cpu(cpu)
+ for_each_cpu(cpu, &watchdog_allowed_mask)
per_cpu(watchdog_touch_ts, cpu) = 0;
wq_watchdog_touch(-1);
}
@@ -322,9 +333,6 @@ static void watchdog_interrupt_count(void)
__this_cpu_inc(hrtimer_interrupts);
}
-static int watchdog_enable_all_cpus(void);
-static void watchdog_disable_all_cpus(void);
-
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
@@ -333,7 +341,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
- if (atomic_read(&watchdog_park_in_progress) != 0)
+ if (!watchdog_enabled)
return HRTIMER_NORESTART;
/* kick the hardlockup detector */
@@ -447,32 +455,38 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
static void watchdog_enable(unsigned int cpu)
{
- struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+ struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
- /* kick off the timer for the hardlockup detector */
+ /*
+ * Start the timer first to prevent the NMI watchdog triggering
+ * before the timer has a chance to fire.
+ */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
-
- /* Enable the perf event */
- watchdog_nmi_enable(cpu);
-
- /* done here because hrtimer_start can only pin to smp_processor_id() */
hrtimer_start(hrtimer, ns_to_ktime(sample_period),
HRTIMER_MODE_REL_PINNED);
- /* initialize timestamp */
- watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
+ /* Initialize timestamp */
__touch_watchdog();
+ /* Enable the perf event */
+ if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
+ watchdog_nmi_enable(cpu);
+
+ watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
static void watchdog_disable(unsigned int cpu)
{
- struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+ struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
watchdog_set_prio(SCHED_NORMAL, 0);
- hrtimer_cancel(hrtimer);
- /* disable the perf event */
+ /*
+ * Disable the perf event first, so that a large delay between
+ * disabling the timer and disabling the perf event cannot cause
+ * the perf NMI to detect a false positive.
+ */
watchdog_nmi_disable(cpu);
+ hrtimer_cancel(hrtimer);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
@@ -499,21 +513,6 @@ static void watchdog(unsigned int cpu)
__this_cpu_write(soft_lockup_hrtimer_cnt,
__this_cpu_read(hrtimer_interrupts));
__touch_watchdog();
-
- /*
- * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
- * failure path. Check for failures that can occur asynchronously -
- * for example, when CPUs are on-lined - and shut down the hardware
- * perf event on each CPU accordingly.
- *
- * The only non-obvious place this bit can be cleared is through
- * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
- * pr_info here would be too noisy as it would result in a message
- * every few seconds if the hardlockup was disabled but the softlockup
- * enabled.
- */
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
- watchdog_nmi_disable(cpu);
}
static struct smp_hotplug_thread watchdog_threads = {
@@ -527,295 +526,174 @@ static struct smp_hotplug_thread watchdog_threads = {
.unpark = watchdog_enable,
};
-/*
- * park all watchdog threads that are specified in 'watchdog_cpumask'
- *
- * This function returns an error if kthread_park() of a watchdog thread
- * fails. In this situation, the watchdog threads of some CPUs can already
- * be parked and the watchdog threads of other CPUs can still be runnable.
- * Callers are expected to handle this special condition as appropriate in
- * their context.
- *
- * This function may only be called in a context that is protected against
- * races with CPU hotplug - for example, via get_online_cpus().
- */
-static int watchdog_park_threads(void)
+static void softlockup_update_smpboot_threads(void)
{
- int cpu, ret = 0;
+ lockdep_assert_held(&watchdog_mutex);
- atomic_set(&watchdog_park_in_progress, 1);
+ if (!softlockup_threads_initialized)
+ return;
- for_each_watchdog_cpu(cpu) {
- ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
- if (ret)
- break;
- }
-
- atomic_set(&watchdog_park_in_progress, 0);
-
- return ret;
+ smpboot_update_cpumask_percpu_thread(&watchdog_threads,
+ &watchdog_allowed_mask);
}
-/*
- * unpark all watchdog threads that are specified in 'watchdog_cpumask'
- *
- * This function may only be called in a context that is protected against
- * races with CPU hotplug - for example, via get_online_cpus().
- */
-static void watchdog_unpark_threads(void)
+/* Temporarily park all watchdog threads */
+static void softlockup_park_all_threads(void)
{
- int cpu;
-
- for_each_watchdog_cpu(cpu)
- kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+ cpumask_clear(&watchdog_allowed_mask);
+ softlockup_update_smpboot_threads();
}
-static int update_watchdog_all_cpus(void)
+/* Unpark enabled threads */
+static void softlockup_unpark_threads(void)
{
- int ret;
-
- ret = watchdog_park_threads();
- if (ret)
- return ret;
-
- watchdog_unpark_threads();
-
- return 0;
+ cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
+ softlockup_update_smpboot_threads();
}
-static int watchdog_enable_all_cpus(void)
+static void lockup_detector_reconfigure(void)
{
- int err = 0;
-
- if (!watchdog_running) {
- err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
- &watchdog_cpumask);
- if (err)
- pr_err("Failed to create watchdog threads, disabled\n");
- else
- watchdog_running = 1;
- } else {
- /*
- * Enable/disable the lockup detectors or
- * change the sample period 'on the fly'.
- */
- err = update_watchdog_all_cpus();
-
- if (err) {
- watchdog_disable_all_cpus();
- pr_err("Failed to update lockup detectors, disabled\n");
- }
- }
-
- if (err)
- watchdog_enabled = 0;
-
- return err;
+ cpus_read_lock();
+ watchdog_nmi_stop();
+ softlockup_park_all_threads();
+ set_sample_period();
+ lockup_detector_update_enable();
+ if (watchdog_enabled && watchdog_thresh)
+ softlockup_unpark_threads();
+ watchdog_nmi_start();
+ cpus_read_unlock();
+ /*
+ * Must be called outside the cpus locked section to prevent
+ * recursive locking in the perf code.
+ */
+ __lockup_detector_cleanup();
}
-static void watchdog_disable_all_cpus(void)
+/*
+ * Create the watchdog thread infrastructure and configure the detector(s).
+ *
+ * The threads are not unparked as watchdog_allowed_mask is empty. When
+ * the threads are sucessfully initialized, take the proper locks and
+ * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
+ */
+static __init void lockup_detector_setup(void)
{
- if (watchdog_running) {
- watchdog_running = 0;
- smpboot_unregister_percpu_thread(&watchdog_threads);
- }
-}
+ int ret;
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
-{
- return smpboot_update_cpumask_percpu_thread(
- &watchdog_threads, &watchdog_cpumask);
-}
-#endif
+ /*
+ * If sysctl is off and watchdog got disabled on the command line,
+ * nothing to do here.
+ */
+ lockup_detector_update_enable();
-#else /* SOFTLOCKUP */
-static int watchdog_park_threads(void)
-{
- return 0;
-}
+ if (!IS_ENABLED(CONFIG_SYSCTL) &&
+ !(watchdog_enabled && watchdog_thresh))
+ return;
-static void watchdog_unpark_threads(void)
-{
-}
+ ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
+ &watchdog_allowed_mask);
+ if (ret) {
+ pr_err("Failed to initialize soft lockup detector threads\n");
+ return;
+ }
-static int watchdog_enable_all_cpus(void)
-{
- return 0;
+ mutex_lock(&watchdog_mutex);
+ softlockup_threads_initialized = true;
+ lockup_detector_reconfigure();
+ mutex_unlock(&watchdog_mutex);
}
-static void watchdog_disable_all_cpus(void)
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
+static inline int watchdog_park_threads(void) { return 0; }
+static inline void watchdog_unpark_threads(void) { }
+static inline int watchdog_enable_all_cpus(void) { return 0; }
+static inline void watchdog_disable_all_cpus(void) { }
+static void lockup_detector_reconfigure(void)
{
+ cpus_read_lock();
+ watchdog_nmi_stop();
+ lockup_detector_update_enable();
+ watchdog_nmi_start();
+ cpus_read_unlock();
}
-
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
+static inline void lockup_detector_setup(void)
{
- return 0;
+ lockup_detector_reconfigure();
}
-#endif
+#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
-static void set_sample_period(void)
+static void __lockup_detector_cleanup(void)
{
+ lockdep_assert_held(&watchdog_mutex);
+ hardlockup_detector_perf_cleanup();
}
-#endif /* SOFTLOCKUP */
-/*
- * Suspend the hard and soft lockup detector by parking the watchdog threads.
+/**
+ * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
+ *
+ * Caller must not hold the cpu hotplug rwsem.
*/
-int lockup_detector_suspend(void)
+void lockup_detector_cleanup(void)
{
- int ret = 0;
-
- get_online_cpus();
- mutex_lock(&watchdog_proc_mutex);
- /*
- * Multiple suspend requests can be active in parallel (counted by
- * the 'watchdog_suspended' variable). If the watchdog threads are
- * running, the first caller takes care that they will be parked.
- * The state of 'watchdog_running' cannot change while a suspend
- * request is active (see related code in 'proc' handlers).
- */
- if (watchdog_running && !watchdog_suspended)
- ret = watchdog_park_threads();
-
- if (ret == 0)
- watchdog_suspended++;
- else {
- watchdog_disable_all_cpus();
- pr_err("Failed to suspend lockup detectors, disabled\n");
- watchdog_enabled = 0;
- }
-
- watchdog_nmi_reconfigure();
-
- mutex_unlock(&watchdog_proc_mutex);
-
- return ret;
+ mutex_lock(&watchdog_mutex);
+ __lockup_detector_cleanup();
+ mutex_unlock(&watchdog_mutex);
}
-/*
- * Resume the hard and soft lockup detector by unparking the watchdog threads.
+/**
+ * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
+ *
+ * Special interface for parisc. It prevents lockup detector warnings from
+ * the default pm_poweroff() function which busy loops forever.
*/
-void lockup_detector_resume(void)
+void lockup_detector_soft_poweroff(void)
{
- mutex_lock(&watchdog_proc_mutex);
-
- watchdog_suspended--;
- /*
- * The watchdog threads are unparked if they were previously running
- * and if there is no more active suspend request.
- */
- if (watchdog_running && !watchdog_suspended)
- watchdog_unpark_threads();
-
- watchdog_nmi_reconfigure();
-
- mutex_unlock(&watchdog_proc_mutex);
- put_online_cpus();
+ watchdog_enabled = 0;
}
#ifdef CONFIG_SYSCTL
-/*
- * Update the run state of the lockup detectors.
- */
-static int proc_watchdog_update(void)
+/* Propagate any changes to the watchdog threads */
+static void proc_watchdog_update(void)
{
- int err = 0;
-
- /*
- * Watchdog threads won't be started if they are already active.
- * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
- * care of this. If those threads are already active, the sample
- * period will be updated and the lockup detectors will be enabled
- * or disabled 'on the fly'.
- */
- if (watchdog_enabled && watchdog_thresh)
- err = watchdog_enable_all_cpus();
- else
- watchdog_disable_all_cpus();
-
- watchdog_nmi_reconfigure();
-
- return err;
-
+ /* Remove impossible cpus to keep sysctl output clean. */
+ cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
+ lockup_detector_reconfigure();
}
/*
* common function for watchdog, nmi_watchdog and soft_watchdog parameter
*
- * caller | table->data points to | 'which' contains the flag(s)
- * -------------------|-----------------------|-----------------------------
- * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
- * | | with SOFT_WATCHDOG_ENABLED
- * -------------------|-----------------------|-----------------------------
- * proc_nmi_watchdog | nmi_watchdog_enabled | NMI_WATCHDOG_ENABLED
- * -------------------|-----------------------|-----------------------------
- * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
+ * caller | table->data points to | 'which'
+ * -------------------|----------------------------|--------------------------
+ * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED |
+ * | | SOFT_WATCHDOG_ENABLED
+ * -------------------|----------------------------|--------------------------
+ * proc_nmi_watchdog | nmi_watchdog_user_enabled | NMI_WATCHDOG_ENABLED
+ * -------------------|----------------------------|--------------------------
+ * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
*/
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int err, old, new;
- int *watchdog_param = (int *)table->data;
+ int err, old, *param = table->data;
- get_online_cpus();
- mutex_lock(&watchdog_proc_mutex);
+ mutex_lock(&watchdog_mutex);
- if (watchdog_suspended) {
- /* no parameter changes allowed while watchdog is suspended */
- err = -EAGAIN;
- goto out;
- }
-
- /*
- * If the parameter is being read return the state of the corresponding
- * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
- * run state of the lockup detectors.
- */
if (!write) {
- *watchdog_param = (watchdog_enabled & which) != 0;
+ /*
+ * On read synchronize the userspace interface. This is a
+ * racy snapshot.
+ */
+ *param = (watchdog_enabled & which) != 0;
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
} else {
+ old = READ_ONCE(*param);
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (err)
- goto out;
-
- /*
- * There is a race window between fetching the current value
- * from 'watchdog_enabled' and storing the new value. During
- * this race window, watchdog_nmi_enable() can sneak in and
- * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
- * The 'cmpxchg' detects this race and the loop retries.
- */
- do {
- old = watchdog_enabled;
- /*
- * If the parameter value is not zero set the
- * corresponding bit(s), else clear it(them).
- */
- if (*watchdog_param)
- new = old | which;
- else
- new = old & ~which;
- } while (cmpxchg(&watchdog_enabled, old, new) != old);
-
- /*
- * Update the run state of the lockup detectors. There is _no_
- * need to check the value returned by proc_watchdog_update()
- * and to restore the previous value of 'watchdog_enabled' as
- * both lockup detectors are disabled if proc_watchdog_update()
- * returns an error.
- */
- if (old == new)
- goto out;
-
- err = proc_watchdog_update();
+ if (!err && old != READ_ONCE(*param))
+ proc_watchdog_update();
}
-out:
- mutex_unlock(&watchdog_proc_mutex);
- put_online_cpus();
+ mutex_unlock(&watchdog_mutex);
return err;
}
@@ -835,6 +713,8 @@ int proc_watchdog(struct ctl_table *table, int write,
int proc_nmi_watchdog(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ if (!nmi_watchdog_available && write)
+ return -ENOTSUPP;
return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
table, write, buffer, lenp, ppos);
}
@@ -855,39 +735,17 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
int proc_watchdog_thresh(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int err, old, new;
-
- get_online_cpus();
- mutex_lock(&watchdog_proc_mutex);
+ int err, old;
- if (watchdog_suspended) {
- /* no parameter changes allowed while watchdog is suspended */
- err = -EAGAIN;
- goto out;
- }
+ mutex_lock(&watchdog_mutex);
- old = ACCESS_ONCE(watchdog_thresh);
+ old = READ_ONCE(watchdog_thresh);
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (err || !write)
- goto out;
-
- /*
- * Update the sample period. Restore on failure.
- */
- new = ACCESS_ONCE(watchdog_thresh);
- if (old == new)
- goto out;
+ if (!err && write && old != READ_ONCE(watchdog_thresh))
+ proc_watchdog_update();
- set_sample_period();
- err = proc_watchdog_update();
- if (err) {
- watchdog_thresh = old;
- set_sample_period();
- }
-out:
- mutex_unlock(&watchdog_proc_mutex);
- put_online_cpus();
+ mutex_unlock(&watchdog_mutex);
return err;
}
@@ -902,45 +760,19 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
{
int err;
- get_online_cpus();
- mutex_lock(&watchdog_proc_mutex);
-
- if (watchdog_suspended) {
- /* no parameter changes allowed while watchdog is suspended */
- err = -EAGAIN;
- goto out;
- }
+ mutex_lock(&watchdog_mutex);
err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
- if (!err && write) {
- /* Remove impossible cpus to keep sysctl output cleaner. */
- cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
- cpu_possible_mask);
-
- if (watchdog_running) {
- /*
- * Failure would be due to being unable to allocate
- * a temporary cpumask, so we are likely not in a
- * position to do much else to make things better.
- */
- if (watchdog_update_cpus() != 0)
- pr_err("cpumask update failed\n");
- }
+ if (!err && write)
+ proc_watchdog_update();
- watchdog_nmi_reconfigure();
- }
-out:
- mutex_unlock(&watchdog_proc_mutex);
- put_online_cpus();
+ mutex_unlock(&watchdog_mutex);
return err;
}
-
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
- set_sample_period();
-
#ifdef CONFIG_NO_HZ_FULL
if (tick_nohz_full_enabled()) {
pr_info("Disabling watchdog on nohz_full cores by default\n");
@@ -951,6 +783,7 @@ void __init lockup_detector_init(void)
cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif
- if (watchdog_enabled)
- watchdog_enable_all_cpus();
+ if (!watchdog_nmi_probe())
+ nmi_watchdog_available = true;
+ lockup_detector_setup();
}
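
The rewritten proc handlers above all reduce to one shape: take watchdog_mutex, snapshot the old value with READ_ONCE(), let the generic proc helper parse the write, and call proc_watchdog_update() only when the value actually changed. A minimal user-space sketch of that shape, with hypothetical names and a pthread mutex standing in for watchdog_mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;
static int thresh = 10;			/* stands in for the sysctl-backed value */

static void reconfigure(void)		/* stands in for proc_watchdog_update() */
{
	printf("reconfigure, thresh=%d\n", thresh);
}

static int set_thresh(int new_thresh)	/* analogue of proc_watchdog_thresh() */
{
	int old;

	pthread_mutex_lock(&cfg_mutex);
	old = thresh;			/* snapshot before the update */
	thresh = new_thresh;		/* stands in for proc_dointvec_minmax() */
	if (old != thresh)
		reconfigure();		/* only act when the value changed */
	pthread_mutex_unlock(&cfg_mutex);
	return 0;
}

int main(void)
{
	set_thresh(10);			/* unchanged: no reconfigure */
	set_thresh(20);			/* changed: reconfigure once */
	return 0;
}
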
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 3a09ea1b1d3d..71a62ceacdc8 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -21,8 +21,10 @@
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
+static unsigned int watchdog_cpus;
void arch_touch_nmi_watchdog(void)
{
@@ -103,15 +105,12 @@ static struct perf_event_attr wd_hw_attr = {
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
/* Ensure the watchdog never gets throttled */
event->hw.interrupts = 0;
- if (atomic_read(&watchdog_park_in_progress) != 0)
- return;
-
if (__this_cpu_read(watchdog_nmi_touch) == true) {
__this_cpu_write(watchdog_nmi_touch, false);
return;
@@ -160,104 +159,131 @@ static void watchdog_overflow_callback(struct perf_event *event,
return;
}
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long firstcpu_err;
-static atomic_t watchdog_cpus;
-
-int watchdog_nmi_enable(unsigned int cpu)
+static int hardlockup_detector_event_create(void)
{
+ unsigned int cpu = smp_processor_id();
struct perf_event_attr *wd_attr;
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
- int firstcpu = 0;
-
- /* nothing to do if the hard lockup detector is disabled */
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
- goto out;
-
- /* is it already setup and enabled? */
- if (event && event->state > PERF_EVENT_STATE_OFF)
- goto out;
-
- /* it is setup but not enabled */
- if (event != NULL)
- goto out_enable;
-
- if (atomic_inc_return(&watchdog_cpus) == 1)
- firstcpu = 1;
+ struct perf_event *evt;
wd_attr = &wd_hw_attr;
wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
/* Try to register using hardware perf events */
- event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+ evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
+ watchdog_overflow_callback, NULL);
+ if (IS_ERR(evt)) {
+ pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
+ PTR_ERR(evt));
+ return PTR_ERR(evt);
+ }
+ this_cpu_write(watchdog_ev, evt);
+ return 0;
+}
- /* save the first cpu's error for future comparision */
- if (firstcpu && IS_ERR(event))
- firstcpu_err = PTR_ERR(event);
+/**
+ * hardlockup_detector_perf_enable - Enable the local event
+ */
+void hardlockup_detector_perf_enable(void)
+{
+ if (hardlockup_detector_event_create())
+ return;
- if (!IS_ERR(event)) {
- /* only print for the first cpu initialized */
- if (firstcpu || firstcpu_err)
- pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
- goto out_save;
- }
+ if (!watchdog_cpus++)
+ pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
- /*
- * Disable the hard lockup detector if _any_ CPU fails to set up
- * set up the hardware perf event. The watchdog() function checks
- * the NMI_WATCHDOG_ENABLED bit periodically.
- *
- * The barriers are for syncing up watchdog_enabled across all the
- * cpus, as clear_bit() does not use barriers.
- */
- smp_mb__before_atomic();
- clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
- smp_mb__after_atomic();
-
- /* skip displaying the same error again */
- if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
- return PTR_ERR(event);
-
- /* vary the KERN level based on the returned errno */
- if (PTR_ERR(event) == -EOPNOTSUPP)
- pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
- else if (PTR_ERR(event) == -ENOENT)
- pr_warn("disabled (cpu%i): hardware events not enabled\n",
- cpu);
- else
- pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
- cpu, PTR_ERR(event));
-
- pr_info("Shutting down hard lockup detector on all cpus\n");
-
- return PTR_ERR(event);
-
- /* success path */
-out_save:
- per_cpu(watchdog_ev, cpu) = event;
-out_enable:
- perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
- return 0;
+ perf_event_enable(this_cpu_read(watchdog_ev));
}
-void watchdog_nmi_disable(unsigned int cpu)
+/**
+ * hardlockup_detector_perf_disable - Disable the local event
+ */
+void hardlockup_detector_perf_disable(void)
{
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
+ struct perf_event *event = this_cpu_read(watchdog_ev);
if (event) {
perf_event_disable(event);
+ cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
+ watchdog_cpus--;
+ }
+}
+
+/**
+ * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
+ *
+ * Called from lockup_detector_cleanup(). Serialized by the caller.
+ */
+void hardlockup_detector_perf_cleanup(void)
+{
+ int cpu;
+
+ for_each_cpu(cpu, &dead_events_mask) {
+ struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+ /*
+ * Required because for_each_cpu() unconditionally reports
+ * CPU0 as set on UP kernels. Sigh.
+ */
+ if (event)
+ perf_event_release_kernel(event);
per_cpu(watchdog_ev, cpu) = NULL;
+ }
+ cpumask_clear(&dead_events_mask);
+}
+
+/**
+ * hardlockup_detector_perf_stop - Globally stop watchdog events
+ *
+ * Special interface for x86 to handle the perf HT bug.
+ */
+void __init hardlockup_detector_perf_stop(void)
+{
+ int cpu;
+
+ lockdep_assert_cpus_held();
+
+ for_each_online_cpu(cpu) {
+ struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+ if (event)
+ perf_event_disable(event);
+ }
+}
- /* should be in cleanup, but blocks oprofile */
- perf_event_release_kernel(event);
+/**
+ * hardlockup_detector_perf_restart - Globally restart watchdog events
+ *
+ * Special interface for x86 to handle the perf HT bug.
+ */
+void __init hardlockup_detector_perf_restart(void)
+{
+ int cpu;
+
+ lockdep_assert_cpus_held();
+
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ return;
+
+ for_each_online_cpu(cpu) {
+ struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+ if (event)
+ perf_event_enable(event);
+ }
+}
+
+/**
+ * hardlockup_detector_perf_init - Probe whether NMI event is available at all
+ */
+int __init hardlockup_detector_perf_init(void)
+{
+ int ret = hardlockup_detector_event_create();
- /* watchdog_nmi_enable() expects this to be zero initially. */
- if (atomic_dec_and_test(&watchdog_cpus))
- firstcpu_err = 0;
+ if (ret) {
+ pr_info("Perf NMI watchdog permanently disabled\n");
+ } else {
+ perf_event_release_kernel(this_cpu_read(watchdog_ev));
+ this_cpu_write(watchdog_ev, NULL);
}
+ return ret;
}
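
hardlockup_detector_perf_disable() above no longer releases the per-CPU event from the hotplug path; it only marks the CPU in dead_events_mask, and hardlockup_detector_perf_cleanup() reaps the marked events later. A small stand-alone sketch of that mark-now, reap-later pattern, with plain malloc/free standing in for the perf event API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 8

static void *events[NCPUS];		/* stands in for the per-CPU watchdog_ev */
static uint32_t dead_mask;		/* stands in for dead_events_mask */

static void event_disable(int cpu)
{
	/* Disable in place, but defer the release to a later cleanup pass. */
	dead_mask |= 1u << cpu;
}

static void events_cleanup(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (!(dead_mask & (1u << cpu)))
			continue;
		free(events[cpu]);	/* stands in for perf_event_release_kernel() */
		events[cpu] = NULL;
	}
	dead_mask = 0;
}

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		events[cpu] = malloc(16);

	event_disable(2);
	event_disable(5);
	events_cleanup();		/* reaps only the CPUs marked dead */

	for (int cpu = 0; cpu < NCPUS; cpu++)
		free(events[cpu]);	/* free(NULL) is a no-op for reaped slots */
	printf("done\n");
	return 0;
}
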
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7566eff22236..ff21b4dbb392 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1092,8 +1092,8 @@ config PROVE_LOCKING
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
- select LOCKDEP_CROSSRELEASE
- select LOCKDEP_COMPLETIONS
+ select LOCKDEP_CROSSRELEASE if BROKEN
+ select LOCKDEP_COMPLETIONS if BROKEN
select TRACE_IRQFLAGS
default n
help
@@ -1590,6 +1590,54 @@ config LATENCYTOP
source kernel/trace/Kconfig
+config PROVIDE_OHCI1394_DMA_INIT
+ bool "Remote debugging over FireWire early on boot"
+ depends on PCI && X86
+ help
+ If you want to debug problems which hang or crash the kernel early
+ on boot and the crashing machine has a FireWire port, you can use
+ this feature to remotely access the memory of the crashed machine
+ over FireWire. This employs remote DMA as part of the OHCI1394
+ specification which is now the standard for FireWire controllers.
+
+ With remote DMA, you can monitor the printk buffer remotely using
+ firescope and access all memory below 4GB using fireproxy from gdb.
+ Even controlling a kernel debugger is possible using remote DMA.
+
+ Usage:
+
+ If ohci1394_dma=early is used as boot parameter, it will initialize
+ all OHCI1394 controllers which are found in the PCI config space.
+
+ As all changes to the FireWire bus such as enabling and disabling
+ devices cause a bus reset and thereby disable remote DMA for all
+ devices, be sure to have the cable plugged and FireWire enabled on
+ the debugging host before booting the debug target for debugging.
+
+ This code (~1k) is freed after boot. By then, the firewire stack
+ in charge of the OHCI-1394 controllers should be used instead.
+
+ See Documentation/debugging-via-ohci1394.txt for more information.
+
+config DMA_API_DEBUG
+ bool "Enable debugging of DMA-API usage"
+ depends on HAVE_DMA_API_DEBUG
+ help
+ Enable this option to debug the use of the DMA API by device drivers.
+ With this option you will be able to detect common bugs in device
+ drivers like double-freeing of DMA mappings or freeing mappings that
+ were never allocated.
+
+ This also attempts to catch cases where a page owned by DMA is
+ accessed by the cpu in a way that could cause data corruption. For
+ example, this enables cow_user_page() to check that the source page is
+ not undergoing DMA.
+
+ This option causes a performance degradation. Use only if you want to
+ debug device drivers and dma interactions.
+
+ If unsure, say N.
+
menu "Runtime Testing"
config LKDTM
@@ -1749,56 +1797,6 @@ config TEST_PARMAN
If unsure, say N.
-endmenu # runtime tests
-
-config PROVIDE_OHCI1394_DMA_INIT
- bool "Remote debugging over FireWire early on boot"
- depends on PCI && X86
- help
- If you want to debug problems which hang or crash the kernel early
- on boot and the crashing machine has a FireWire port, you can use
- this feature to remotely access the memory of the crashed machine
- over FireWire. This employs remote DMA as part of the OHCI1394
- specification which is now the standard for FireWire controllers.
-
- With remote DMA, you can monitor the printk buffer remotely using
- firescope and access all memory below 4GB using fireproxy from gdb.
- Even controlling a kernel debugger is possible using remote DMA.
-
- Usage:
-
- If ohci1394_dma=early is used as boot parameter, it will initialize
- all OHCI1394 controllers which are found in the PCI config space.
-
- As all changes to the FireWire bus such as enabling and disabling
- devices cause a bus reset and thereby disable remote DMA for all
- devices, be sure to have the cable plugged and FireWire enabled on
- the debugging host before booting the debug target for debugging.
-
- This code (~1k) is freed after boot. By then, the firewire stack
- in charge of the OHCI-1394 controllers should be used instead.
-
- See Documentation/debugging-via-ohci1394.txt for more information.
-
-config DMA_API_DEBUG
- bool "Enable debugging of DMA-API usage"
- depends on HAVE_DMA_API_DEBUG
- help
- Enable this option to debug the use of the DMA API by device drivers.
- With this option you will be able to detect common bugs in device
- drivers like double-freeing of DMA mappings or freeing mappings that
- were never allocated.
-
- This also attempts to catch cases where a page owned by DMA is
- accessed by the cpu in a way that could cause data corruption. For
- example, this enables cow_user_page() to check that the source page is
- not undergoing DMA.
-
- This option causes a performance degradation. Use only if you want to
- debug device drivers and dma interactions.
-
- If unsure, say N.
-
config TEST_LKM
tristate "Test module loading with 'hello world' module"
default n
@@ -1873,18 +1871,6 @@ config TEST_UDELAY
If unsure, say N.
-config MEMTEST
- bool "Memtest"
- depends on HAVE_MEMBLOCK
- ---help---
- This option adds a kernel parameter 'memtest', which allows memtest
- to be set.
- memtest=0, mean disabled; -- default
- memtest=1, mean do 1 test pattern;
- ...
- memtest=17, mean do 17 test patterns.
- If you are unsure how to answer this question, answer N.
-
config TEST_STATIC_KEYS
tristate "Test static keys"
default n
@@ -1894,16 +1880,6 @@ config TEST_STATIC_KEYS
If unsure, say N.
-config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
- help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
- for validity.
-
- If unsure, say N.
-
config TEST_KMOD
tristate "kmod stress tester"
default n
@@ -1941,6 +1917,29 @@ config TEST_DEBUG_VIRTUAL
If unsure, say N.
+endmenu # runtime tests
+
+config MEMTEST
+ bool "Memtest"
+ depends on HAVE_MEMBLOCK
+ ---help---
+ This option adds a kernel parameter 'memtest', which allows memtest
+ to be set.
+ memtest=0, mean disabled; -- default
+ memtest=1, mean do 1 test pattern;
+ ...
+ memtest=17, mean do 17 test patterns.
+ If you are unsure how to answer this question, answer N.
+
+config BUG_ON_DATA_CORRUPTION
+ bool "Trigger a BUG when data corruption is detected"
+ select DEBUG_LIST
+ help
+ Select this option if the kernel should BUG when it encounters
+ data corruption in kernel memory structures when they get checked
+ for validity.
+
+ If unsure, say N.
source "samples/Kconfig"
diff --git a/lib/idr.c b/lib/idr.c
index f9adf4805fd7..edd9b2be1651 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -146,8 +146,8 @@ EXPORT_SYMBOL(idr_get_next_ext);
* idr_alloc() and idr_remove() (as long as the ID being removed is not
* the one being replaced!).
*
- * Returns: 0 on success. %-ENOENT indicates that @id was not found.
- * %-EINVAL indicates that @id or @ptr were not valid.
+ * Returns: the old value on success. %-ENOENT indicates that @id was not
+ * found. %-EINVAL indicates that @id or @ptr were not valid.
*/
void *idr_replace(struct idr *idr, void *ptr, int id)
{
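
With the corrected kerneldoc, a caller of idr_replace() treats the return value as either the previously stored pointer or an encoded error. The sketch below mimics that convention in user space; the ERR_PTR()/IS_ERR()/PTR_ERR() macros are simplified stand-ins for the kernel's, and replace() and dummy_old are hypothetical:

#include <errno.h>
#include <stdio.h>

/* User-space stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int dummy_old = 42;

/* Hypothetical helper with idr_replace()-like semantics: the previously
 * stored pointer on success, an encoded -ENOENT when the id is absent. */
static void *replace(int id, void *newp)
{
	(void)newp;
	if (id != 1)
		return ERR_PTR(-ENOENT);
	return &dummy_old;
}

int main(void)
{
	void *old = replace(1, NULL);

	if (IS_ERR(old))
		printf("error %ld\n", PTR_ERR(old));
	else
		printf("old value %d\n", *(int *)old);
	return 0;
}
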
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index cd0b5c964bd0..2b827b8a1d8c 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2031,11 +2031,13 @@ void locking_selftest(void)
print_testname("mixed read-lock/lock-write ABBA");
pr_cont(" |");
dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+#ifdef CONFIG_PROVE_LOCKING
/*
* Lockdep does indeed fail here, but there's nothing we can do about
* that now. Don't kill lockdep for it.
*/
unexpected_testcase_failures--;
+#endif
pr_cont(" |");
dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index bd3574312b82..141734d255e4 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -85,8 +85,8 @@ static FORCE_INLINE int LZ4_decompress_generic(
const BYTE * const lowLimit = lowPrefix - dictSize;
const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
- const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
- const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+ static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
+ static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
const int safeDecode = (endOnInput == endOnInputSize);
const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 08f8043cac61..d01f47135239 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (time_is_before_jiffies(rs->begin + rs->interval)) {
if (rs->missed) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
- pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
+ printk_deferred(KERN_WARNING
+ "%s: %d callbacks suppressed\n",
+ func, rs->missed);
rs->missed = 0;
}
}
diff --git a/mm/cma.c b/mm/cma.c
index c0da318c020e..022e52bd8370 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
trace_cma_alloc(pfn, page, count, align);
- if (ret) {
+ if (ret && !(gfp_mask & __GFP_NOWARN)) {
pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
__func__, count, ret);
cma_debug_show_areas(cma);
diff --git a/mm/compaction.c b/mm/compaction.c
index fb548e4c7bd4..03d31a875341 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1999,17 +1999,14 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
if (pgdat->kcompactd_max_order < order)
pgdat->kcompactd_max_order = order;
- /*
- * Pairs with implicit barrier in wait_event_freezable()
- * such that wakeups are not missed in the lockless
- * waitqueue_active() call.
- */
- smp_acquire__after_ctrl_dep();
-
if (pgdat->kcompactd_classzone_idx > classzone_idx)
pgdat->kcompactd_classzone_idx = classzone_idx;
- if (!waitqueue_active(&pgdat->kcompactd_wait))
+ /*
+ * Pairs with implicit barrier in wait_event_freezable()
+ * such that wakeups are not missed.
+ */
+ if (!wq_has_sleeper(&pgdat->kcompactd_wait))
return;
if (!kcompactd_node_suitable(pgdat))
diff --git a/mm/filemap.c b/mm/filemap.c
index db250d0e0565..594d73fef8b4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -620,6 +620,14 @@ int file_check_and_advance_wb_err(struct file *file)
trace_file_check_and_advance_wb_err(file, old);
spin_unlock(&file->f_lock);
}
+
+ /*
+ * We're mostly using this function as a drop-in replacement for
+ * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
+ * that the legacy code would have had on these flags.
+ */
+ clear_bit(AS_EIO, &mapping->flags);
+ clear_bit(AS_ENOSPC, &mapping->flags);
return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
diff --git a/mm/ksm.c b/mm/ksm.c
index 15dd7415f7b3..6cb60f46cce5 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
*/
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
+ struct mm_struct *mm = rmap_item->mm;
struct rmap_item *tree_rmap_item;
struct page *tree_page = NULL;
struct stable_node *stable_node;
@@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
if (ksm_use_zero_pages && (checksum == zero_checksum)) {
struct vm_area_struct *vma;
- vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
+ down_read(&mm->mmap_sem);
+ vma = find_mergeable_vma(mm, rmap_item->address);
err = try_to_merge_one_page(vma, page,
ZERO_PAGE(rmap_item->address));
+ up_read(&mm->mmap_sem);
/*
* In case of failure, the page was not really empty, so we
* need to continue. Otherwise we're done.
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7a40fa2be858..f141f0c80ff3 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -325,12 +325,12 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
int size = memcg_nr_cache_ids;
- nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
+ nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL);
if (!nlru->memcg_lrus)
return -ENOMEM;
if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
- kfree(nlru->memcg_lrus);
+ kvfree(nlru->memcg_lrus);
return -ENOMEM;
}
@@ -340,7 +340,7 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
- kfree(nlru->memcg_lrus);
+ kvfree(nlru->memcg_lrus);
}
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
@@ -351,12 +351,12 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
BUG_ON(old_size > new_size);
old = nlru->memcg_lrus;
- new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
+ new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (__memcg_init_list_lru_node(new, old_size, new_size)) {
- kfree(new);
+ kvfree(new);
return -ENOMEM;
}
@@ -373,7 +373,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
nlru->memcg_lrus = new;
spin_unlock_irq(&nlru->lock);
- kfree(old);
+ kvfree(old);
return 0;
}
diff --git a/mm/madvise.c b/mm/madvise.c
index 21261ff0466f..fd70d6aabc3e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -625,18 +625,26 @@ static int madvise_inject_error(int behavior,
{
struct page *page;
struct zone *zone;
+ unsigned int order;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- for (; start < end; start += PAGE_SIZE <<
- compound_order(compound_head(page))) {
+
+ for (; start < end; start += PAGE_SIZE << order) {
int ret;
ret = get_user_pages_fast(start, 1, 0, &page);
if (ret != 1)
return ret;
+ /*
+ * When soft offlining hugepages, after migrating the page
+ * we dissolve it, therefore in the second loop "page" will
+ * no longer be a compound page, and order will be 0.
+ */
+ order = compound_order(compound_head(page));
+
if (PageHWPoison(page)) {
put_page(page);
continue;
@@ -749,6 +757,9 @@ madvise_behavior_valid(int behavior)
* MADV_DONTFORK - omit this area from child's address space when forking:
* typically, to avoid COWing pages pinned by get_user_pages().
* MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
+ * MADV_WIPEONFORK - present the child process with zero-filled memory in this
+ * range after a fork.
+ * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
* MADV_HWPOISON - trigger memory error handler as if the given memory range
* were corrupted by unrecoverable hardware memory failure.
* MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
@@ -769,7 +780,9 @@ madvise_behavior_valid(int behavior)
* zero - success
* -EINVAL - start + len < 0, start is not page-aligned,
* "behavior" is not a valid value, or application
- * is attempting to release locked or shared pages.
+ * is attempting to release locked or shared pages,
+ * or the specified address range includes file, Huge TLB,
+ * MAP_SHARED or VMPFNMAP range.
* -ENOMEM - addresses in the specified range are not currently
* mapped, or are outside the AS of the process.
* -EIO - an I/O error occurred while paging in data.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 15af3da5af02..d5f3a62887cf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1777,6 +1777,10 @@ static void drain_local_stock(struct work_struct *dummy)
struct memcg_stock_pcp *stock;
unsigned long flags;
+ /*
+ * The only protection from memory hotplug vs. drain_stock races is
+ * that we always operate on local CPU stock here with IRQ disabled
+ */
local_irq_save(flags);
stock = this_cpu_ptr(&memcg_stock);
@@ -1821,27 +1825,33 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
/* If someone's already draining, avoid adding running more workers. */
if (!mutex_trylock(&percpu_charge_mutex))
return;
- /* Notify other cpus that system-wide "drain" is running */
- get_online_cpus();
+ /*
+ * Notify other cpus that system-wide "drain" is running.
+ * We do not care about races with the cpu hotplug because cpu down
+ * as well as workers from this path always operate on the local
+ * per-cpu data. CPU up doesn't touch memcg_stock at all.
+ */
curcpu = get_cpu();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
memcg = stock->cached;
- if (!memcg || !stock->nr_pages)
+ if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
continue;
- if (!mem_cgroup_is_descendant(memcg, root_memcg))
+ if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
+ css_put(&memcg->css);
continue;
+ }
if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
if (cpu == curcpu)
drain_local_stock(&stock->work);
else
schedule_work_on(cpu, &stock->work);
}
+ css_put(&memcg->css);
}
put_cpu();
- put_online_cpus();
mutex_unlock(&percpu_charge_mutex);
}
@@ -5648,7 +5658,8 @@ static void uncharge_batch(const struct uncharge_gather *ug)
static void uncharge_page(struct page *page, struct uncharge_gather *ug)
{
VM_BUG_ON_PAGE(PageLRU(page), page);
- VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
+ VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
+ !PageHWPoison(page), page);
if (!page->mem_cgroup)
return;
diff --git a/mm/memory.c b/mm/memory.c
index ec4e15494901..a728bed16c20 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -845,7 +845,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
* vm_normal_page() so that we do not have to special case all
* call site of vm_normal_page().
*/
- if (likely(pfn < highest_memmap_pfn)) {
+ if (likely(pfn <= highest_memmap_pfn)) {
struct page *page = pfn_to_page(pfn);
if (is_device_public_page(page)) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e882cb6da994..d4b5f29906b9 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -328,6 +328,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
if (err && (err != -EEXIST))
break;
err = 0;
+ cond_resched();
}
vmemmap_populate_print_last();
out:
@@ -337,7 +338,7 @@ EXPORT_SYMBOL_GPL(__add_pages);
#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
-static int find_smallest_section_pfn(int nid, struct zone *zone,
+static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
unsigned long start_pfn,
unsigned long end_pfn)
{
@@ -362,7 +363,7 @@ static int find_smallest_section_pfn(int nid, struct zone *zone,
}
/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
-static int find_biggest_section_pfn(int nid, struct zone *zone,
+static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
unsigned long start_pfn,
unsigned long end_pfn)
{
@@ -550,7 +551,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
return ret;
scn_nr = __section_nr(ms);
- start_pfn = section_nr_to_pfn(scn_nr);
+ start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
__remove_zone(zone, start_pfn);
sparse_remove_one_section(zone, ms, map_offset);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 006ba625c0b8..a2af6d58a68f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
struct page *page;
page = __alloc_pages(gfp, order, nid);
- if (page && page_to_nid(page) == nid)
- inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
+ if (page && page_to_nid(page) == nid) {
+ preempt_disable();
+ __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
+ preempt_enable();
+ }
return page;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 6954c1435833..e00814ca390e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start,
unsigned long addr;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
- migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE;
+ migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
migrate->dst[migrate->npages] = 0;
+ migrate->npages++;
migrate->cpages++;
}
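
The hunk above fixes an off-by-one: migrate->src[migrate->npages++] advanced the index before migrate->dst[migrate->npages] was written, so every dst entry landed one slot too far. Writing both entries at the same index and bumping the counter once afterwards avoids that; a trivial stand-alone illustration:

#include <stdio.h>

#define N 8

int main(void)
{
	unsigned long src[N], dst[N];
	unsigned int npages = 0;

	/* Buggy shape: src[npages++] bumps the index before dst[npages] is
	 * written, so every dst entry lands one slot too far.  Write both
	 * entries at the same index, then advance the counter once. */
	for (int i = 0; i < 3; i++) {
		src[npages] = 0x1;	/* MIGRATE_PFN_MIGRATE in the real code */
		dst[npages] = 0;
		npages++;
	}
	printf("filled %u matching src/dst entries\n", npages);
	return 0;
}
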
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 99736e026712..dee0f75c3013 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -40,6 +40,7 @@
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
+#include <linux/mmu_notifier.h>
#include <asm/tlb.h>
#include "internal.h"
@@ -495,6 +496,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
}
/*
+ * If the mm has notifiers then we would need to invalidate them around
+ * unmap_page_range and that is risky because notifiers can sleep and
+ * what they do is basically nondeterministic. So let's have a short
+ * sleep to give the oom victim some more time.
+ * TODO: we really want to get rid of this ugly hack and make sure that
+ * notifiers cannot block for an unbounded amount of time and add
+ * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
+ */
+ if (mm_has_notifiers(mm)) {
+ up_read(&mm->mmap_sem);
+ schedule_timeout_idle(HZ);
+ goto unlock_oom;
+ }
+
+ /*
* MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
* work on the mm anymore. The check for MMF_OOM_SKIP must run
* under mmap_sem for reading because it serializes against the
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c841af88836a..77e4d3c5c57b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1190,7 +1190,7 @@ static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-static void init_reserved_page(unsigned long pfn)
+static void __meminit init_reserved_page(unsigned long pfn)
{
pg_data_t *pgdat;
int nid, zid;
@@ -5367,6 +5367,7 @@ not_early:
__init_single_page(page, pfn, zone, nid);
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ cond_resched();
} else {
__init_single_pfn(pfn, zone, nid);
}
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 6a03946469a9..53afbb919a1c 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -6,17 +6,6 @@
#include "internal.h"
-static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
-{
- pmd_t pmde;
- /*
- * Make sure we don't re-load pmd between present and !trans_huge check.
- * We need a consistent view.
- */
- pmde = READ_ONCE(*pvmw->pmd);
- return pmd_present(pmde) && !pmd_trans_huge(pmde);
-}
-
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
page_vma_mapped_walk_done(pvmw);
@@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
+ pmd_t pmde;
/* The only possible pmd mapping has been handled on last iteration */
if (pvmw->pmd && !pvmw->pte)
@@ -148,7 +138,13 @@ restart:
if (!pud_present(*pud))
return false;
pvmw->pmd = pmd_offset(pud, pvmw->address);
- if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) {
+ /*
+ * Make sure the pmd value isn't cached in a register by the
+ * compiler and used as a stale value after we've observed a
+ * subsequent update.
+ */
+ pmde = READ_ONCE(*pvmw->pmd);
+ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
if (likely(pmd_trans_huge(*pvmw->pmd))) {
if (pvmw->flags & PVMW_MIGRATION)
@@ -167,17 +163,15 @@ restart:
return not_found(pvmw);
return true;
}
- } else
- WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+ }
return not_found(pvmw);
} else {
/* THP pmd was split under us: handle on pte level */
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
}
- } else {
- if (!check_pmd(pvmw))
- return false;
+ } else if (!pmd_present(pmde)) {
+ return false;
}
if (!map_pte(pvmw))
goto next_pte;
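
The reworked pmd handling reads *pvmw->pmd exactly once into pmde with READ_ONCE() and performs every later test on that snapshot, so the compiler cannot re-load the pmd between checks and mix two different values. A user-space stand-in for that pattern; the READ_ONCE macro and the pmd_* helpers below are simplified stand-ins, not the kernel's:

#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): a single volatile load,
 * so the compiler cannot re-read the location and observe two different
 * values within one logical sequence of checks. */
#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

static unsigned long pmd_shared;	/* updated concurrently in the kernel case */

static int pmd_present(unsigned long v)    { return v & 1; }	/* stand-in */
static int pmd_trans_huge(unsigned long v) { return v & 2; }	/* stand-in */

int main(void)
{
	unsigned long pmde = READ_ONCE(pmd_shared);	/* one snapshot ... */

	/* ... and every later test uses the same snapshot, never *pmd again. */
	if (pmd_present(pmde) && !pmd_trans_huge(pmde))
		printf("present, not huge\n");
	else
		printf("handle on pte level or bail out\n");
	return 0;
}
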
diff --git a/mm/rodata_test.c b/mm/rodata_test.c
index 6bb4deb12e78..d908c8769b48 100644
--- a/mm/rodata_test.c
+++ b/mm/rodata_test.c
@@ -14,7 +14,7 @@
#include <linux/uaccess.h>
#include <asm/sections.h>
-const int rodata_test_data = 0xC3;
+static const int rodata_test_data = 0xC3;
void rodata_test(void)
{
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 904a83be82de..80164599ca5d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -165,9 +165,9 @@ static int init_memcg_params(struct kmem_cache *s,
if (!memcg_nr_cache_ids)
return 0;
- arr = kzalloc(sizeof(struct memcg_cache_array) +
- memcg_nr_cache_ids * sizeof(void *),
- GFP_KERNEL);
+ arr = kvzalloc(sizeof(struct memcg_cache_array) +
+ memcg_nr_cache_ids * sizeof(void *),
+ GFP_KERNEL);
if (!arr)
return -ENOMEM;
@@ -178,15 +178,23 @@ static int init_memcg_params(struct kmem_cache *s,
static void destroy_memcg_params(struct kmem_cache *s)
{
if (is_root_cache(s))
- kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+ kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+}
+
+static void free_memcg_params(struct rcu_head *rcu)
+{
+ struct memcg_cache_array *old;
+
+ old = container_of(rcu, struct memcg_cache_array, rcu);
+ kvfree(old);
}
static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
struct memcg_cache_array *old, *new;
- new = kzalloc(sizeof(struct memcg_cache_array) +
- new_array_size * sizeof(void *), GFP_KERNEL);
+ new = kvzalloc(sizeof(struct memcg_cache_array) +
+ new_array_size * sizeof(void *), GFP_KERNEL);
if (!new)
return -ENOMEM;
@@ -198,7 +206,7 @@ static int update_memcg_params(struct kmem_cache *s, int new_array_size)
rcu_assign_pointer(s->memcg_params.memcg_caches, new);
if (old)
- kfree_rcu(old, rcu);
+ call_rcu(&old->rcu, free_memcg_params);
return 0;
}
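
kfree_rcu() frees with kfree(), which is no longer safe here because the memcg_caches arrays are now kvzalloc()'d and may be vmalloc-backed; hence the open-coded call_rcu() with a kvfree()-based callback. The callback only receives the rcu_head, so it recovers the enclosing structure with container_of(). A user-space analogue, with a simplified rcu_head and malloc/free in place of kvzalloc/kvfree:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; };	/* simplified stand-in */

struct memcg_cache_array {
	struct rcu_head rcu;
	void *entries[];
};

/* Analogue of free_memcg_params(): the callback only gets the rcu_head, so it
 * recovers the enclosing array with container_of() and frees it with the
 * allocator matching the allocation (kvfree() in the kernel, free() here). */
static void free_params(struct rcu_head *rcu)
{
	struct memcg_cache_array *old;

	old = container_of(rcu, struct memcg_cache_array, rcu);
	free(old);
}

int main(void)
{
	struct memcg_cache_array *arr = malloc(sizeof(*arr) + 4 * sizeof(void *));

	free_params(&arr->rcu);
	printf("freed via container_of\n");
	return 0;
}
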
diff --git a/mm/swap.c b/mm/swap.c
index 9295ae960d66..a77d68f2c1b6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- !PageUnevictable(page)) {
+ !PageSwapCache(page) && !PageUnevictable(page)) {
bool active = PageActive(page);
del_page_from_lru_list(page, lruvec,
@@ -665,7 +665,7 @@ void deactivate_file_page(struct page *page)
void mark_page_lazyfree(struct page *page)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- !PageUnevictable(page)) {
+ !PageSwapCache(page) && !PageUnevictable(page)) {
struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
get_page(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 71ce2d1ccbf7..05b6803f0cce 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
bool swap_vma_readahead = true;
-#define SWAP_RA_MAX_ORDER_DEFAULT 3
-
-static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT;
-
#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
@@ -242,6 +238,17 @@ int add_to_swap(struct page *page)
* clear SWAP_HAS_CACHE flag.
*/
goto fail;
+ /*
+ * Normally the page will be dirtied in unmap because its pte should be
+ * dirty. A special case is a MADV_FREE page: its pte could have the
+ * dirty bit cleared while the page's SwapBacked bit is still set,
+ * because clearing the dirty bit and the SwapBacked bit is not done
+ * under a lock. For such a page, unmap will not set the dirty bit, so
+ * page reclaim will not write the page out. This can cause data
+ * corruption when the page is swapped in later. Always setting the
+ * dirty bit for the page solves the problem.
+ */
+ set_page_dirty(page);
return 1;
@@ -653,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
pte_t *tpte;
#endif
+ max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
+ SWAP_RA_ORDER_CEILING);
+ if (max_win == 1) {
+ swap_ra->win = 1;
+ return NULL;
+ }
+
faddr = vmf->address;
entry = pte_to_swp_entry(vmf->orig_pte);
if ((unlikely(non_swap_entry(entry))))
@@ -661,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
if (page)
return page;
- max_win = 1 << READ_ONCE(swap_ra_max_order);
- if (max_win == 1) {
- swap_ra->win = 1;
- return NULL;
- }
-
fpfn = PFN_DOWN(faddr);
swap_ra_info = GET_SWAP_RA_VAL(vma);
pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
@@ -775,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr =
__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
vma_ra_enabled_store);
-static ssize_t vma_ra_max_order_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", swap_ra_max_order);
-}
-static ssize_t vma_ra_max_order_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- int err, v;
-
- err = kstrtoint(buf, 10, &v);
- if (err || v > SWAP_RA_ORDER_CEILING || v <= 0)
- return -EINVAL;
-
- swap_ra_max_order = v;
-
- return count;
-}
-static struct kobj_attribute vma_ra_max_order_attr =
- __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show,
- vma_ra_max_order_store);
-
static struct attribute *swap_attrs[] = {
&vma_ra_enabled_attr.attr,
- &vma_ra_max_order_attr.attr,
NULL,
};
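
With the vma_ra_max_order knob gone, the VMA-based readahead window above follows the existing page_cluster tunable, capped by SWAP_RA_ORDER_CEILING. Illustrative arithmetic; the ceiling value below is an assumption for the example, not taken from the source:

#include <stdio.h>

#define SWAP_RA_ORDER_CEILING	5	/* assumed value, for illustration only */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* With the sysfs knob gone, the VMA readahead window follows the
	 * existing page-cluster tunable, capped by the ceiling. */
	for (unsigned int pc = 0; pc <= 8; pc++)
		printf("page_cluster=%u -> max_win=%u\n", pc,
		       1u << min_uint(pc, SWAP_RA_ORDER_CEILING));
	return 0;
}
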
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8a43db6284eb..673942094328 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
- if (fatal_signal_pending(current)) {
- area->nr_pages = i;
- goto fail_no_warn;
- }
-
if (node == NUMA_NO_NODE)
page = alloc_page(alloc_mask|highmem_mask);
else
@@ -1723,7 +1718,6 @@ fail:
warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure, allocated %ld of %ld bytes",
(area->nr_pages*PAGE_SIZE), area->size);
-fail_no_warn:
vfree(area->addr);
return NULL;
}
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 486550df32be..b2ba2ba585f3 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
WARN_ON(!list_empty(&zhdr->buddy));
set_bit(PAGE_STALE, &page->private);
+ clear_bit(NEEDS_COMPACTING, &page->private);
spin_lock(&pool->lock);
if (!list_empty(&page->lru))
list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
list_del(&zhdr->buddy);
if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
continue;
- clear_bit(NEEDS_COMPACTING, &page->private);
spin_unlock(&pool->stale_lock);
cancel_work_sync(&zhdr->work);
free_z3fold_page(page);
@@ -624,10 +624,8 @@ lookup:
* stale pages list. cancel_work_sync() can sleep so we must make
* sure it won't be called in case we're in atomic context.
*/
- if (zhdr && (can_sleep || !work_pending(&zhdr->work) ||
- !unlikely(work_busy(&zhdr->work)))) {
+ if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
list_del(&zhdr->buddy);
- clear_bit(NEEDS_COMPACTING, &page->private);
spin_unlock(&pool->stale_lock);
if (can_sleep)
cancel_work_sync(&zhdr->work);
@@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
goto next;
}
next:
+ spin_lock(&pool->lock);
if (test_bit(PAGE_HEADLESS, &page->private)) {
if (ret == 0) {
+ spin_unlock(&pool->lock);
free_z3fold_page(page);
return 0;
}
} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
atomic64_dec(&pool->pages_nr);
+ spin_unlock(&pool->lock);
return 0;
}
- spin_lock(&pool->lock);
/*
* Add to the beginning of LRU.
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e2ed69850489..0bc31de9071a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -21,6 +21,12 @@ bool vlan_do_receive(struct sk_buff **skbp)
if (unlikely(!skb))
return false;
+ if (unlikely(!(vlan_dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ *skbp = NULL;
+ return false;
+ }
+
skb->dev = vlan_dev;
if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
/* Our lower layer thinks this is not local, let's make sure.
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 2585b100ebbb..276b60262981 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
static int __net_init broute_net_init(struct net *net)
{
- net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
- return PTR_ERR_OR_ZERO(net->xt.broute_table);
+ return ebt_register_table(net, &broute_table, NULL,
+ &net->xt.broute_table);
}
static void __net_exit broute_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 45a00dbdbcad..c41da5fac84f 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
static int __net_init frame_filter_net_init(struct net *net)
{
- net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter);
- return PTR_ERR_OR_ZERO(net->xt.frame_filter);
+ return ebt_register_table(net, &frame_filter, ebt_ops_filter,
+ &net->xt.frame_filter);
}
static void __net_exit frame_filter_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 57cd5bb154e7..08df7406ecb3 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
static int __net_init frame_nat_net_init(struct net *net)
{
- net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat);
- return PTR_ERR_OR_ZERO(net->xt.frame_nat);
+ return ebt_register_table(net, &frame_nat, ebt_ops_nat,
+ &net->xt.frame_nat);
}
static void __net_exit frame_nat_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 83951f978445..3b3dcf719e07 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
kfree(table);
}
-struct ebt_table *
-ebt_register_table(struct net *net, const struct ebt_table *input_table,
- const struct nf_hook_ops *ops)
+int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+ const struct nf_hook_ops *ops, struct ebt_table **res)
{
struct ebt_table_info *newinfo;
struct ebt_table *t, *table;
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
repl->entries == NULL || repl->entries_size == 0 ||
repl->counters != NULL || input_table->private != NULL) {
BUGPRINT("Bad table data for ebt_register_table!!!\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/* Don't add one table to multiple lists. */
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
mutex_unlock(&ebt_mutex);
+ WRITE_ONCE(*res, table);
+
if (!ops)
- return table;
+ return 0;
ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
if (ret) {
__ebt_unregister_table(net, table);
- return ERR_PTR(ret);
+ *res = NULL;
}
- return table;
+ return ret;
free_unlock:
mutex_unlock(&ebt_mutex);
free_chainstack:
@@ -1276,7 +1277,7 @@ free_newinfo:
free_table:
kfree(table);
out:
- return ERR_PTR(ret);
+ return ret;
}
void ebt_unregister_table(struct net *net, struct ebt_table *table,
diff --git a/net/core/filter.c b/net/core/filter.c
index 82edad58d066..74b8c91fb5f4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -989,10 +989,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
- bool ret = __sk_filter_charge(sk, fp);
- if (ret)
- refcount_inc(&fp->refcnt);
- return ret;
+ if (!refcount_inc_not_zero(&fp->refcnt))
+ return false;
+
+ if (!__sk_filter_charge(sk, fp)) {
+ sk_filter_release(fp);
+ return false;
+ }
+ return true;
}
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
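
The reworked sk_filter_charge() first takes a reference only if the filter is still live, and drops it again when the memory charge fails, which closes the race with a concurrent release. A user-space sketch of the underlying "increment only if not zero" idea using C11 atomics; get_not_zero() is a stand-in, not the kernel's refcount API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_uint refcnt; };

/* A reference is taken only while the object still holds one, so an object
 * whose last reference has already been put cannot be resurrected. */
static bool get_not_zero(struct obj *o)
{
	unsigned int c = atomic_load(&o->refcnt);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct obj live = { .refcnt = 1 };
	struct obj dead = { .refcnt = 0 };

	printf("live: %d\n", get_not_zero(&live));	/* 1: reference taken */
	printf("dead: %d\n", get_not_zero(&dead));	/* 0: refused */
	return 0;
}
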
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a78fd61da0ec..d4bcdcc68e92 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3854,6 +3854,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
return -EMSGSIZE;
ifsm = nlmsg_data(nlh);
+ ifsm->family = PF_UNSPEC;
+ ifsm->pad1 = 0;
+ ifsm->pad2 = 0;
ifsm->ifindex = dev->ifindex;
ifsm->filter_mask = filter_mask;
diff --git a/net/core/sock.c b/net/core/sock.c
index 9b7b6bbb2a23..23953b741a41 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1654,6 +1654,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
sock_copy(newsk, sk);
+ newsk->sk_prot_creator = sk->sk_prot;
+
/* SANITY */
if (likely(newsk->sk_net_refcnt))
get_net(sock_net(newsk));
@@ -1682,13 +1684,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
sock_reset_flag(newsk, SOCK_DONE);
- filter = rcu_dereference_protected(newsk->sk_filter, 1);
+ rcu_read_lock();
+ filter = rcu_dereference(sk->sk_filter);
if (filter != NULL)
/* though it's an empty new sock, the charging may fail
* if sysctl_optmem_max was changed between creation of
* original socket and cloning
*/
is_charged = sk_filter_charge(newsk, filter);
+ RCU_INIT_POINTER(newsk->sk_filter, filter);
+ rcu_read_unlock();
if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
/* We need to make sure that we don't uncharge the new
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 2afa99506f8b..865e29e62bad 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1301,28 +1301,33 @@ int dsa_slave_create(struct dsa_port *port, const char *name)
p->old_duplex = -1;
port->netdev = slave_dev;
- ret = register_netdev(slave_dev);
- if (ret) {
- netdev_err(master, "error %d registering interface %s\n",
- ret, slave_dev->name);
- port->netdev = NULL;
- free_percpu(p->stats64);
- free_netdev(slave_dev);
- return ret;
- }
netif_carrier_off(slave_dev);
ret = dsa_slave_phy_setup(p, slave_dev);
if (ret) {
netdev_err(master, "error %d setting up slave phy\n", ret);
- unregister_netdev(slave_dev);
- free_percpu(p->stats64);
- free_netdev(slave_dev);
- return ret;
+ goto out_free;
+ }
+
+ ret = register_netdev(slave_dev);
+ if (ret) {
+ netdev_err(master, "error %d registering interface %s\n",
+ ret, slave_dev->name);
+ goto out_phy;
}
return 0;
+
+out_phy:
+ phy_disconnect(p->phy);
+ if (of_phy_is_fixed_link(p->dp->dn))
+ of_phy_deregister_fixed_link(p->dp->dn);
+out_free:
+ free_percpu(p->stats64);
+ free_netdev(slave_dev);
+ port->netdev = NULL;
+ return ret;
}
void dsa_slave_destroy(struct net_device *slave_dev)
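
dsa_slave_create() now performs the phy setup before register_netdev() and unwinds in reverse order on failure. The same acquire-in-order, release-in-reverse goto style in a stand-alone sketch, with malloc/free standing in for the real setup and teardown calls:

#include <stdio.h>
#include <stdlib.h>

static int create(int fail_register)
{
	void *stats, *phy;
	int err;

	stats = malloc(64);
	if (!stats)
		return -1;

	phy = malloc(64);		/* stands in for dsa_slave_phy_setup() */
	if (!phy) {
		err = -1;
		goto out_free;
	}

	if (fail_register) {		/* stands in for register_netdev() failing */
		err = -1;
		goto out_phy;
	}

	free(phy);			/* demo only: keep the sketch leak-free */
	free(stats);
	return 0;

out_phy:
	free(phy);			/* undo the phy setup first ... */
out_free:
	free(stats);			/* ... then the earlier allocation */
	return err;
}

int main(void)
{
	printf("ok=%d fail=%d\n", create(0), create(1));
	return 0;
}
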
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 416bb304a281..1859c473b21a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
greh = (struct gre_base_hdr *)skb_transport_header(skb);
pcsum = (__sum16 *)(greh + 1);
- if (gso_partial) {
+ if (gso_partial && skb_is_gso(skb)) {
unsigned int partial_adj;
/* Adjust checksum to account for the fact that
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e7eb590c86ce..b20c8ac64081 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -128,9 +128,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
break;
}
if (cmp == -1)
- pp = &(*pp)->rb_left;
+ pp = &next->rb_left;
else
- pp = &(*pp)->rb_right;
+ pp = &next->rb_right;
}
*parent_p = parent;
*pp_p = pp;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 0162fb955b33..467e44d7587d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
struct ip_tunnel *tunnel;
struct erspanhdr *ershdr;
const struct iphdr *iph;
- __be32 session_id;
__be32 index;
int len;
@@ -275,8 +274,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
/* The original GRE header does not have key field,
* Use ERSPAN 10-bit session ID as key.
*/
- session_id = cpu_to_be32(ntohs(ershdr->session_id));
- tpi->key = session_id;
+ tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
index = ershdr->md.index;
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_KEY,
@@ -733,7 +731,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
- if (skb->len > dev->mtu) {
+ if (skb->len - dev->hard_header_len > dev->mtu) {
pskb_trim(skb, dev->mtu);
truncate = true;
}
@@ -1223,6 +1221,7 @@ static int gre_tap_init(struct net_device *dev)
{
__gre_tunnel_init(dev);
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_keep_dst(dev);
return ip_tunnel_init(dev);
}
@@ -1246,13 +1245,16 @@ static int erspan_tunnel_init(struct net_device *dev)
tunnel->tun_hlen = 8;
tunnel->parms.iph.protocol = IPPROTO_GRE;
- t_hlen = tunnel->hlen + sizeof(struct iphdr) + sizeof(struct erspanhdr);
+ tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+ sizeof(struct erspanhdr);
+ t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
dev->mtu = ETH_DATA_LEN - t_hlen - 4;
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_keep_dst(dev);
return ip_tunnel_init(dev);
}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index fa2dc8f692c6..57fc13c6ab2b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -311,9 +311,10 @@ drop:
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
- struct rtable *rt;
+ int (*edemux)(struct sk_buff *skb);
struct net_device *dev = skb->dev;
- void (*edemux)(struct sk_buff *skb);
+ struct rtable *rt;
+ int err;
/* if ingress device is enslaved to an L3 master device pass the
* skb to its handler for processing
@@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
ipprot = rcu_dereference(inet_protos[protocol]);
if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
- edemux(skb);
+ err = edemux(skb);
+ if (unlikely(err))
+ goto drop_error;
/* must reload iph, skb->head might have changed */
iph = ip_hdr(skb);
}
@@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
* how the packet travels inside Linux networking.
*/
if (!skb_valid_dst(skb)) {
- int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, dev);
- if (unlikely(err)) {
- if (err == -EXDEV)
- __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
- goto drop;
- }
+ err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, dev);
+ if (unlikely(err))
+ goto drop_error;
}
#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
drop:
kfree_skb(skb);
return NET_RX_DROP;
+
+drop_error:
+ if (err == -EXDEV)
+ __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
+ goto drop;
}
/*
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 5ed63d250950..89453cf62158 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_parm *parms = &tunnel->parms;
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev; /* Device to other host */
+ int pkt_len = skb->len;
int err;
int mtu;
@@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
err = dst_output(tunnel->net, skb->sk, skb);
if (net_xmit_eval(err) == 0)
- err = skb->len;
+ err = pkt_len;
iptunnel_xmit_stats(dev, err);
return NETDEV_TX_OK;
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 811689e523c3..f75fc6b53115 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
if (synproxy == NULL)
return NF_ACCEPT;
- if (nf_is_loopback_packet(skb))
+ if (nf_is_loopback_packet(skb) ||
+ ip_hdr(skb)->protocol != IPPROTO_TCP)
return NF_ACCEPT;
thoff = ip_hdrlen(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 94d4cd2d5ea4..3d9f1c2f81c5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
EXPORT_SYMBOL(rt_dst_alloc);
/* called in rcu_read_lock() section */
-static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev, int our)
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag)
{
- struct rtable *rth;
- struct in_device *in_dev = __in_dev_get_rcu(dev);
- unsigned int flags = RTCF_MULTICAST;
- u32 itag = 0;
int err;
/* Primary sanity checks. */
-
if (!in_dev)
return -EINVAL;
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
skb->protocol != htons(ETH_P_IP))
- goto e_inval;
+ return -EINVAL;
if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
- goto e_inval;
+ return -EINVAL;
if (ipv4_is_zeronet(saddr)) {
if (!ipv4_is_local_multicast(daddr))
- goto e_inval;
+ return -EINVAL;
} else {
err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
- in_dev, &itag);
+ in_dev, itag);
if (err < 0)
- goto e_err;
+ return err;
}
+ return 0;
+}
+
+/* called in rcu_read_lock() section */
+static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev, int our)
+{
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+ unsigned int flags = RTCF_MULTICAST;
+ struct rtable *rth;
+ u32 itag = 0;
+ int err;
+
+ err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
+ if (err)
+ return err;
+
if (our)
flags |= RTCF_LOCAL;
rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
if (!rth)
- goto e_nobufs;
+ return -ENOBUFS;
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
@@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb_dst_set(skb, &rth->dst);
return 0;
-
-e_nobufs:
- return -ENOBUFS;
-e_inval:
- return -EINVAL;
-e_err:
- return err;
}
@@ -2507,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
struct rtable *ort = (struct rtable *) dst_orig;
struct rtable *rt;
- rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
+ rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d9416b5162bc..85164d4d3e53 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1503,23 +1503,23 @@ csum_err:
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
-void tcp_v4_early_demux(struct sk_buff *skb)
+int tcp_v4_early_demux(struct sk_buff *skb)
{
const struct iphdr *iph;
const struct tcphdr *th;
struct sock *sk;
if (skb->pkt_type != PACKET_HOST)
- return;
+ return 0;
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
- return;
+ return 0;
iph = ip_hdr(skb);
th = tcp_hdr(skb);
if (th->doff < sizeof(struct tcphdr) / 4)
- return;
+ return 0;
sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
iph->saddr, th->source,
@@ -1538,6 +1538,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
skb_dst_set_noref(skb, dst);
}
}
+ return 0;
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ef29df8648e4..e45177ceb0ee 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2221,9 +2221,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
return NULL;
}
-void udp_v4_early_demux(struct sk_buff *skb)
+int udp_v4_early_demux(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
+ struct in_device *in_dev = NULL;
const struct iphdr *iph;
const struct udphdr *uh;
struct sock *sk = NULL;
@@ -2234,25 +2235,21 @@ void udp_v4_early_demux(struct sk_buff *skb)
/* validate the packet */
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
- return;
+ return 0;
iph = ip_hdr(skb);
uh = udp_hdr(skb);
- if (skb->pkt_type == PACKET_BROADCAST ||
- skb->pkt_type == PACKET_MULTICAST) {
- struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+ if (skb->pkt_type == PACKET_MULTICAST) {
+ in_dev = __in_dev_get_rcu(skb->dev);
if (!in_dev)
- return;
+ return 0;
- /* we are supposed to accept bcast packets */
- if (skb->pkt_type == PACKET_MULTICAST) {
- ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
- iph->protocol);
- if (!ours)
- return;
- }
+ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+ iph->protocol);
+ if (!ours)
+ return 0;
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr,
@@ -2263,7 +2260,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
}
if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
- return;
+ return 0;
skb->sk = sk;
skb->destructor = sock_efree;
@@ -2272,12 +2269,23 @@ void udp_v4_early_demux(struct sk_buff *skb)
if (dst)
dst = dst_check(dst, 0);
if (dst) {
+ u32 itag = 0;
+
/* set noref for now.
* any place which wants to hold dst has to call
* dst_hold_safe()
*/
skb_dst_set_noref(skb, dst);
+
+ /* for unconnected multicast sockets we need to validate
+ * the source on each packet
+ */
+ if (!inet_sk(sk)->inet_daddr && in_dev)
+ return ip_mc_validate_source(skb, iph->daddr,
+ iph->saddr, iph->tos,
+ skb->dev, in_dev, &itag);
}
+ return 0;
}
int udp_rcv(struct sk_buff *skb)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 97658bfc1b58..e360d55be555 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
* will be using a length value equal to only one MSS sized
* segment instead of the entire frame.
*/
- if (gso_partial) {
+ if (gso_partial && skb_is_gso(skb)) {
uh->len = htons(skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)uh);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 96861c702c06..4a96ebbf8eda 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3820,8 +3820,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
goto out;
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
- dev_net(dev)->ipv6.devconf_all->accept_dad < 1 ||
- idev->cnf.accept_dad < 1 ||
+ (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
+ idev->cnf.accept_dad < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
bump_id = ifp->flags & IFA_F_TENTATIVE;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 20f66f4c9460..1602b491b281 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1311,6 +1311,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_keep_dst(dev);
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cdb3728faca7..4a87f9428ca5 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
for (skb = segs; skb; skb = skb->next) {
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
- if (gso_partial)
+ if (gso_partial && skb_is_gso(skb))
payload_len = skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)(ipv6h + 1);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f2f21c24915f..a1c24443cd9e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device *tdev;
int mtu;
+ unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
unsigned int max_headroom = psh_hlen;
bool use_cache = false;
@@ -1124,7 +1125,7 @@ route_lookup:
t->parms.name);
goto tx_err_dst_release;
}
- mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
+ mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
if (encap_limit >= 0) {
max_headroom += 8;
mtu -= 8;
@@ -1133,7 +1134,7 @@ route_lookup:
mtu = IPV6_MIN_MTU;
if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
- if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
+ if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
goto tx_err_dst_release;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 79444a4bfd6d..bcdc2d557de1 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev;
struct xfrm_state *x;
+ int pkt_len = skb->len;
int err = -1;
int mtu;
@@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += skb->len;
+ tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
} else {
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index a5cd43d75393..437af8c95277 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
nexthdr = ipv6_hdr(skb)->nexthdr;
thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
- if (thoff < 0)
+ if (thoff < 0 || nexthdr != IPPROTO_TCP)
return NF_ACCEPT;
th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 26cc9f483b6d..a96d5b385d8f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
struct dst_entry *new = NULL;
rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
- DST_OBSOLETE_NONE, 0);
+ DST_OBSOLETE_DEAD, 0);
if (rt) {
rt6_info_init(rt);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ee485df73ccd..02d61101b108 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1314,6 +1314,9 @@ again:
hlist_del_init(&session->hlist);
+ if (test_and_set_bit(0, &session->dead))
+ goto again;
+
if (session->ref != NULL)
(*session->ref)(session);
@@ -1685,14 +1688,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
/* This function is used by the netlink TUNNEL_DELETE command.
*/
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
- l2tp_tunnel_inc_refcount(tunnel);
- if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
- l2tp_tunnel_dec_refcount(tunnel);
- return 1;
+ if (!test_and_set_bit(0, &tunnel->dead)) {
+ l2tp_tunnel_inc_refcount(tunnel);
+ queue_work(l2tp_wq, &tunnel->del_work);
}
- return 0;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1750,6 +1751,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
*/
int l2tp_session_delete(struct l2tp_session *session)
{
+ if (test_and_set_bit(0, &session->dead))
+ return 0;
+
if (session->ref)
(*session->ref)(session);
__l2tp_session_unhash(session);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a305e0c5925a..67c79d9b5c6c 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -76,6 +76,7 @@ struct l2tp_session_cfg {
struct l2tp_session {
int magic; /* should be
* L2TP_SESSION_MAGIC */
+ long dead;
struct l2tp_tunnel *tunnel; /* back pointer to tunnel
* context */
@@ -160,6 +161,9 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+
+ unsigned long dead;
+
struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
bool acpt_newsess; /* Indicates whether this
@@ -254,7 +258,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
struct l2tp_tunnel **tunnelp);
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_create(int priv_size,
struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 87da9ef61860..014a7bc2a872 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -44,7 +44,6 @@ struct l2tp_eth {
struct net_device *dev;
struct sock *tunnel_sock;
struct l2tp_session *session;
- struct list_head list;
atomic_long_t tx_bytes;
atomic_long_t tx_packets;
atomic_long_t tx_dropped;
@@ -58,17 +57,6 @@ struct l2tp_eth_sess {
struct net_device *dev;
};
-/* per-net private data for this module */
-static unsigned int l2tp_eth_net_id;
-struct l2tp_eth_net {
- struct list_head l2tp_eth_dev_list;
- spinlock_t l2tp_eth_lock;
-};
-
-static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
-{
- return net_generic(net, l2tp_eth_net_id);
-}
static int l2tp_eth_dev_init(struct net_device *dev)
{
@@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
static void l2tp_eth_dev_uninit(struct net_device *dev)
{
- struct l2tp_eth *priv = netdev_priv(dev);
- struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
-
- spin_lock(&pn->l2tp_eth_lock);
- list_del_init(&priv->list);
- spin_unlock(&pn->l2tp_eth_lock);
dev_put(dev);
}
@@ -273,7 +255,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
struct l2tp_eth *priv;
struct l2tp_eth_sess *spriv;
int rc;
- struct l2tp_eth_net *pn;
if (cfg->ifname) {
strlcpy(name, cfg->ifname, IFNAMSIZ);
@@ -305,7 +286,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
priv = netdev_priv(dev);
priv->dev = dev;
priv->session = session;
- INIT_LIST_HEAD(&priv->list);
priv->tunnel_sock = tunnel->sock;
session->recv_skb = l2tp_eth_dev_recv;
@@ -326,10 +306,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
strlcpy(session->ifname, dev->name, IFNAMSIZ);
dev_hold(dev);
- pn = l2tp_eth_pernet(dev_net(dev));
- spin_lock(&pn->l2tp_eth_lock);
- list_add(&priv->list, &pn->l2tp_eth_dev_list);
- spin_unlock(&pn->l2tp_eth_lock);
return 0;
@@ -342,22 +318,6 @@ out:
return rc;
}
-static __net_init int l2tp_eth_init_net(struct net *net)
-{
- struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
-
- INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
- spin_lock_init(&pn->l2tp_eth_lock);
-
- return 0;
-}
-
-static struct pernet_operations l2tp_eth_net_ops = {
- .init = l2tp_eth_init_net,
- .id = &l2tp_eth_net_id,
- .size = sizeof(struct l2tp_eth_net),
-};
-
static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
.session_create = l2tp_eth_create,
@@ -371,25 +331,18 @@ static int __init l2tp_eth_init(void)
err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
if (err)
- goto out;
-
- err = register_pernet_device(&l2tp_eth_net_ops);
- if (err)
- goto out_unreg;
+ goto err;
pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
return 0;
-out_unreg:
- l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
-out:
+err:
return err;
}
static void __exit l2tp_eth_exit(void)
{
- unregister_pernet_device(&l2tp_eth_net_ops);
l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 50e3ee9a9d61..bc6e8bfc5be4 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -437,11 +437,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
- if (sock) {
+ if (sock)
inet_shutdown(sock, SEND_SHUTDOWN);
- /* Don't let the session go away before our socket does */
- l2tp_session_inc_refcount(session);
- }
+
+ /* Don't let the session go away before our socket does */
+ l2tp_session_inc_refcount(session);
}
/* Really kill the session socket. (Called from sock_put() if
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e495b5e484b1..cf84f7b37cd9 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
from->family == to->family))
return -IPSET_ERR_TYPE_MISMATCH;
- if (from->ref_netlink || to->ref_netlink)
+ write_lock_bh(&ip_set_ref_lock);
+
+ if (from->ref_netlink || to->ref_netlink) {
+ write_unlock_bh(&ip_set_ref_lock);
return -EBUSY;
+ }
strncpy(from_name, from->name, IPSET_MAXNAMELEN);
strncpy(from->name, to->name, IPSET_MAXNAMELEN);
strncpy(to->name, from_name, IPSET_MAXNAMELEN);
- write_lock_bh(&ip_set_ref_lock);
swap(from->ref, to->ref);
ip_set(inst, from_id) = to;
ip_set(inst, to_id) = from;
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = {
static int __init
ip_set_init(void)
{
- int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+ int ret = register_pernet_subsys(&ip_set_net_ops);
+
+ if (ret) {
+ pr_err("ip_set: cannot register pernet_subsys.\n");
+ return ret;
+ }
+ ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
if (ret != 0) {
pr_err("ip_set: cannot register with nfnetlink.\n");
+ unregister_pernet_subsys(&ip_set_net_ops);
return ret;
}
+
ret = nf_register_sockopt(&so_set);
if (ret != 0) {
pr_err("SO_SET registry failed: %d\n", ret);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ unregister_pernet_subsys(&ip_set_net_ops);
return ret;
}
- ret = register_pernet_subsys(&ip_set_net_ops);
- if (ret) {
- pr_err("ip_set: cannot register pernet_subsys.\n");
- nf_unregister_sockopt(&so_set);
- nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
- return ret;
- }
+
pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
return 0;
}
@@ -2098,9 +2104,10 @@ ip_set_init(void)
static void __exit
ip_set_fini(void)
{
- unregister_pernet_subsys(&ip_set_net_ops);
nf_unregister_sockopt(&so_set);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+
+ unregister_pernet_subsys(&ip_set_net_ops);
pr_debug("these are the famous last words\n");
}
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 20bfbd315f61..613eb212cb48 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
ip &= ip_set_hostmask(h->netmask);
+ e.ip = htonl(ip);
+ if (e.ip == 0)
+ return -IPSET_ERR_HASH_ELEM;
- if (adt == IPSET_TEST) {
- e.ip = htonl(ip);
- if (e.ip == 0)
- return -IPSET_ERR_HASH_ELEM;
+ if (adt == IPSET_TEST)
return adtfn(set, &e, &ext, &ext, flags);
- }
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
- if (retried)
+ if (retried) {
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip += hosts) {
e.ip = htonl(ip);
- if (e.ip == 0)
- return -IPSET_ERR_HASH_ELEM;
+ }
+ for (; ip <= ip_to;) {
ret = adtfn(set, &e, &ext, &ext, flags);
-
if (ret && !ip_set_eexist(ret, flags))
return ret;
+ ip += hosts;
+ e.ip = htonl(ip);
+ if (e.ip == 0)
+ return 0;
+
ret = 0;
}
return ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index b64cf14e8352..f3ba8348cf9d 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
e.ip = htonl(ip);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index f438740e6c6a..ddb8039ec1d2 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 6215fb898c50..a7f4d7a85420 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5ab1b99a53c2..a2f19b9906e9 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
e.ip = htonl(ip);
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
@@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
ip == ntohl(h->next.ip) &&
p == ntohs(h->next.port)
? ntohl(h->next.ip2) : ip2_from;
- while (!after(ip2, ip2_to)) {
+ while (ip2 <= ip2_to) {
e.ip2 = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&cidr);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 5d9e895452e7..1c67a1761e45 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
}
if (retried)
ip = ntohl(h->next.ip);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 44cf11939c91..d417074f1c1a 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index db614e13b193..7f9ae2e9645b 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip[0]);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip[0] = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
ip2 = (retried &&
ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
: ip2_from;
- while (!after(ip2, ip2_to)) {
+ while (ip2 <= ip2_to) {
e.ip[1] = htonl(ip2);
last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 54b64b6cd0cd..e6ef382febe4 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &cidr);
e.cidr = cidr - 1;
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index aff846960ac4..8602f2595a1a 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip[0]);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip[0] = htonl(ip);
ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
: ip2_from;
- while (!after(ip2, ip2_to)) {
+ while (ip2 <= ip2_to) {
e.ip[1] = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&e.cidr[1]);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 90d396814798..4527921b1c3a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
{
struct sk_buff *new_skb = NULL;
struct iphdr *old_iph = NULL;
+ __u8 old_dsfield;
#ifdef CONFIG_IP_VS_IPV6
struct ipv6hdr *old_ipv6h = NULL;
#endif
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
*payload_len =
ntohs(old_ipv6h->payload_len) +
sizeof(*old_ipv6h);
- *dsfield = ipv6_get_dsfield(old_ipv6h);
+ old_dsfield = ipv6_get_dsfield(old_ipv6h);
*ttl = old_ipv6h->hop_limit;
if (df)
*df = 0;
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
/* fix old IP header checksum */
ip_send_check(old_iph);
- *dsfield = ipv4_get_dsfield(old_iph);
+ old_dsfield = ipv4_get_dsfield(old_iph);
*ttl = old_iph->ttl;
if (payload_len)
*payload_len = ntohs(old_iph->tot_len);
}
+ /* Implement full-functionality option for ECN encapsulation */
+ *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
+
return skb;
error:
kfree_skb(skb);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 929927171426..64e1ee091225 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
goto nla_put_failure;
- if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+ if (basechain->stats && nft_dump_stats(skb, basechain->stats))
goto nla_put_failure;
}
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
genmask);
- if (IS_ERR(chain2))
- return PTR_ERR(chain2);
+ if (!IS_ERR(chain2))
+ return -EEXIST;
}
if (nla[NFTA_CHAIN_COUNTERS]) {
@@ -2741,8 +2741,10 @@ cont:
list_for_each_entry(i, &ctx->table->sets, list) {
if (!nft_is_active_next(ctx->net, i))
continue;
- if (!strcmp(set->name, i->name))
+ if (!strcmp(set->name, i->name)) {
+ kfree(set->name);
return -ENFILE;
+ }
}
return 0;
}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c83a3b5e1c6c..d8571f414208 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
return ERR_PTR(-EFAULT);
- strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+ memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
info->num_counters = compat_tmp.num_counters;
user += sizeof(compat_tmp);
} else
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(info, user, sizeof(*info)) != 0)
return ERR_PTR(-EFAULT);
- info->name[sizeof(info->name) - 1] = '\0';
user += sizeof(*info);
}
+ info->name[sizeof(info->name) - 1] = '\0';
size = sizeof(struct xt_counters);
size *= info->num_counters;
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 38986a95216c..29123934887b 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/syscalls.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
return 0;
}
+static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
+{
+ mm_segment_t oldfs = get_fs();
+ int retval, fd;
+
+ set_fs(KERNEL_DS);
+ fd = bpf_obj_get_user(path);
+ set_fs(oldfs);
+ if (fd < 0)
+ return fd;
+
+ retval = __bpf_mt_check_fd(fd, ret);
+ sys_close(fd);
+ return retval;
+}
+
static int bpf_mt_check(const struct xt_mtchk_param *par)
{
struct xt_bpf_info *info = par->matchinfo;
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
return __bpf_mt_check_bytecode(info->bpf_program,
info->bpf_program_num_elem,
&info->filter);
- else if (info->mode == XT_BPF_MODE_FD_PINNED ||
- info->mode == XT_BPF_MODE_FD_ELF)
+ else if (info->mode == XT_BPF_MODE_FD_ELF)
return __bpf_mt_check_fd(info->fd, &info->filter);
+ else if (info->mode == XT_BPF_MODE_PATH_PINNED)
+ return __bpf_mt_check_path(info->path, &info->filter);
else
return -EINVAL;
}
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index e75ef39669c5..575d2153e3b8 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
transparent = nf_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
if (sk != skb->sk)
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
transparent = nf_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
if (sk != skb->sk)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 327807731b44..f34750691c5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2266,14 +2266,18 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
+ if (cb->start) {
+ ret = cb->start(cb);
+ if (ret)
+ goto error_unlock;
+ }
+
nlk->cb_running = true;
mutex_unlock(nlk->cb_mutex);
- if (cb->start)
- cb->start(cb);
-
ret = netlink_dump(sk);
+
sock_put(sk);
if (ret)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d288f52c53f7..bec01a3daf5b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2840,6 +2840,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
struct virtio_net_hdr vnet_hdr = { 0 };
int offset = 0;
struct packet_sock *po = pkt_sk(sk);
+ bool has_vnet_hdr = false;
int hlen, tlen, linear;
int extra_len = 0;
@@ -2883,6 +2884,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
if (err)
goto out_unlock;
+ has_vnet_hdr = true;
}
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
@@ -2941,7 +2943,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->priority = sk->sk_priority;
skb->mark = sockc.mark;
- if (po->has_vnet_hdr) {
+ if (has_vnet_hdr) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
if (err)
goto out_free;
@@ -3069,13 +3071,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
int ret = 0;
bool unlisted = false;
- if (po->fanout)
- return -EINVAL;
-
lock_sock(sk);
spin_lock(&po->bind_lock);
rcu_read_lock();
+ if (po->fanout) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
if (name) {
dev = dev_get_by_name_rcu(sock_net(sk), name);
if (!dev) {
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 22ed01a76b19..a72a7d925d46 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -463,6 +463,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
.r = r,
.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
};
+ int pos = cb->args[2];
/* eps hashtable dumps
* args:
@@ -493,7 +494,8 @@ skip:
goto done;
sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
- net, (int *)&cb->args[2], &commp);
+ net, &pos, &commp);
+ cb->args[2] = pos;
done:
cb->args[1] = cb->args[4];
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9b5de31aa429..c1841f234a71 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2203,7 +2203,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
struct sock_xprt *transport =
container_of(work, struct sock_xprt, connect_worker.work);
struct rpc_xprt *xprt = &transport->xprt;
- struct socket *sock = transport->sock;
+ struct socket *sock;
int status = -EIO;
sock = xs_create_sock(xprt, transport,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7d99029df342..a140dd4a84af 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
struct sk_buff_head xmitq;
int rc = 0;
- __skb_queue_head_init(&xmitq);
+ skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
if (tipc_link_bc_peers(l))
rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
u32 dst, selector;
selector = msg_link_selector(buf_msg(skb_peek(pkts)));
- __skb_queue_head_init(&_pkts);
+ skb_queue_head_init(&_pkts);
list_for_each_entry_safe(n, tmp, &dests->list, list) {
dst = n->value;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 6ef379f004ac..17146c16ee2d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
return false;
if (msg_errcode(msg))
return false;
- *err = -TIPC_ERR_NO_NAME;
+ *err = TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
msg = buf_msg(skb);
@@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
msg_set_destnode(msg, dnode);
msg_set_destport(msg, dport);
*err = TIPC_OK;
+
+ if (!skb_cloned(skb))
+ return true;
+
+ /* Unclone buffer in case it was bundled */
+ if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
+ return false;
+
return true;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 690874293cfc..d396cb61a280 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
[NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
};
+/* policy for packet pattern attributes */
+static const struct nla_policy
+nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
+ [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
+ [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
+ [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
@@ -10532,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
u8 *mask_pat;
nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
- NULL, info->extack);
+ nl80211_packet_pattern_policy,
+ info->extack);
err = -EINVAL;
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
@@ -10781,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
rem) {
u8 *mask_pat;
- nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL);
+ nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+ nl80211_packet_pattern_policy, NULL);
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
return -EINVAL;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index acf00104ef31..30e5746085b8 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
}
if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
+ xso->dev = NULL;
dev_put(dev);
return 0;
}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 2515cd2bc5db..8ac9d32fb79d 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -429,7 +429,8 @@ resume:
nf_reset(skb);
if (decaps) {
- skb->sp->olen = 0;
+ if (skb->sp)
+ skb->sp->olen = 0;
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return 0;
@@ -440,7 +441,8 @@ resume:
err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
if (xfrm_gro) {
- skb->sp->olen = 0;
+ if (skb->sp)
+ skb->sp->olen = 0;
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return err;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0dab1cd79ce4..12213477cd3a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -732,12 +732,12 @@ restart:
}
}
}
+out:
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (cnt) {
err = 0;
xfrm_policy_cache_flush();
}
-out:
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 2bfbd9121e3b..b997f1395357 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
+ xfrm_dev_state_delete(x);
__xfrm_state_put(x);
goto out;
}
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index dd2c262aebbf..8b80bac055e4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6390,7 +6390,7 @@ sub process {
exit(0);
}
- if (!$is_patch && $file !~ /cover-letter\.patch$/) {
+ if (!$is_patch && $filename !~ /cover-letter\.patch$/) {
ERROR("NOT_UNIFIED_DIFF",
"Does not appear to be a unified-diff format patch\n");
}
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 29df825d375c..2f6ce802397d 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -103,11 +103,12 @@ __faddr2line() {
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates.
+ file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
while read symbol; do
local fields=($symbol)
local sym_base=0x${fields[0]}
local sym_type=${fields[1]}
- local sym_end=0x${fields[3]}
+ local sym_end=${fields[3]}
# calculate the size
local sym_size=$(($sym_end - $sym_base))
@@ -157,7 +158,7 @@ __faddr2line() {
addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
DONE=1
- done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }')
+ done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
}
[[ $# -lt 2 ]] && usage
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 5d554419170b..9ee9bf7fd1a2 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s)
else if (str[0] == '$')
return -1;
/* exclude debugging symbols */
- else if (stype == 'N')
+ else if (stype == 'N' || stype == 'n')
return -1;
/* include the type field in the symbol name, so that it gets
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 400ef35169c5..aa0cc49ad1ad 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -53,6 +53,7 @@ acumulator||accumulator
adapater||adapter
addional||additional
additionaly||additionally
+additonal||additional
addres||address
adddress||address
addreses||addresses
@@ -67,6 +68,8 @@ adviced||advised
afecting||affecting
againt||against
agaist||against
+aggreataon||aggregation
+aggreation||aggregation
albumns||albums
alegorical||allegorical
algined||aligned
@@ -80,6 +83,8 @@ aligment||alignment
alignement||alignment
allign||align
alligned||aligned
+alllocate||allocate
+alloated||allocated
allocatote||allocate
allocatrd||allocated
allocte||allocate
@@ -171,6 +176,7 @@ availale||available
availavility||availability
availble||available
availiable||available
+availible||available
avalable||available
avaliable||available
aysnc||async
@@ -203,6 +209,7 @@ broadcat||broadcast
cacluated||calculated
caculation||calculation
calender||calendar
+calescing||coalescing
calle||called
callibration||calibration
calucate||calculate
@@ -210,6 +217,7 @@ calulate||calculate
cancelation||cancellation
cancle||cancel
capabilites||capabilities
+capabilty||capability
capabitilies||capabilities
capatibilities||capabilities
capapbilities||capabilities
@@ -302,6 +310,7 @@ containts||contains
contaisn||contains
contant||contact
contence||contents
+continious||continuous
continous||continuous
continously||continuously
continueing||continuing
@@ -393,6 +402,7 @@ differrence||difference
diffrent||different
diffrentiate||differentiate
difinition||definition
+dimesions||dimensions
diplay||display
direectly||directly
disassocation||disassociation
@@ -449,6 +459,7 @@ equiped||equipped
equivelant||equivalent
equivilant||equivalent
eror||error
+errorr||error
estbalishment||establishment
etsablishment||establishment
etsbalishment||establishment
@@ -481,6 +492,7 @@ failied||failed
faillure||failure
failue||failure
failuer||failure
+failng||failing
faireness||fairness
falied||failed
faliure||failure
@@ -493,6 +505,7 @@ fetaure||feature
fetaures||features
fileystem||filesystem
fimware||firmware
+firware||firmware
finanize||finalize
findn||find
finilizes||finalizes
@@ -502,6 +515,7 @@ folloing||following
followign||following
followings||following
follwing||following
+fonud||found
forseeable||foreseeable
forse||force
fortan||fortran
@@ -532,6 +546,7 @@ grabing||grabbing
grahical||graphical
grahpical||graphical
grapic||graphic
+grranted||granted
guage||gauge
guarenteed||guaranteed
guarentee||guarantee
@@ -543,6 +558,7 @@ happend||happened
harware||hardware
heirarchically||hierarchically
helpfull||helpful
+hybernate||hibernate
hierachy||hierarchy
hierarchie||hierarchy
howver||however
@@ -565,16 +581,19 @@ implemenation||implementation
implementaiton||implementation
implementated||implemented
implemention||implementation
+implementd||implemented
implemetation||implementation
implemntation||implementation
implentation||implementation
implmentation||implementation
implmenting||implementing
+incative||inactive
incomming||incoming
incompatabilities||incompatibilities
incompatable||incompatible
inconsistant||inconsistent
increas||increase
+incremeted||incremented
incrment||increment
indendation||indentation
indended||intended
@@ -619,6 +638,7 @@ interger||integer
intermittant||intermittent
internel||internal
interoprability||interoperability
+interuupt||interrupt
interrface||interface
interrrupt||interrupt
interrup||interrupt
@@ -638,8 +658,10 @@ intrrupt||interrupt
intterrupt||interrupt
intuative||intuitive
invaid||invalid
+invald||invalid
invalde||invalid
invalide||invalid
+invalidiate||invalidate
invalud||invalid
invididual||individual
invokation||invocation
@@ -713,6 +735,7 @@ misformed||malformed
mispelled||misspelled
mispelt||misspelt
mising||missing
+mismactch||mismatch
missmanaged||mismanaged
missmatch||mismatch
miximum||maximum
@@ -731,6 +754,7 @@ multidimensionnal||multidimensional
multple||multiple
mumber||number
muticast||multicast
+mutilcast||multicast
mutiple||multiple
mutli||multi
nams||names
@@ -834,6 +858,7 @@ posible||possible
positon||position
possibilites||possibilities
powerfull||powerful
+preample||preamble
preapre||prepare
preceeded||preceded
preceeding||preceding
@@ -1059,6 +1084,7 @@ sturcture||structure
subdirectoires||subdirectories
suble||subtle
substract||subtract
+submition||submission
succesfully||successfully
succesful||successful
successed||succeeded
@@ -1078,6 +1104,7 @@ suppoted||supported
suppported||supported
suppport||support
supress||suppress
+surpressed||suppressed
surpresses||suppresses
susbsystem||subsystem
suspeneded||suspended
@@ -1091,6 +1118,7 @@ swithced||switched
swithcing||switching
swithed||switched
swithing||switching
+swtich||switch
symetric||symmetric
synax||syntax
synchonized||synchronized
@@ -1111,7 +1139,9 @@ therfore||therefore
thier||their
threds||threads
threshhold||threshold
+thresold||threshold
throught||through
+troughput||throughput
thses||these
tiggered||triggered
tipically||typically
@@ -1120,6 +1150,7 @@ tmis||this
torerable||tolerable
tramsmitted||transmitted
tramsmit||transmit
+tranasction||transaction
tranfer||transfer
transciever||transceiver
transferd||transferred
@@ -1133,6 +1164,7 @@ trasmission||transmission
treshold||threshold
trigerring||triggering
trun||turn
+tunning||tuning
ture||true
tyep||type
udpate||update
@@ -1199,6 +1231,7 @@ visiters||visitors
vitual||virtual
wakeus||wakeups
wating||waiting
+wiat||wait
wether||whether
whataver||whatever
whcih||which
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 319add31b4a4..286171a16ed2 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1473,7 +1473,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
* @inode: the object
* @name: attribute name
* @buffer: where to put the result
- * @alloc: unused
+ * @alloc: duplicate memory
*
* Returns the size of the attribute or an error code
*/
@@ -1486,43 +1486,38 @@ static int smack_inode_getsecurity(struct inode *inode,
struct super_block *sbp;
struct inode *ip = (struct inode *)inode;
struct smack_known *isp;
- int ilen;
- int rc = 0;
- if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
isp = smk_of_inode(inode);
- ilen = strlen(isp->smk_known);
- *buffer = isp->smk_known;
- return ilen;
- }
+ else {
+ /*
+ * The rest of the Smack xattrs are only on sockets.
+ */
+ sbp = ip->i_sb;
+ if (sbp->s_magic != SOCKFS_MAGIC)
+ return -EOPNOTSUPP;
- /*
- * The rest of the Smack xattrs are only on sockets.
- */
- sbp = ip->i_sb;
- if (sbp->s_magic != SOCKFS_MAGIC)
- return -EOPNOTSUPP;
+ sock = SOCKET_I(ip);
+ if (sock == NULL || sock->sk == NULL)
+ return -EOPNOTSUPP;
- sock = SOCKET_I(ip);
- if (sock == NULL || sock->sk == NULL)
- return -EOPNOTSUPP;
-
- ssp = sock->sk->sk_security;
+ ssp = sock->sk->sk_security;
- if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- isp = ssp->smk_in;
- else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
- isp = ssp->smk_out;
- else
- return -EOPNOTSUPP;
+ if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+ isp = ssp->smk_in;
+ else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+ isp = ssp->smk_out;
+ else
+ return -EOPNOTSUPP;
+ }
- ilen = strlen(isp->smk_known);
- if (rc == 0) {
- *buffer = isp->smk_known;
- rc = ilen;
+ if (alloc) {
+ *buffer = kstrdup(isp->smk_known, GFP_KERNEL);
+ if (*buffer == NULL)
+ return -ENOMEM;
}
- return rc;
+ return strlen(isp->smk_known);
}
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index fec1dfdb14ad..4490a699030b 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = {
static int snd_compress_dev_register(struct snd_device *device)
{
int ret = -EINVAL;
- char str[16];
struct snd_compr *compr;
if (snd_BUG_ON(!device || !device->device_data))
return -EBADFD;
compr = device->device_data;
- pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
+ pr_debug("reg device %s, direction %d\n", compr->name,
compr->direction);
/* register compressed device */
ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 3a1cc7b97e46..b719d0bd833e 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -547,6 +547,7 @@ struct snd_pcm_mmap_status_x32 {
u32 pad2; /* alignment */
struct timespec tstamp;
s32 suspended_state;
+ s32 pad3;
struct timespec audio_tstamp;
} __packed;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index ea2d0ae85bd3..6c9cba2166d9 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
struct snd_seq_port_info *info = arg;
struct snd_seq_client_port *port;
struct snd_seq_port_callback *callback;
+ int port_idx;
/* it is not allowed to create the port for an another client */
if (info->addr.client != client->number)
@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
return -ENOMEM;
if (client->type == USER_CLIENT && info->kernel) {
- snd_seq_delete_port(client, port->addr.port);
+ port_idx = port->addr.port;
+ snd_seq_port_unlock(port);
+ snd_seq_delete_port(client, port_idx);
return -EINVAL;
}
if (client->type == KERNEL_CLIENT) {
@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
snd_seq_set_port_info(port, info);
snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
+ snd_seq_port_unlock(port);
return 0;
}
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 0a7020c82bfc..d21ece9f8d73 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
}
-/* create a port, port number is returned (-1 on failure) */
+/* create a port, port number is returned (-1 on failure);
+ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
+ */
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
int port)
{
@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
snd_use_lock_init(&new_port->use_lock);
port_subs_info_init(&new_port->c_src);
port_subs_info_init(&new_port->c_dest);
+ snd_use_lock_use(&new_port->use_lock);
num = port >= 0 ? port : 0;
mutex_lock(&client->ports_mutex);
@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
list_add_tail(&new_port->list, &p->list);
client->num_ports++;
new_port->addr.port = num; /* store the port number in the port */
+ sprintf(new_port->name, "port-%d", num);
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
- sprintf(new_port->name, "port-%d", num);
return new_port;
}
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 8d93a4021c78..f48a4cd24ffc 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
* decode input event and put to read buffer of each opened file
*/
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
- struct snd_seq_event *ev)
+ struct snd_seq_event *ev,
+ bool atomic)
{
struct snd_virmidi *vmidi;
unsigned char msg[4];
int len;
- read_lock(&rdev->filelist_lock);
+ if (atomic)
+ read_lock(&rdev->filelist_lock);
+ else
+ down_read(&rdev->filelist_sem);
list_for_each_entry(vmidi, &rdev->filelist, list) {
if (!vmidi->trigger)
continue;
@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
snd_rawmidi_receive(vmidi->substream, msg, len);
}
}
- read_unlock(&rdev->filelist_lock);
+ if (atomic)
+ read_unlock(&rdev->filelist_lock);
+ else
+ up_read(&rdev->filelist_sem);
return 0;
}
@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
struct snd_virmidi_dev *rdev;
rdev = rmidi->private_data;
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, true);
}
#endif /* 0 */
@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
rdev = private_data;
if (!(rdev->flags & SNDRV_VIRMIDI_USE))
return 0; /* ignored */
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, atomic);
}
/*
@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_rawmidi_runtime *runtime = substream->runtime;
struct snd_virmidi *vmidi;
- unsigned long flags;
vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
if (vmidi == NULL)
@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
vmidi->client = rdev->client;
vmidi->port = rdev->port;
runtime->private_data = vmidi;
- write_lock_irqsave(&rdev->filelist_lock, flags);
+ down_write(&rdev->filelist_sem);
+ write_lock_irq(&rdev->filelist_lock);
list_add_tail(&vmidi->list, &rdev->filelist);
- write_unlock_irqrestore(&rdev->filelist_lock, flags);
+ write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
vmidi->rdev = rdev;
return 0;
}
@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_virmidi *vmidi = substream->runtime->private_data;
+ down_write(&rdev->filelist_sem);
write_lock_irq(&rdev->filelist_lock);
list_del(&vmidi->list);
write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
rdev->rmidi = rmidi;
rdev->device = device;
rdev->client = -1;
+ init_rwsem(&rdev->filelist_sem);
rwlock_init(&rdev->filelist_lock);
INIT_LIST_HEAD(&rdev->filelist);
rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7e3aa50b21f9..5badd08e1d69 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -103,6 +103,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
void __user *puhr;
union hpi_message_buffer_v1 *hm;
union hpi_response_buffer_v1 *hr;
+ u16 msg_size;
u16 res_max_size;
u32 uncopied_bytes;
int err = 0;
@@ -127,22 +128,25 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
/* Now read the message size and data from user space. */
- if (get_user(hm->h.size, (u16 __user *)puhm)) {
+ if (get_user(msg_size, (u16 __user *)puhm)) {
err = -EFAULT;
goto out;
}
- if (hm->h.size > sizeof(*hm))
- hm->h.size = sizeof(*hm);
+ if (msg_size > sizeof(*hm))
+ msg_size = sizeof(*hm);
/* printk(KERN_INFO "message size %d\n", hm->h.wSize); */
- uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
+ uncopied_bytes = copy_from_user(hm, puhm, msg_size);
if (uncopied_bytes) {
HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
err = -EFAULT;
goto out;
}
+ /* Override h.size in case it is changed between two userspace fetches */
+ hm->h.size = msg_size;
+
if (get_user(res_max_size, (u16 __user *)puhr)) {
err = -EFAULT;
goto out;
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 7326695bca33..d68f99e076a8 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
chip = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = ECHOGAIN_MAXOUT;
uinfo->dimen.d[0] = num_busses_out(chip);
uinfo->dimen.d[1] = num_busses_in(chip);
- uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
return 0;
}
@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
chip = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = ECHOGAIN_MAXOUT;
uinfo->dimen.d[0] = num_busses_out(chip);
uinfo->dimen.d[1] = num_pipes_out(chip);
- uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
return 0;
}
@@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 96;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = 0;
#ifdef ECHOCARD_HAS_VMIXER
@@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
#endif
uinfo->dimen.d[1] = 16; /* 16 channels */
uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */
- uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
return 0;
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2b64fabd5faa..c19c81d230bd 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -906,6 +906,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
hda_nid_t pin_nid, u32 stream_tag, int format)
{
struct hdmi_spec *spec = codec->spec;
+ unsigned int param;
int err;
err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
@@ -915,6 +916,26 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
return err;
}
+ if (is_haswell_plus(codec)) {
+
+ /*
+ * on recent platforms IEC Coding Type is required for HBR
+ * support, read current Digital Converter settings and set
+ * ICT bitfield if needed.
+ */
+ param = snd_hda_codec_read(codec, cvt_nid, 0,
+ AC_VERB_GET_DIGI_CONVERT_1, 0);
+
+ param = (param >> 16) & ~(AC_DIG3_ICT);
+
+ /* on recent platforms ICT mode is required for HBR support */
+ if (is_hbr_format(format))
+ param |= 0x1;
+
+ snd_hda_codec_write(codec, cvt_nid, 0,
+ AC_VERB_SET_DIGI_CONVERT_3, param);
+ }
+
snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format);
return 0;
}
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 0fb6b1b79261..d8409d9ae55b 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
if (err)
- return err;
+ goto err_kill_urb;
- if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
- return -ENODEV;
+ if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
+ err = -ENODEV;
+ goto err_kill_urb;
+ }
usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
cdev->vendor_name, CAIAQ_USB_STR_LEN);
@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
setup_card(cdev);
return 0;
+
+ err_kill_urb:
+ usb_kill_urb(&cdev->ep1_in_urb);
+ return err;
}
static int snd_probe(struct usb_interface *intf,
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 3dc36d913550..23d1d23aefec 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
struct usb_interface_descriptor *altsd;
void *control_header;
int i, protocol;
+ int rest_bytes;
/* find audiocontrol interface */
host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
@@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
return -EINVAL;
}
+ rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
+ control_header;
+
+ /* just to be sure -- this shouldn't hit at all */
+ if (rest_bytes <= 0) {
+ dev_err(&dev->dev, "invalid control header\n");
+ return -EINVAL;
+ }
+
switch (protocol) {
default:
dev_warn(&dev->dev,
@@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
case UAC_VERSION_1: {
struct uac1_ac_header_descriptor *h1 = control_header;
+ if (rest_bytes < sizeof(*h1)) {
+ dev_err(&dev->dev, "too short v1 buffer descriptor\n");
+ return -EINVAL;
+ }
+
if (!h1->bInCollection) {
dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
return -EINVAL;
}
+ if (rest_bytes < h1->bLength) {
+ dev_err(&dev->dev, "invalid buffer length (v1)\n");
+ return -EINVAL;
+ }
+
if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
return -EINVAL;
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 0ff5a7d2e19f..c8f723c3a033 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface,
return 0;
error:
- if (line6->disconnect)
- line6->disconnect(line6);
- snd_card_free(card);
+ /* we can call disconnect callback here because no close-sync is
+ * needed yet at this point
+ */
+ line6_disconnect(interface);
return ret;
}
EXPORT_SYMBOL_GPL(line6_probe);
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 956f847a96e4..451007c27743 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6)
intf = usb_ifnum_to_if(line6->usbdev,
pod->line6.properties->ctrl_if);
- usb_driver_release_interface(&podhd_driver, intf);
+ if (intf)
+ usb_driver_release_interface(&podhd_driver, intf);
}
}
@@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6,
line6->disconnect = podhd_disconnect;
+ init_timer(&pod->startup_timer);
+ INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
+
if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
/* claim the data interface */
intf = usb_ifnum_to_if(line6->usbdev,
@@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6,
}
/* init device and delay registering */
- init_timer(&pod->startup_timer);
- INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
podhd_startup(pod);
return 0;
}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9732edf77f86..91bc8f18791e 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
{
+ /* kill pending URBs */
+ snd_usb_mixer_disconnect(mixer);
+
kfree(mixer->id_elems);
if (mixer->urb) {
kfree(mixer->urb->transfer_buffer);
@@ -2584,8 +2587,13 @@ _error:
void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
{
- usb_kill_urb(mixer->urb);
- usb_kill_urb(mixer->rc_urb);
+ if (mixer->disconnected)
+ return;
+ if (mixer->urb)
+ usb_kill_urb(mixer->urb);
+ if (mixer->rc_urb)
+ usb_kill_urb(mixer->rc_urb);
+ mixer->disconnected = true;
}
#ifdef CONFIG_PM
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 2b4b067646ab..545d99b09706 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -22,6 +22,8 @@ struct usb_mixer_interface {
struct urb *rc_urb;
struct usb_ctrlrequest *rc_setup_packet;
u8 rc_buffer[6];
+
+ bool disconnected;
};
#define MAX_CHANNELS 16 /* max logical channels */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 913552078285..9ddaae3784f5 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1137,6 +1137,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ case USB_ID(0x047F, 0xC022): /* Plantronics C310 */
+ case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */
+ case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index 4dab49080700..e229abd21652 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
}
pg = get_order(read_size);
- sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+ sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+ __GFP_NOWARN, pg);
if (!sk->s) {
snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
goto out;
@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
pg = get_order(write_size);
sk->write_page =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+ (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+ __GFP_NOWARN, pg);
if (!sk->write_page) {
snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
usb_stream_free(sk);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 43ab5c402f98..f90860d1f897 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -312,7 +312,7 @@ union bpf_attr {
* jump into another BPF program
* @ctx: context pointer passed to next program
* @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- * @index: index inside array that selects specific program to run
+ * @index: 32-bit index inside array that selects specific program to run
* Return: 0 on success or negative error
*
* int bpf_clone_redirect(skb, ifindex, flags)
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 3d4c3b5e1868..0c977b6e0f8b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -586,7 +586,7 @@ static void print_sample_brstack(struct perf_sample *sample,
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
}
- printf("0x%"PRIx64, from);
+ printf(" 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alf.map, stdout);
@@ -681,7 +681,7 @@ static void print_sample_brstackoff(struct perf_sample *sample,
if (alt.map && !alt.map->dso->adjust_symbols)
to = map__map_ip(alt.map, to);
- printf("0x%"PRIx64, from);
+ printf(" 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alf.map, stdout);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index be09d77cade0..a971caf3759d 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -685,6 +685,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
{
struct symbol *sym = node->sym;
u64 left, right;
+ struct dso *left_dso = NULL;
+ struct dso *right_dso = NULL;
if (callchain_param.key == CCKEY_SRCLINE) {
enum match_result match = match_chain_srcline(node, cnode);
@@ -696,12 +698,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) {
left = cnode->ms.sym->start;
right = sym->start;
+ left_dso = cnode->ms.map->dso;
+ right_dso = node->map->dso;
} else {
left = cnode->ip;
right = node->ip;
}
- if (left == right) {
+ if (left == right && left_dso == right_dso) {
if (node->branch) {
cnode->branch_count++;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f6257fb4f08c..39b15968eab1 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms)
static struct perf_evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
- char *name, struct cpu_map *cpus,
+ char *name, struct perf_pmu *pmu,
struct list_head *config_terms, bool auto_merge_stats)
{
struct perf_evsel *evsel;
+ struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
event_attr_init(attr);
@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx,
(*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);
- evsel->system_wide = !!cpus;
+ evsel->system_wide = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats;
if (name)
@@ -1233,7 +1234,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
if (!head_config) {
attr.type = pmu->type;
- evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu->cpus, NULL, auto_merge_stats);
+ evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
return evsel ? 0 : -ENOMEM;
}
@@ -1254,7 +1255,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
evsel = __add_event(list, &parse_state->idx, &attr,
- get_config_name(head_config), pmu->cpus,
+ get_config_name(head_config), pmu,
&config_terms, auto_merge_stats);
if (evsel) {
evsel->unit = info.unit;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index ac16a9db1fb5..1c4d7b4e4fb5 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -470,17 +470,36 @@ static void pmu_read_sysfs(void)
closedir(dir);
}
+static struct cpu_map *__pmu_cpumask(const char *path)
+{
+ FILE *file;
+ struct cpu_map *cpus;
+
+ file = fopen(path, "r");
+ if (!file)
+ return NULL;
+
+ cpus = cpu_map__read(file);
+ fclose(file);
+ return cpus;
+}
+
+/*
+ * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
+ * may have a "cpus" file.
+ */
+#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
+#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
+
static struct cpu_map *pmu_cpumask(const char *name)
{
- struct stat st;
char path[PATH_MAX];
- FILE *file;
struct cpu_map *cpus;
const char *sysfs = sysfs__mountpoint();
const char *templates[] = {
- "%s/bus/event_source/devices/%s/cpumask",
- "%s/bus/event_source/devices/%s/cpus",
- NULL
+ CPUS_TEMPLATE_UNCORE,
+ CPUS_TEMPLATE_CPU,
+ NULL
};
const char **template;
@@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name)
for (template = templates; *template; template++) {
snprintf(path, PATH_MAX, *template, sysfs, name);
- if (stat(path, &st) == 0)
- break;
+ cpus = __pmu_cpumask(path);
+ if (cpus)
+ return cpus;
}
- if (!*template)
- return NULL;
+ return NULL;
+}
- file = fopen(path, "r");
- if (!file)
- return NULL;
+static bool pmu_is_uncore(const char *name)
+{
+ char path[PATH_MAX];
+ struct cpu_map *cpus;
+ const char *sysfs = sysfs__mountpoint();
- cpus = cpu_map__read(file);
- fclose(file);
- return cpus;
+ snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
+ cpus = __pmu_cpumask(path);
+ cpu_map__put(cpus);
+
+ return !!cpus;
}
/*
@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->cpus = pmu_cpumask(name);
+ pmu->is_uncore = pmu_is_uncore(name);
+
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
list_splice(&format, &pmu->format);
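
pmu_is_uncore() classifies a PMU as uncore when it exposes a "cpumask" file under sysfs, as opposed to the "cpus" file used by CPU PMUs. A small standalone probe of the same path, assuming a /sys mount point and an example PMU name (adjust locally):

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"

/* Treat a PMU as uncore if its sysfs cpumask file can be opened. */
static bool pmu_is_uncore(const char *sysfs, const char *name)
{
	char path[PATH_MAX];
	FILE *file;

	snprintf(path, sizeof(path), CPUS_TEMPLATE_UNCORE, sysfs, name);
	file = fopen(path, "r");
	if (!file)
		return false;
	fclose(file);
	return true;
}

int main(void)
{
	/* "uncore_imc" is only an example name; pick a PMU present on the machine. */
	printf("uncore_imc is uncore: %d\n", pmu_is_uncore("/sys", "uncore_imc"));
	printf("cpu is uncore: %d\n", pmu_is_uncore("/sys", "cpu"));
	return 0;
}
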
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 389e9729331f..fe0de0502ce2 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -22,6 +22,7 @@ struct perf_pmu {
char *name;
__u32 type;
bool selectable;
+ bool is_uncore;
struct perf_event_attr *default_config;
struct cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 0f5e347b068d..152823b6cb21 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests
include ../lib.mk
override define RUN_TESTS
- $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
- $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
+ @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
+ @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
endef
override define EMIT_TESTS
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
index 00f286661dcd..dd4162fc0419 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
@@ -341,7 +341,7 @@ int main(int argc, char **argv)
return 0;
case 'n':
t = atoi(optarg);
- if (t > ARRAY_SIZE(test_cases))
+ if (t >= ARRAY_SIZE(test_cases))
error(1, 0, "Invalid test case: %d", t);
all_tests = false;
test_cases[t].enabled = true;
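
The rxtimestamp fix changes > to >= because an index equal to ARRAY_SIZE(test_cases) is already one past the last valid element. A minimal illustration of the corrected bound (the negative-index guard below is an extra belt-and-braces check, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static int test_cases[4];

int main(int argc, char **argv)
{
	int t = argc > 1 ? atoi(argv[1]) : 0;

	/* Valid indices are 0 .. ARRAY_SIZE-1, so t == ARRAY_SIZE must be rejected. */
	if (t < 0 || (size_t)t >= ARRAY_SIZE(test_cases)) {
		fprintf(stderr, "Invalid test case: %d\n", t);
		return 1;
	}
	test_cases[t] = 1;
	return 0;
}
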
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index a2c53a3d223d..de2f9ec8a87f 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
}
}
-static int copy_page(int ufd, unsigned long offset)
+static int __copy_page(int ufd, unsigned long offset, bool retry)
{
struct uffdio_copy uffdio_copy;
@@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset)
fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
uffdio_copy.copy), exit(1);
} else {
- if (test_uffdio_copy_eexist) {
+ if (test_uffdio_copy_eexist && retry) {
test_uffdio_copy_eexist = false;
retry_copy_page(ufd, &uffdio_copy, offset);
}
@@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset)
return 0;
}
+static int copy_page_retry(int ufd, unsigned long offset)
+{
+ return __copy_page(ufd, offset, true);
+}
+
+static int copy_page(int ufd, unsigned long offset)
+{
+ return __copy_page(ufd, offset, false);
+}
+
static void *uffd_poll_thread(void *arg)
{
unsigned long cpu = (unsigned long) arg;
@@ -544,7 +554,7 @@ static void *background_thread(void *arg)
for (page_nr = cpu * nr_pages_per_cpu;
page_nr < (cpu+1) * nr_pages_per_cpu;
page_nr++)
- copy_page(uffd, page_nr * page_size);
+ copy_page_retry(uffd, page_nr * page_size);
return NULL;
}
@@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd,
}
}
-static int uffdio_zeropage(int ufd, unsigned long offset)
+static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
{
struct uffdio_zeropage uffdio_zeropage;
int ret;
@@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n",
uffdio_zeropage.zeropage), exit(1);
} else {
- if (test_uffdio_zeropage_eexist) {
+ if (test_uffdio_zeropage_eexist && retry) {
test_uffdio_zeropage_eexist = false;
retry_uffdio_zeropage(ufd, &uffdio_zeropage,
offset);
@@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
return 0;
}
+static int uffdio_zeropage(int ufd, unsigned long offset)
+{
+ return __uffdio_zeropage(ufd, offset, false);
+}
+
/* exercise UFFDIO_ZEROPAGE */
static int userfaultfd_zeropage_test(void)
{
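
The userfaultfd changes split copy_page() and uffdio_zeropage() into a double-underscore helper taking a retry flag plus thin wrappers, so only the background-thread caller exercises the EEXIST retry path while ordinary callers never do. A compact sketch of the wrapper pattern with the operation body stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Core operation: optionally exercises the retry path once. */
static int __copy_page(unsigned long offset, bool retry)
{
	static bool retried;

	printf("copy at %lu\n", offset);
	if (retry && !retried) {
		retried = true;
		printf("retry at %lu\n", offset);
	}
	return 0;
}

/* Most callers never retry... */
static int copy_page(unsigned long offset)
{
	return __copy_page(offset, false);
}

/* ...only the background path does. */
static int copy_page_retry(unsigned long offset)
{
	return __copy_page(offset, true);
}

int main(void)
{
	copy_page(0);
	copy_page_retry(4096);
	return 0;
}
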
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 97f187e2663f..0a74a20ca32b 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
-CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
+CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
UNAME_M := $(shell uname -m)
CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)