author    David S. Miller <davem@davemloft.net>  2017-10-30 14:10:01 +0900
committer David S. Miller <davem@davemloft.net>  2017-10-30 21:09:24 +0900
commit    e1ea2f9856b765a2eaabb403a6751f70efc9ba4c (patch)
tree      771f0f96fdab1b27757730e96d911c73f5499ee4
parent    aad93c70b9a3b80dbc383a31e77a119f69bdd856 (diff)
parent    0b07194bb55ed836c2cc7c22e866b87a14681984 (diff)
download  linux-e1ea2f9856b765a2eaabb403a6751f70efc9ba4c.tar.gz
          linux-e1ea2f9856b765a2eaabb403a6751f70efc9ba4c.tar.bz2
          linux-e1ea2f9856b765a2eaabb403a6751f70efc9ba4c.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several conflicts here.

NFP driver bug fix adding nfp_netdev_is_nfp_repr() check to nfp_fl_output() needed some adjustments because the code block is in an else block now.

Parallel additions to net/pkt_cls.h and net/sch_generic.h.

A bug fix in __tcp_retransmit_skb() conflicted with some of the rbtree changes in net-next.

The tc action RCU callback fixes in 'net' had some overlap with some of the recent tcf_block reworking.

Signed-off-by: David S. Miller <davem@davemloft.net>
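For readers who want to reproduce or audit the conflict resolution locally, a minimal sketch follows. The remote name "net" and the branch names are illustrative assumptions, not part of this commit; the published result is commit e1ea2f9856b7.

    # Assumed setup: a net-next checkout plus the 'net' tree as a second remote.
    git remote add net git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    git fetch net
    git checkout -b merge-test net-next/master
    git merge net/master                  # stops on the conflicts described above
    git diff --name-only --diff-filter=U  # list the conflicted paths
    # Resolve by hand (e.g. keep the nfp_netdev_is_nfp_repr() check inside
    # the new else block in nfp_fl_output()), then conclude the merge:
    git add -A && git commit
    # Compare the local resolution against the published merge commit:
    git diff e1ea2f9856b7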
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-proximity-as39358
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power4
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/as3935.txt5
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt6
-rw-r--r--Documentation/kbuild/makefiles.txt31
-rw-r--r--MAINTAINERS4
-rw-r--r--Makefile12
-rw-r--r--arch/alpha/kernel/sys_alcor.c4
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c12
-rw-r--r--arch/alpha/kernel/sys_dp264.c20
-rw-r--r--arch/alpha/kernel/sys_eb64p.c4
-rw-r--r--arch/alpha/kernel/sys_eiger.c4
-rw-r--r--arch/alpha/kernel/sys_miata.c6
-rw-r--r--arch/alpha/kernel/sys_mikasa.c4
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/alpha/kernel/sys_noritake.c6
-rw-r--r--arch/alpha/kernel/sys_rawhide.c4
-rw-r--r--arch/alpha/kernel/sys_ruffian.c6
-rw-r--r--arch/alpha/kernel/sys_rx164.c4
-rw-r--r--arch/alpha/kernel/sys_sable.c10
-rw-r--r--arch/alpha/kernel/sys_sio.c8
-rw-r--r--arch/alpha/kernel/sys_sx164.c4
-rw-r--r--arch/alpha/kernel/sys_takara.c6
-rw-r--r--arch/alpha/kernel/sys_wildfire.c4
-rw-r--r--arch/arc/boot/dts/hsdk.dts11
-rw-r--r--arch/arc/configs/hsdk_defconfig1
-rw-r--r--arch/arc/kernel/smp.c5
-rw-r--r--arch/arc/plat-hsdk/Kconfig1
-rw-r--r--arch/arc/plat-hsdk/platform.c10
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c23
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S13
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/s390/kernel/entry.S7
-rw-r--r--arch/x86/entry/entry_64.S2
-rw-r--r--arch/x86/events/intel/bts.c6
-rw-r--r--arch/x86/include/asm/tlbflush.h21
-rw-r--r--arch/x86/kernel/amd_nb.c41
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c19
-rw-r--r--arch/x86/kernel/head32.c5
-rw-r--r--arch/x86/kernel/unwind_orc.c29
-rw-r--r--arch/x86/mm/tlb.c64
-rw-r--r--drivers/android/binder.c11
-rw-r--r--drivers/android/binder_alloc.c24
-rw-r--r--drivers/android/binder_alloc.h1
-rw-r--r--drivers/base/cpu.c3
-rw-r--r--drivers/base/power/domain_governor.c53
-rw-r--r--drivers/base/power/qos.c2
-rw-r--r--drivers/base/power/runtime.c2
-rw-r--r--drivers/base/power/sysfs.c25
-rw-r--r--drivers/block/nbd.c13
-rw-r--r--drivers/clocksource/cs5535-clockevt.c3
-rw-r--r--drivers/cpuidle/governors/menu.c4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/test/efi_test.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c70
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c4
-rw-r--r--drivers/hv/channel_mgmt.c5
-rw-r--r--drivers/hwmon/da9052-hwmon.c5
-rw-r--r--drivers/hwmon/tmp102.c13
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c45
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_events.c1
-rw-r--r--drivers/iio/pressure/zpa2326.c10
-rw-r--r--drivers/iio/proximity/as3935.c43
-rw-r--r--drivers/infiniband/core/netlink.c13
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1
-rw-r--r--drivers/input/rmi4/rmi_f30.c5
-rw-r--r--drivers/input/tablet/gtco.c17
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c43
-rw-r--r--drivers/irqchip/irq-tango.c2
-rw-r--r--drivers/net/can/sun4i_can.c3
-rw-r--r--drivers/net/can/usb/kvaser_usb.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c18
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/tap.c23
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/usb/cdc_ether.c14
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/nvme/host/fc.c37
-rw-r--r--drivers/nvme/host/rdma.c16
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/nvmet.h2
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c18
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c82
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c10
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c6
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c115
-rw-r--r--drivers/regulator/axp20x-regulator.c2
-rw-r--r--drivers/regulator/rn5t618-regulator.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c5
-rw-r--r--drivers/s390/scsi/zfcp_erp.c18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c5
-rw-r--r--drivers/scsi/aacraid/comminit.c8
-rw-r--r--drivers/scsi/aacraid/linit.c7
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/scsi_lib.c8
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/spi/spi-armada-3700.c145
-rw-r--r--drivers/spi/spi-bcm-qspi.c9
-rw-r--r--drivers/spi/spi-stm32.c4
-rw-r--r--drivers/spi/spi.c13
-rw-r--r--drivers/staging/iio/meter/ade7759.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c19
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/hub.c11
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/host/xhci-hub.c23
-rw-r--r--drivers/usb/host/xhci-ring.c21
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/musb/musb_core.c21
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_cppi41.c94
-rw-r--r--drivers/usb/musb/sunxi.c2
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xen-balloon.c19
-rw-r--r--fs/ceph/caps.c5
-rw-r--r--fs/cifs/Kconfig5
-rw-r--r--fs/cifs/cifsglob.h8
-rw-r--r--fs/cifs/smb2maperror.c2
-rw-r--r--fs/cifs/smb2ops.c31
-rw-r--r--fs/cifs/smb2pdu.c33
-rw-r--r--fs/cifs/smb2pdu.h5
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smb2transport.c26
-rw-r--r--fs/fuse/dir.c3
-rw-r--r--fs/overlayfs/inode.c20
-rw-r--r--fs/overlayfs/namei.c32
-rw-r--r--fs/overlayfs/overlayfs.h3
-rw-r--r--fs/overlayfs/readdir.c11
-rw-r--r--fs/overlayfs/super.c3
-rw-r--r--fs/xfs/xfs_file.c21
-rw-r--r--include/linux/if_tap.h4
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/mlx5/port.h2
-rw-r--r--include/linux/pm_qos.h5
-rw-r--r--include/linux/sctp.h34
-rw-r--r--include/linux/swait.h27
-rw-r--r--include/net/fq_impl.h9
-rw-r--r--include/net/inet_sock.h6
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sm.h2
-rw-r--r--include/net/sctp/ulpevent.h2
-rw-r--r--include/net/strparser.h3
-rw-r--r--include/net/tcp.h1
-rw-r--r--include/uapi/linux/bpf.h6
-rw-r--r--include/uapi/linux/sctp.h2
-rw-r--r--include/uapi/linux/spi/spidev.h1
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/bpf/sockmap.c11
-rw-r--r--kernel/cpu.c5
-rw-r--r--kernel/irq/generic-chip.c15
-rw-r--r--kernel/workqueue.c37
-rw-r--r--lib/assoc_array.c51
-rw-r--r--net/core/filter.c32
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dsa/dsa2.c7
-rw-r--r--net/ipv4/inet_connection_sock.c3
-rw-r--r--net/ipv4/ipip.c59
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_output.c10
-rw-r--r--net/ipv6/ip6_gre.c20
-rw-r--r--net/mac80211/cfg.c12
-rw-r--r--net/mac80211/key.c37
-rw-r--r--net/rds/ib_send.c16
-rw-r--r--net/sched/act_sample.c1
-rw-r--r--net/sched/cls_api.c78
-rw-r--r--net/sched/cls_basic.c20
-rw-r--r--net/sched/cls_bpf.c19
-rw-r--r--net/sched/cls_cgroup.c22
-rw-r--r--net/sched/cls_flow.c19
-rw-r--r--net/sched/cls_flower.c20
-rw-r--r--net/sched/cls_fw.c19
-rw-r--r--net/sched/cls_matchall.c19
-rw-r--r--net/sched/cls_route.c19
-rw-r--r--net/sched/cls_rsvp.h19
-rw-r--r--net/sched/cls_tcindex.c38
-rw-r--r--net/sched/cls_u32.c29
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sctp/input.c22
-rw-r--r--net/sctp/ipv6.c8
-rw-r--r--net/sctp/sm_make_chunk.c9
-rw-r--r--net/sctp/sm_sideeffect.c8
-rw-r--r--net/sctp/socket.c32
-rw-r--r--net/sctp/stream.c26
-rw-r--r--net/sctp/ulpevent.c2
-rw-r--r--net/strparser/strparser.c17
-rw-r--r--net/sunrpc/xprt.c36
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/wireless/sme.c50
-rw-r--r--net/xfrm/xfrm_policy.c16
-rw-r--r--net/xfrm/xfrm_user.c25
-rw-r--r--samples/trace_events/trace-events-sample.c2
-rw-r--r--scripts/Makefile.modpost1
-rw-r--r--scripts/mod/devicetable-offsets.c1
-rw-r--r--scripts/mod/file2alias.c6
-rw-r--r--security/apparmor/.gitignore1
-rw-r--r--security/apparmor/Makefile43
-rw-r--r--security/apparmor/apparmorfs.c1
-rw-r--r--security/apparmor/file.c30
-rw-r--r--security/apparmor/include/audit.h26
-rw-r--r--security/apparmor/include/net.h114
-rw-r--r--security/apparmor/include/perms.h5
-rw-r--r--security/apparmor/include/policy.h13
-rw-r--r--security/apparmor/lib.c5
-rw-r--r--security/apparmor/lsm.c387
-rw-r--r--security/apparmor/net.c184
-rw-r--r--security/apparmor/policy_unpack.c47
-rw-r--r--sound/pci/hda/patch_realtek.c19
-rw-r--r--tools/include/uapi/linux/bpf.h4
-rw-r--r--tools/objtool/check.c9
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rwxr-xr-xtools/perf/tests/shell/trace+probe_libc_inet_pton.sh9
-rw-r--r--tools/perf/ui/hist.c9
-rw-r--r--tools/perf/util/parse-events.l17
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/xyarray.h4
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/scripts/Makefile.include6
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json23
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py20
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py62
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py2
252 files changed, 2322 insertions, 2083 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
index 33e96f740639..147d4e8a1403 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -14,3 +14,11 @@ Description:
Show or set the gain boost of the amp, from 0-31 range.
18 = indoors (default)
14 = outdoors
+
+What /sys/bus/iio/devices/iio:deviceX/noise_level_tripped
+Date: May 2017
+KernelVersion: 4.13
+Contact: Matt Ranostay <matt.ranostay@konsulko.com>
+Description:
+ When 1 the noise level is over the trip level and not reporting
+ valid data
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 676fdf5f2a99..5cbb6f038615 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -211,7 +211,9 @@ Description:
device, after it has been suspended at run time, from a resume
request to the moment the device will be ready to process I/O,
in microseconds. If it is equal to 0, however, this means that
- the PM QoS resume latency may be arbitrary.
+ the PM QoS resume latency may be arbitrary and the special value
+ "n/a" means that user space cannot accept any resume latency at
+ all for the given device.
Not all drivers support this attribute. If it isn't supported,
it is not present.
diff --git a/Documentation/devicetree/bindings/iio/proximity/as3935.txt b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
index 38d74314b7ab..b6c1afa6f02d 100644
--- a/Documentation/devicetree/bindings/iio/proximity/as3935.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
@@ -16,6 +16,10 @@ Optional properties:
- ams,tuning-capacitor-pf: Calibration tuning capacitor stepping
value 0 - 120pF. This will require using the calibration data from
the manufacturer.
+ - ams,nflwdth: Set the noise and watchdog threshold register on
+ startup. This will need to set according to the noise from the
+ MCU board, and possibly the local environment. Refer to the
+ datasheet for the threshold settings.
Example:
@@ -27,4 +31,5 @@ as3935@0 {
interrupt-parent = <&gpio1>;
interrupts = <16 1>;
ams,tuning-capacitor-pf = <80>;
+ ams,nflwdth = <0x44>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cdab0ea5..5eb108e180fa 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -99,7 +99,7 @@ Examples:
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
- reg = <0x0 0x2c200000 0 0x200000>;
+ reg = <0x0 0x2c200000 0 0x20000>;
};
};
@@ -124,14 +124,14 @@ Examples:
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
- reg = <0x0 0x2c200000 0 0x200000>;
+ reg = <0x0 0x2c200000 0 0x20000>;
};
gic-its@2c400000 {
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
- reg = <0x0 0x2c400000 0 0x200000>;
+ reg = <0x0 0x2c400000 0 0x20000>;
};
ppi-partitions {
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 329e740adea7..f6f80380dff2 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1108,14 +1108,6 @@ When kbuild executes, the following steps are followed (roughly):
ld
Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
- objcopy
- Copy binary. Uses OBJCOPYFLAGS usually specified in
- arch/$(ARCH)/Makefile.
- OBJCOPYFLAGS_$@ may be used to set additional options.
-
- gzip
- Compress target. Use maximum compression to compress target.
-
Example:
#arch/x86/boot/Makefile
LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary
@@ -1139,6 +1131,19 @@ When kbuild executes, the following steps are followed (roughly):
resulting in the target file being recompiled for no
obvious reason.
+ objcopy
+ Copy binary. Uses OBJCOPYFLAGS usually specified in
+ arch/$(ARCH)/Makefile.
+ OBJCOPYFLAGS_$@ may be used to set additional options.
+
+ gzip
+ Compress target. Use maximum compression to compress target.
+
+ Example:
+ #arch/x86/boot/compressed/Makefile
+ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
+ $(call if_changed,gzip)
+
dtc
Create flattened device tree blob object suitable for linking
into vmlinux. Device tree blobs linked into vmlinux are placed
@@ -1219,7 +1224,7 @@ When kbuild executes, the following steps are followed (roughly):
that may be shared between individual architectures.
The recommended approach how to use a generic header file is
to list the file in the Kbuild file.
- See "7.3 generic-y" for further info on syntax etc.
+ See "7.2 generic-y" for further info on syntax etc.
--- 6.11 Post-link pass
@@ -1254,13 +1259,13 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and
arch/<arch>/include/asm/ to list asm files coming from asm-generic.
See subsequent chapter for the syntax of the Kbuild file.
- --- 7.1 no-export-headers
+--- 7.1 no-export-headers
no-export-headers is essentially used by include/uapi/linux/Kbuild to
avoid exporting specific headers (e.g. kvm.h) on architectures that do
not support it. It should be avoided as much as possible.
- --- 7.2 generic-y
+--- 7.2 generic-y
If an architecture uses a verbatim copy of a header from
include/asm-generic then this is listed in the file
@@ -1287,7 +1292,7 @@ See subsequent chapter for the syntax of the Kbuild file.
Example: termios.h
#include <asm-generic/termios.h>
- --- 7.3 generated-y
+--- 7.3 generated-y
If an architecture generates other header files alongside generic-y
wrappers, generated-y specifies them.
@@ -1299,7 +1304,7 @@ See subsequent chapter for the syntax of the Kbuild file.
#arch/x86/include/asm/Kbuild
generated-y += syscalls_32.h
- --- 7.5 mandatory-y
+--- 7.4 mandatory-y
mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
to define the minimum set of headers that must be exported in
diff --git a/MAINTAINERS b/MAINTAINERS
index 3e95dd7db02c..c4d21b302409 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9220,7 +9220,6 @@ F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
M: Bin Liu <b-liu@ti.com>
L: linux-usb@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/musb/
@@ -10187,7 +10186,6 @@ F: Documentation/parport*.txt
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
-M: Chris Wright <chrisw@sous-sol.org>
M: Alok Kataria <akataria@vmware.com>
M: Rusty Russell <rusty@rustcorp.com.au>
L: virtualization@lists.linux-foundation.org
@@ -10567,6 +10565,8 @@ M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+R: Jiri Olsa <jolsa@redhat.com>
+R: Namhyung Kim <namhyung@kernel.org>
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
diff --git a/Makefile b/Makefile
index 46bfb0ed2257..5f91a28a3cea 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -130,8 +130,8 @@ endif
ifneq ($(KBUILD_OUTPUT),)
# check that the output directory actually exists
saved-output := $(KBUILD_OUTPUT)
-$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT))
-KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT))
+KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
+ && /bin/pwd)
$(if $(KBUILD_OUTPUT),, \
$(error failed to create output directory "$(saved-output)"))
@@ -697,11 +697,11 @@ KBUILD_CFLAGS += $(stackp-flag)
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
+CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
@@ -1399,7 +1399,7 @@ help:
@echo ' Build, install, and boot kernel before'
@echo ' running kselftest on it'
@echo ' kselftest-clean - Remove all generated kselftest files'
- @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed'
+ @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing'
@echo ' .config.'
@echo ''
@echo 'Userspace tools targets:'
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 118dc6af1805..7ad074fd5ab5 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -181,10 +181,10 @@ alcor_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[7][5] __initdata = {
+ static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
/* note: IDSEL 17 is XLT only */
{16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 4c50f8f40cbb..c0fa1fe5ce77 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -173,10 +173,10 @@ pc164_init_irq(void)
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
-static inline int __init
+static inline int
eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
{16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
@@ -203,10 +203,10 @@ eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
-static inline int __init
+static inline int
cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
{ 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
@@ -287,10 +287,10 @@ cia_cab_init_pci(void)
*
*/
-static inline int __init
+static inline int
alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[7][5] __initdata = {
+ static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
{ 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 6c35159bc00e..9e1e40ea1d14 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -356,7 +356,7 @@ clipper_init_irq(void)
* 10 64 bit PCI option slot 3 (not bus 0)
*/
-static int __init
+static int
isa_irq_fixup(const struct pci_dev *dev, int irq)
{
u8 irq8;
@@ -372,10 +372,10 @@ isa_irq_fixup(const struct pci_dev *dev, int irq)
return irq8 & 0xf;
}
-static int __init
+static int
dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[6][5] __initdata = {
+ static char irq_tab[6][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */
{ 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
@@ -394,10 +394,10 @@ dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, irq);
}
-static int __init
+static int
monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[13][5] __initdata = {
+ static char irq_tab[13][5] = {
/*INT INTA INTB INTC INTD */
{ 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */
{ -1, -1, -1, -1, -1}, /* IdSel 4 unused */
@@ -423,7 +423,7 @@ monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
-static u8 __init
+static u8
monet_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;
@@ -456,10 +456,10 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp)
return slot;
}
-static int __init
+static int
webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[13][5] __initdata = {
+ static char irq_tab[13][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
{ -1, -1, -1, -1, -1}, /* IdSel 8 unused */
@@ -478,10 +478,10 @@ webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
-static int __init
+static int
clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[7][5] __initdata = {
+ static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
{ 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
{ 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ad40a425e841..372661c56537 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -167,10 +167,10 @@ eb64p_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
{16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 15f42083bdb3..2731738b5872 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -141,7 +141,7 @@ eiger_init_irq(void)
}
}
-static int __init
+static int
eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u8 irq_orig;
@@ -158,7 +158,7 @@ eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq_orig - 0x80;
}
-static u8 __init
+static u8
eiger_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index d5b9776a608d..731d693fa1f9 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -149,10 +149,10 @@ miata_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[18][5] __initdata = {
+ static char irq_tab[18][5] = {
/*INT INTA INTB INTC INTD */
{16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */
{ -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */
@@ -196,7 +196,7 @@ miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
miata_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 5e82dc1ad6f2..350ec9c8335b 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -145,10 +145,10 @@ mikasa_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[8][5] __initdata = {
+ static char irq_tab[8][5] = {
/*INT INTA INTB INTC INTD */
{16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */
{ -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 8ae04a121186..d019e4ce07bd 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -62,7 +62,7 @@ nautilus_init_irq(void)
common_init_isa_dma();
}
-static int __init
+static int
nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/* Preserve the IRQ set up by the console. */
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 063e594fd969..2301678d9f9d 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -193,10 +193,10 @@ noritake_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[15][5] __initdata = {
+ static char irq_tab[15][5] = {
/*INT INTA INTB INTC INTD */
/* note: IDSELs 16, 17, and 25 are CORELLE only */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
@@ -221,7 +221,7 @@ noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
noritake_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index dfd510ae5d8c..546822d07dc7 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -221,10 +221,10 @@ rawhide_init_irq(void)
*
*/
-static int __init
+static int
rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
{ 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index a3f485257170..3b35e1913492 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -117,10 +117,10 @@ ruffian_kill_arch (int mode)
*
*/
-static int __init
+static int
ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[11][5] __initdata = {
+ static char irq_tab[11][5] = {
/*INT INTA INTB INTC INTD */
{-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
{-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
@@ -139,7 +139,7 @@ ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 08ee737d4fba..e178007107ef 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -142,7 +142,7 @@ rx164_init_irq(void)
*
*/
-static int __init
+static int
rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
#if 0
@@ -156,7 +156,7 @@ rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{ 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
};
#else
- static char irq_tab[6][5] __initdata = {
+ static char irq_tab[6][5] = {
/*INT INTA INTB INTC INTD */
{ 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
{ 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 8a0aa6d67b53..86d259c2612d 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -192,10 +192,10 @@ sable_init_irq(void)
* with the values in the irq swizzling tables above.
*/
-static int __init
+static int
sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[9][5] __initdata = {
+ static char irq_tab[9][5] = {
/*INT INTA INTB INTC INTD */
{ 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
{ 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
@@ -374,10 +374,10 @@ lynx_init_irq(void)
* with the values in the irq swizzling tables above.
*/
-static int __init
+static int
lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[19][5] __initdata = {
+ static char irq_tab[19][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
{ -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
@@ -404,7 +404,7 @@ lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
lynx_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index febd24eba7a6..9fd2895639d5 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -144,7 +144,7 @@ sio_fixup_irq_levels(unsigned int level_bits)
outb((level_bits >> 8) & 0xff, 0x4d1);
}
-static inline int __init
+static inline int
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/*
@@ -165,7 +165,7 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
* that they use the default INTA line, if they are interrupt
* driven at all).
*/
- static char irq_tab[][5] __initdata = {
+ static char irq_tab[][5] = {
/*INT A B C D */
{ 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
@@ -183,10 +183,10 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq >= 0 ? tmp : -1;
}
-static inline int __init
+static inline int
p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[][5] __initdata = {
+ static char irq_tab[][5] = {
/*INT A B C D */
{ 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index d063b360efed..23eee54d714a 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -94,10 +94,10 @@ sx164_init_irq(void)
* 9 32 bit PCI option slot 3
*/
-static int __init
+static int
sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
{ 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index dd0f1eae3c68..9101f2bb6176 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -155,10 +155,10 @@ takara_init_irq(void)
* assign it whatever the hell IRQ we like and it doesn't matter.
*/
-static int __init
+static int
takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[15][5] __initdata = {
+ static char irq_tab[15][5] = {
{ 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
{ 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
@@ -210,7 +210,7 @@ takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
takara_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot = PCI_SLOT(dev->devfn);
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index ee1874887776..c3f8b79fe214 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -288,10 +288,10 @@ wildfire_device_interrupt(unsigned long vector)
* 7 64 bit PCI 1 option slot 7
*/
-static int __init
+static int
wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[8][5] __initdata = {
+ static char irq_tab[8][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */
{ 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 8adde1b492f1..8f627c200d60 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -137,14 +137,15 @@
/*
* DW sdio controller has external ciu clock divider
* controlled via register in SDIO IP. Due to its
- * unexpected default value (it should devide by 1
- * but it devides by 8) SDIO IP uses wrong clock and
+ * unexpected default value (it should divide by 1
+ * but it divides by 8) SDIO IP uses wrong clock and
* works unstable (see STAR 9001204800)
+ * We switched to the minimum possible value of the
+ * divisor (div-by-2) in HSDK platform code.
* So add temporary fix and change clock frequency
- * from 100000000 to 12500000 Hz until we fix dw sdio
- * driver itself.
+ * to 50000000 Hz until we fix dw sdio driver itself.
*/
- clock-frequency = <12500000>;
+ clock-frequency = <50000000>;
#clock-cells = <0>;
};
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 15f0f6b5fec1..7b8f8faf8a24 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -63,7 +63,6 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_RESET_HSDK=y
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f46267153ec2..6df9d94a9537 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -23,6 +23,8 @@
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
+#include <linux/export.h>
+
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
@@ -30,6 +32,9 @@
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
+EXPORT_SYMBOL_GPL(smp_bitops_lock);
#endif
struct plat_smp_ops __weak plat_smp_ops;
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index bd08de4be75e..19ab3cf98f0f 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -8,3 +8,4 @@
menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
select CLK_HSDK
+ select RESET_HSDK
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 744e62e58788..fd0ae5e38639 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -74,6 +74,10 @@ static void __init hsdk_set_cpu_freq_1ghz(void)
pr_err("Failed to setup CPU frequency to 1GHz!");
}
+#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
+#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
+#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
+
static void __init hsdk_init_early(void)
{
/*
@@ -90,6 +94,12 @@ static void __init hsdk_init_early(void)
writel(1, (void __iomem *) CREG_PAE_UPDATE);
/*
+ * Switch SDIO external ciu clock divider from default div-by-8 to
+ * minimum possible div-by-2.
+ */
+ iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+ /*
* Setup CPU frequency to 1GHz.
* TODO: remove it after smart hsdk pll driver will be introduced.
*/
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index e71eefa2e427..0641ba54ab62 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -1,7 +1,7 @@
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8f2da8bba737..4dffa611376d 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
dir = iommu_tce_direction(tce);
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
- return H_PARAMETER;
+ tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+ ret = H_PARAMETER;
+ goto unlock_exit;
+ }
entry = ioba >> stt->page_shift;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- if (dir == DMA_NONE) {
+ if (dir == DMA_NONE)
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
stit->tbl, entry);
- } else {
- idx = srcu_read_lock(&vcpu->kvm->srcu);
+ else
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
entry, ua, dir);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- }
if (ret == H_SUCCESS)
continue;
if (ret == H_TOO_HARD)
- return ret;
+ goto unlock_exit;
WARN_ON_ONCE(1);
kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
kvmppc_tce_put(stt, entry, tce);
- return H_SUCCESS;
+unlock_exit:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ec69fa45d5a2..42639fba89e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
beq no_xive
ld r11, VCPU_XIVE_SAVED_STATE(r4)
li r9, TM_QW1_OS
- stdcix r11,r9,r10
eieio
+ stdcix r11,r9,r10
lwz r11, VCPU_XIVE_CAM_WORD(r4)
li r9, TM_QW1_OS + TM_WORD2
stwcix r11,r9,r10
li r9, 1
stw r9, VCPU_XIVE_PUSHED(r4)
+ eieio
no_xive:
#endif /* CONFIG_KVM_XICS */
@@ -1310,6 +1311,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
bne 3f
BEGIN_FTR_SECTION
PPC_MSGSYNC
+ lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
@@ -1400,8 +1402,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
- lwzx r11, r7, r10
eieio
+ lwzx r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldx r11, r6, r10
b 3f
@@ -1409,8 +1411,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
- lwzcix r11, r7, r10
eieio
+ lwzcix r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldcix r11, r6, r10
3: std r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1420,6 +1422,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r10, VCPU_XIVE_PUSHED(r9)
stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+ eieio
1:
#endif /* CONFIG_KVM_XICS */
/* Save more register state */
@@ -2788,6 +2791,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
+BEGIN_FTR_SECTION
+ PPC_MSGSYNC
+ lwsync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3480faaf1ef8..ee279c7f4802 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_PPC_HTM:
- r = cpu_has_feature(CPU_FTR_TM_COMP) &&
- is_kvmppc_hv_enabled(kvm);
+ r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
break;
default:
r = 0;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 21900e1cee9c..d185aa3965bf 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -521,12 +521,15 @@ ENTRY(pgm_check_handler)
tmhh %r8,0x0001 # test problem state bit
jnz 2f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
- # cleanup critical section for sie64a
+ # cleanup critical section for program checks in sie64a
lgr %r14,%r9
slg %r14,BASED(.Lsie_critical_start)
clg %r14,BASED(.Lsie_critical_length)
jhe 0f
- brasl %r14,.Lcleanup_sie
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
#endif
0: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 1f # -> enabled, can't be a double fault
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 49167258d587..f6cdb7a1455e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -808,7 +808,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
- UNWIND_HINT_IRET_REGS offset=8
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
/* Sanity check */
.if \shift_ist != -1 && \paranoid == 0
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 16076eb34699..141e07b06216 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -546,9 +546,6 @@ static int bts_event_init(struct perf_event *event)
if (event->attr.type != bts_pmu.type)
return -ENOENT;
- if (x86_add_exclusive(x86_lbr_exclusive_bts))
- return -EBUSY;
-
/*
* BTS leaks kernel addresses even when CPL0 tracing is
* disabled, so disallow intel_bts driver for unprivileged
@@ -562,6 +559,9 @@ static int bts_event_init(struct perf_event *event)
!capable(CAP_SYS_ADMIN))
return -EACCES;
+ if (x86_add_exclusive(x86_lbr_exclusive_bts))
+ return -EBUSY;
+
ret = x86_reserve_hardware();
if (ret) {
x86_del_exclusive(x86_lbr_exclusive_bts);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d362161d3291..c4aed0de565e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -82,12 +82,21 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
-/*
- * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
- * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
- * it's false, then we immediately switch CR3 when entering a kernel thread.
- */
-DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+static inline bool tlb_defer_switch_to_init_mm(void)
+{
+ /*
+ * If we have PCID, then switching to init_mm is reasonably
+ * fast. If we don't have PCID, then switching to init_mm is
+ * quite slow, so we try to defer it in the hopes that we can
+ * avoid it entirely. The latter approach runs the risk of
+ * receiving otherwise unnecessary IPIs.
+ *
+ * This choice is just a heuristic. The tlb code can handle this
+ * function returning true or false regardless of whether we have
+ * PCID.
+ */
+ return !static_cpu_has(X86_FEATURE_PCID);
+}
/*
* 6 because 6 should be plenty and struct tlb_state will fit in
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 458da8509b75..6db28f17ff28 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
{}
};
+#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
+
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
@@ -402,11 +406,48 @@ void amd_flush_garts(void)
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
+static void __fix_erratum_688(void *info)
+{
+#define MSR_AMD64_IC_CFG 0xC0011021
+
+ msr_set_bit(MSR_AMD64_IC_CFG, 3);
+ msr_set_bit(MSR_AMD64_IC_CFG, 14);
+}
+
+/* Apply erratum 688 fix so machines without a BIOS fix work. */
+static __init void fix_erratum_688(void)
+{
+ struct pci_dev *F4;
+ u32 val;
+
+ if (boot_cpu_data.x86 != 0x14)
+ return;
+
+ if (!amd_northbridges.num)
+ return;
+
+ F4 = node_to_amd_nb(0)->link;
+ if (!F4)
+ return;
+
+ if (pci_read_config_dword(F4, 0x164, &val))
+ return;
+
+ if (val & BIT(2))
+ return;
+
+ on_each_cpu(__fix_erratum_688, NULL, 0);
+
+ pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
+}
+
static __init int init_amd_nbs(void)
{
amd_cache_northbridges();
amd_cache_gart();
+ fix_erratum_688();
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 24f749324c0f..9990a71e311f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
unsigned int apicid, nshared, first, last;
- this_leaf = this_cpu_ci->info_list + index;
nshared = base->eax.split.num_threads_sharing + 1;
apicid = cpu_data(cpu).apicid;
first = apicid - (apicid % nshared);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8f7a9bbad514..7dbcb7adf797 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -34,6 +34,7 @@
#include <linux/mm.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
return 0;
}
+static bool is_blacklisted(unsigned int cpu)
+{
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
+ pr_err_once("late loading on model 79 is disabled.\n");
+ return true;
+ }
+
+ return false;
+}
+
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
bool refresh_fw)
{
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
const struct firmware *firmware;
enum ucode_state ret;
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
sprintf(name, "intel-ucode/%02x-%02x-%02x",
c->x86, c->x86_model, c->x86_mask);
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index cf2ce063f65a..2902ca4d5993 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
asmlinkage __visible void __init i386_start_kernel(void)
{
- cr4_init_shadow();
-
+ /* Make sure IDT is set up before any exception happens */
idt_setup_early_handler();
+ cr4_init_shadow();
+
sanitize_boot_params(&boot_params);
x86_early_init_platform_quirks();
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 570b70d3f604..b95007e7c1b3 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -86,8 +86,8 @@ static struct orc_entry *orc_find(unsigned long ip)
idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
if (unlikely((idx >= lookup_num_blocks-1))) {
- orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n",
- idx, lookup_num_blocks, ip);
+ orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
+ idx, lookup_num_blocks, (void *)ip);
return NULL;
}
@@ -96,8 +96,8 @@ static struct orc_entry *orc_find(unsigned long ip)
if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
(__start_orc_unwind + stop > __stop_orc_unwind))) {
- orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n",
- idx, lookup_num_blocks, start, stop, ip);
+ orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
+ idx, lookup_num_blocks, start, stop, (void *)ip);
return NULL;
}
@@ -373,7 +373,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_R10:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg R10 at ip %p\n",
+ orc_warn("missing regs for base reg R10 at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -382,7 +382,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_R13:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg R13 at ip %p\n",
+ orc_warn("missing regs for base reg R13 at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -391,7 +391,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_DI:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg DI at ip %p\n",
+ orc_warn("missing regs for base reg DI at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -400,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_DX:
if (!state->regs || !state->full_regs) {
- orc_warn("missing regs for base reg DX at ip %p\n",
+ orc_warn("missing regs for base reg DX at ip %pB\n",
(void *)state->ip);
goto done;
}
@@ -408,7 +408,7 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
- orc_warn("unknown SP base reg %d for ip %p\n",
+ orc_warn("unknown SP base reg %d for ip %pB\n",
orc->sp_reg, (void *)state->ip);
goto done;
}
@@ -436,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_TYPE_REGS:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
- orc_warn("can't dereference registers at %p for ip %p\n",
+ orc_warn("can't dereference registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
goto done;
}
@@ -448,7 +448,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_TYPE_REGS_IRET:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
- orc_warn("can't dereference iret registers at %p for ip %p\n",
+ orc_warn("can't dereference iret registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
goto done;
}
@@ -465,7 +465,8 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
- orc_warn("unknown .orc_unwind entry type %d\n", orc->type);
+ orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
+ orc->type, (void *)orig_ip);
break;
}
@@ -487,7 +488,7 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
- orc_warn("unknown BP base reg %d for ip %p\n",
+ orc_warn("unknown BP base reg %d for ip %pB\n",
orc->bp_reg, (void *)orig_ip);
goto done;
}
@@ -496,7 +497,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (state->stack_info.type == prev_type &&
on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
state->sp <= prev_sp) {
- orc_warn("stack going in the wrong direction? ip=%p\n",
+ orc_warn("stack going in the wrong direction? ip=%pB\n",
(void *)orig_ip);
goto done;
}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 658bf0090565..0f3d0cea4d00 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,7 +30,6 @@
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
-DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 *new_asid, bool *need_flush)
@@ -147,8 +146,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
this_cpu_write(cpu_tlbstate.is_lazy, false);
if (real_prev == next) {
- VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
- next->context.ctx_id);
+ VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+ next->context.ctx_id);
/*
* We don't currently support having a real mm loaded without
@@ -213,6 +212,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
}
/*
+ * Please ignore the name of this function. It should be called
+ * switch_to_kernel_thread().
+ *
* enter_lazy_tlb() is a hint from the scheduler that we are entering a
* kernel thread or other context without an mm. Acceptable implementations
* include doing nothing whatsoever, switching to init_mm, or various clever
@@ -227,7 +229,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
- if (static_branch_unlikely(&tlb_use_lazy_mode)) {
+ if (tlb_defer_switch_to_init_mm()) {
/*
* There's a significant optimization that may be possible
* here. We have accurate enough TLB flush tracking that we
@@ -626,57 +628,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
-
-static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char buf[2];
-
- buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
- buf[1] = '\n';
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t tlblazy_write_file(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- bool val;
-
- if (kstrtobool_from_user(user_buf, count, &val))
- return -EINVAL;
-
- if (val)
- static_branch_enable(&tlb_use_lazy_mode);
- else
- static_branch_disable(&tlb_use_lazy_mode);
-
- return count;
-}
-
-static const struct file_operations fops_tlblazy = {
- .read = tlblazy_read_file,
- .write = tlblazy_write_file,
- .llseek = default_llseek,
-};
-
-static int __init init_tlb_use_lazy_mode(void)
-{
- if (boot_cpu_has(X86_FEATURE_PCID)) {
- /*
- * Heuristic: with PCID on, switching to and from
- * init_mm is reasonably fast, but remote flush IPIs
- * as expensive as ever, so turn off lazy TLB mode.
- *
- * We can't do this in setup_pcid() because static keys
- * haven't been initialized yet, and it would blow up
- * badly.
- */
- static_branch_disable(&tlb_use_lazy_mode);
- }
-
- debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
- arch_debugfs_dir, NULL, &fops_tlblazy);
- return 0;
-}
-late_initcall(init_tlb_use_lazy_mode);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 0621a95b8597..fddf76ef5bd6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3662,12 +3662,6 @@ static void binder_stat_br(struct binder_proc *proc,
}
}
-static int binder_has_thread_work(struct binder_thread *thread)
-{
- return !binder_worklist_empty(thread->proc, &thread->todo) ||
- thread->looper_need_return;
-}
-
static int binder_put_node_cmd(struct binder_proc *proc,
struct binder_thread *thread,
void __user **ptrp,
@@ -4297,12 +4291,9 @@ static unsigned int binder_poll(struct file *filp,
binder_inner_proc_unlock(thread->proc);
- if (binder_has_work(thread, wait_for_proc_work))
- return POLLIN;
-
poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
+ if (binder_has_work(thread, wait_for_proc_work))
return POLLIN;
return 0;
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 064f5e31ec55..c2819a3d58a6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (!vma && need_mm)
- mm = get_task_mm(alloc->tsk);
+ if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
if (!vma && need_mm) {
@@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
alloc->pid, buffer->data,
- prev->data, next->data);
+ prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE,
NULL);
@@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ mmgrab(alloc->vma_vm_mm);
return 0;
@@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
@@ -926,9 +923,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
vma = alloc->vma;
if (vma) {
- mm = get_task_mm(alloc->tsk);
- if (!mm)
- goto err_get_task_mm_failed;
+ if (!mmget_not_zero(alloc->vma_vm_mm))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
@@ -963,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
err_down_write_mmap_sem_failed:
mmput_async(mm);
-err_get_task_mm_failed:
+err_mmget:
err_page_already_freed:
mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
@@ -1002,7 +999,6 @@ struct shrinker binder_shrinker = {
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index a3a3602c689c..2dd33b6df104 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,6 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 321cd7b4d817..227bac5f1191 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int num)
per_cpu(cpu_sys_devices, num) = &cpu->dev;
register_cpu_under_node(num, cpu_to_node(num));
- dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
+ dev_pm_qos_expose_latency_limit(&cpu->dev,
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
return 0;
}
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 281f949c5ffe..51751cc8c9e6 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,23 +14,20 @@
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
- s32 constraint_ns = -1;
+ s64 constraint_ns = -1;
if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
- if (constraint_ns < 0) {
+ if (constraint_ns < 0)
constraint_ns = dev_pm_qos_read_value(dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
+
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
return 0;
- /*
- * constraint_ns cannot be negative here, because the device has been
- * suspended.
- */
- if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+ constraint_ns *= NSEC_PER_USEC;
+
+ if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0)
*constraint_ns_p = constraint_ns;
return 0;
@@ -63,10 +60,14 @@ static bool default_suspend_ok(struct device *dev)
spin_unlock_irqrestore(&dev->power.lock, flags);
- if (constraint_ns < 0)
+ if (constraint_ns == 0)
return false;
- constraint_ns *= NSEC_PER_USEC;
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ constraint_ns = -1;
+ else
+ constraint_ns *= NSEC_PER_USEC;
+
/*
* We can walk the children without any additional locking, because
* they all have been suspended at this point and their
@@ -76,14 +77,19 @@ static bool default_suspend_ok(struct device *dev)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
- if (constraint_ns > 0) {
- constraint_ns -= td->suspend_latency_ns +
- td->resume_latency_ns;
- if (constraint_ns == 0)
- return false;
+ if (constraint_ns < 0) {
+ /* The children have no constraints. */
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ td->cached_suspend_ok = true;
+ } else {
+ constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns;
+ if (constraint_ns > 0) {
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
+ } else {
+ td->effective_constraint_ns = 0;
+ }
}
- td->effective_constraint_ns = constraint_ns;
- td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
@@ -145,13 +151,14 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
/* default_suspend_ok() need not be called before us. */
- if (constraint_ns < 0) {
+ if (constraint_ns < 0)
constraint_ns = dev_pm_qos_read_value(pdd->dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
+
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
continue;
+ constraint_ns *= NSEC_PER_USEC;
+
/*
* constraint_ns cannot be negative here, because the device has
* been suspended.
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 277d43a83f53..7d29286d9313 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
plist_head_init(&c->list);
c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7bcf80fa9ada..13e015905543 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
- else if (__dev_pm_qos_read_value(dev) < 0)
+ else if (__dev_pm_qos_read_value(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 156ab57bca77..632077f05c57 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
+ s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+ if (value == 0)
+ return sprintf(buf, "n/a\n");
+ else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ value = 0;
+
+ return sprintf(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_store(struct device *dev,
@@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
s32 value;
int ret;
- if (kstrtos32(buf, 0, &value))
- return -EINVAL;
+ if (!kstrtos32(buf, 0, &value)) {
+ /*
+ * Prevent users from writing negative or "no constraint" values
+ * directly.
+ */
+ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ return -EINVAL;
- if (value < 0)
+ if (value == 0)
+ value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ value = 0;
+ } else {
return -EINVAL;
+ }
ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
value);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index baebbdfd74d5..9adfb5445f8d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
return result;
}
+/*
+ * Different settings for sk->sk_sndtimeo can result in different return values
+ * if there is a signal pending when we enter sendmsg: depending on the timeout,
+ * the socket layer may return either -ERESTARTSYS or -EINTR.
+ */
+static inline int was_interrupted(int result)
+{
+ return result == -ERESTARTSYS || result == -EINTR;
+}
+
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
@@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
if (result <= 0) {
- if (result == -ERESTARTSYS) {
+ if (was_interrupted(result)) {
/* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
* sure we only allow this req to be sent until we are
@@ -502,7 +511,7 @@ send_pages:
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result <= 0) {
- if (result == -ERESTARTSYS) {
+ if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
* return BUSY.
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index a1df588343f2..1de8cac99a0e 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
- if (clockevent_state_shutdown(&cs5535_clockevent))
+ if (clockevent_state_detached(&cs5535_clockevent) ||
+ clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 48eaf2879228..aa390404e85f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->needs_update = 0;
}
- /* resume_latency is 0 means no restriction */
- if (resume_latency && resume_latency < latency_req)
+ if (resume_latency < latency_req &&
+ resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 1cb2d1c070c3..a94601d5939e 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_random_get_seed(sys_table);
- if (!nokaslr()) {
+ /* hibernation expects the runtime regions to stay in the same place */
+ if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
/*
* Randomize the base of the UEFI runtime services region.
* Preserve the 2 MB alignment of the region by taking a
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 08129b7b80ab..41c48a1e8baa 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg)
if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
return -EFAULT;
+ if (qcaps.capsule_count == ULONG_MAX)
+ return -EINVAL;
+
capsules = kcalloc(qcaps.capsule_count + 1,
sizeof(efi_capsule_header_t), GFP_KERNEL);
if (!capsules)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 31db356476f8..430a6b4dfac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -225,11 +225,7 @@ static int uvd_v6_0_suspend(void *handle)
if (r)
return r;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU))
- r = amdgpu_uvd_suspend(adev);
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v6_0_resume(void *handle)
@@ -237,12 +233,10 @@ static int uvd_v6_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
- }
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
return uvd_v6_0_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c2743233ba10..b526f49be65d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
uint32_t reference_clock, tmp;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
@@ -3948,10 +3948,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
uint32_t ref_clock;
uint32_t refresh_rate = 0;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
-
cgs_get_active_displays_info(hwmgr->device, &info);
num_active_displays = info.display_count;
@@ -3967,6 +3966,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
frame_time_in_us = 1000000 / refresh_rate;
pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+
data->frame_time_x2 = frame_time_in_us * 2 / 100;
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21c36e256884..d4726a3358a4 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2723,6 +2723,9 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
+ if (!wa_ctx->per_ctx.valid)
+ return 0;
+
per_ctx_start[0] = 0x18800001;
per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 91b4300f3b39..e5320b4eb698 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -701,8 +701,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
-
- WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+ workload->wa_ctx.per_ctx.valid = per_ctx & 1;
}
if (emulate_schedule_in)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 2294466dd415..a5bed2e71b92 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1429,18 +1429,7 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
- unsigned int offset, void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
- return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
-}
-
-static int instdone_mmio_read(struct intel_vgpu *vgpu,
+static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -1589,6 +1578,8 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+ if (HAS_BSD2(dev_priv)) \
+ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
#define MMIO_RING_D(prefix, d) \
@@ -1635,10 +1626,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) (base + 0x6c)
- MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
- MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
+ MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG
- MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
+ MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
@@ -1648,7 +1638,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
@@ -1662,9 +1652,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
- ring_timestamp_mmio_read, NULL);
+ mmio_read_from_hw, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
- ring_timestamp_mmio_read, NULL);
+ mmio_read_from_hw, NULL);
MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2411,9 +2401,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
- intel_vgpu_reg_imr_handler);
-
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
@@ -2476,68 +2463,34 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
- MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- NULL, NULL);
- MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
- MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- NULL, NULL);
- MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- ring_mode_mmio_write);
- MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- ring_timestamp_mmio_read, NULL);
-
- MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
+ mmio_read_from_hw, NULL);
#define RING_REG(base) (base + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
- ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
- ring_reset_ctl_write);
#undef RING_REG
#define RING_REG(base) (base + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
- MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
#undef RING_REG
#define RING_REG(base) (base + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
- ~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x244)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x370)
MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
- NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x3a0)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
@@ -2557,11 +2510,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
#define RING_REG(base) (base + 0x270)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
- MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2849,7 +2800,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x65f08, D_SKL | D_KBL);
MMIO_D(0x320f0, D_SKL | D_KBL);
- MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL_PLUS);
MMIO_D(0x71034, D_SKL_PLUS);
MMIO_D(0x72034, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index fbd023a16f18..7d01c77a0f7a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -54,9 +54,6 @@
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
-#define _REG_VECS_EXCC 0x1A028
-#define _REG_VCS2_EXCC 0x1c028
-
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0d431a968a32..93a49eb0209e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -68,6 +68,7 @@ struct shadow_indirect_ctx {
struct shadow_per_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
+ unsigned valid;
};
struct intel_shadow_wa_ctx {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 94185d610673..370b9d248fed 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2537,6 +2537,10 @@ static const struct file_operations fops = {
.poll = i915_perf_poll,
.read = i915_perf_read,
.unlocked_ioctl = i915_perf_ioctl,
+ /* Our ioctls have no arguments, so it's safe to use the same function
+ * to handle 32-bit compatibility.
+ */
+ .compat_ioctl = i915_perf_ioctl,
};
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 018d2e0f8ec5..379b0df123be 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -937,7 +937,10 @@ void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
BUG_ON(!is_hvsock_channel(channel));
- channel->rescind = true;
+ /* We always get a rescind msg when a connection is closed. */
+ while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
+ msleep(1);
+
vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 97a62f5b9ea4..a973eb6a2890 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
/* disable touchscreen features */
da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00);
+ /* Sample every 1ms */
+ da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG,
+ DA9052_ADCCONT_ADCMODE,
+ DA9052_ADCCONT_ADCMODE);
+
err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY,
"tsiready-irq", da9052_tsi_datardy_irq,
hwmon);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5eafbaada795..dfc40c740d07 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client,
return err;
}
- tmp102->ready_time = jiffies;
- if (tmp102->config_orig & TMP102_CONF_SD) {
- /*
- * Mark that we are not ready with data until the first
- * conversion is complete
- */
- tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
- }
+ /*
+ * Mark that we are not ready with data until the first
+ * conversion is complete
+ */
+ tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
tmp102,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 57625653fcb6..1d13bf03c758 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -243,6 +243,8 @@ config DA9150_GPADC
config DLN2_ADC
tristate "Diolan DLN-2 ADC driver support"
depends on MFD_DLN2
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Diolan DLN-2 ADC.
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index bc5b38e3a147..a70ef7fec95f 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -225,6 +225,7 @@ struct at91_adc_trigger {
char *name;
unsigned int trgmod_value;
unsigned int edge_type;
+ bool hw_trig;
};
struct at91_adc_state {
@@ -254,16 +255,25 @@ static const struct at91_adc_trigger at91_adc_trigger_list[] = {
.name = "external_rising",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE,
.edge_type = IRQ_TYPE_EDGE_RISING,
+ .hw_trig = true,
},
{
.name = "external_falling",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL,
.edge_type = IRQ_TYPE_EDGE_FALLING,
+ .hw_trig = true,
},
{
.name = "external_any",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY,
.edge_type = IRQ_TYPE_EDGE_BOTH,
+ .hw_trig = true,
+ },
+ {
+ .name = "software",
+ .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER,
+ .edge_type = IRQ_TYPE_NONE,
+ .hw_trig = false,
},
};
@@ -597,7 +607,7 @@ static int at91_adc_probe(struct platform_device *pdev)
struct at91_adc_state *st;
struct resource *res;
int ret, i;
- u32 edge_type;
+ u32 edge_type = IRQ_TYPE_NONE;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
if (!indio_dev)
@@ -641,14 +651,14 @@ static int at91_adc_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node,
"atmel,trigger-edge-type", &edge_type);
if (ret) {
- dev_err(&pdev->dev,
- "invalid or missing value for atmel,trigger-edge-type\n");
- return ret;
+ dev_dbg(&pdev->dev,
+ "atmel,trigger-edge-type not specified, only software trigger available\n");
}
st->selected_trig = NULL;
- for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++)
+ /* find the right trigger, or no trigger at all */
+ for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++)
if (at91_adc_trigger_list[i].edge_type == edge_type) {
st->selected_trig = &at91_adc_trigger_list[i];
break;
@@ -717,24 +727,27 @@ static int at91_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
- ret = at91_adc_buffer_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
- goto per_clk_disable_unprepare;
- }
+ if (st->selected_trig->hw_trig) {
+ ret = at91_adc_buffer_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
+ goto per_clk_disable_unprepare;
+ }
- ret = at91_adc_trigger_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't setup the triggers.\n");
- goto per_clk_disable_unprepare;
+ ret = at91_adc_trigger_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't setup the triggers.\n");
+ goto per_clk_disable_unprepare;
+ }
}
ret = iio_device_register(indio_dev);
if (ret < 0)
goto per_clk_disable_unprepare;
- dev_info(&pdev->dev, "setting up trigger as %s\n",
- st->selected_trig->name);
+ if (st->selected_trig->hw_trig)
+ dev_info(&pdev->dev, "setting up trigger as %s\n",
+ st->selected_trig->name);
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index ed63ffd849f8..7ec2a0bb0807 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
st->event_en = state;
else
return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index ebfb1de7377f..91431454eb85 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -865,7 +865,6 @@ complete:
static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
struct zpa2326_private *private)
{
- int ret;
unsigned int val;
long timeout;
@@ -887,14 +886,11 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
/* Timed out. */
zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
timeout);
- ret = -ETIME;
- } else if (timeout < 0) {
- zpa2326_warn(indio_dev,
- "wait for one shot interrupt cancelled");
- ret = -ERESTARTSYS;
+ return -ETIME;
}
- return ret;
+ zpa2326_warn(indio_dev, "wait for one shot interrupt cancelled");
+ return -ERESTARTSYS;
}
static int zpa2326_init_managed_irq(struct device *parent,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 0eeff29b61be..4a48b7ba3a1c 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -39,8 +39,12 @@
#define AS3935_AFE_GAIN_MAX 0x1F
#define AS3935_AFE_PWR_BIT BIT(0)
+#define AS3935_NFLWDTH 0x01
+#define AS3935_NFLWDTH_MASK 0x7f
+
#define AS3935_INT 0x03
#define AS3935_INT_MASK 0x0f
+#define AS3935_DISTURB_INT BIT(2)
#define AS3935_EVENT_INT BIT(3)
#define AS3935_NOISE_INT BIT(0)
@@ -48,6 +52,7 @@
#define AS3935_DATA_MASK 0x3F
#define AS3935_TUNE_CAP 0x08
+#define AS3935_DEFAULTS 0x3C
#define AS3935_CALIBRATE 0x3D
#define AS3935_READ_DATA BIT(14)
@@ -62,7 +67,9 @@ struct as3935_state {
struct mutex lock;
struct delayed_work work;
+ unsigned long noise_tripped;
u32 tune_cap;
+ u32 nflwdth_reg;
u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
u8 buf[2] ____cacheline_aligned;
};
@@ -145,12 +152,29 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
return len;
}
+static ssize_t as3935_noise_level_tripped_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct as3935_state *st = iio_priv(dev_to_iio_dev(dev));
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sprintf(buf, "%d\n", !time_after(jiffies, st->noise_tripped + HZ));
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR,
as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0);
+static IIO_DEVICE_ATTR(noise_level_tripped, S_IRUGO,
+ as3935_noise_level_tripped_show, NULL, 0);
static struct attribute *as3935_attributes[] = {
&iio_dev_attr_sensor_sensitivity.dev_attr.attr,
+ &iio_dev_attr_noise_level_tripped.dev_attr.attr,
NULL,
};
@@ -246,7 +270,11 @@ static void as3935_event_work(struct work_struct *work)
case AS3935_EVENT_INT:
iio_trigger_poll_chained(st->trig);
break;
+ case AS3935_DISTURB_INT:
case AS3935_NOISE_INT:
+ mutex_lock(&st->lock);
+ st->noise_tripped = jiffies;
+ mutex_unlock(&st->lock);
dev_warn(&st->spi->dev, "noise level is too high\n");
break;
}
@@ -269,15 +297,14 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
static void calibrate_as3935(struct as3935_state *st)
{
- /* mask disturber interrupt bit */
- as3935_write(st, AS3935_INT, BIT(5));
-
+ as3935_write(st, AS3935_DEFAULTS, 0x96);
as3935_write(st, AS3935_CALIBRATE, 0x96);
as3935_write(st, AS3935_TUNE_CAP,
BIT(5) | (st->tune_cap / TUNE_CAP_DIV));
mdelay(2);
as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
+ as3935_write(st, AS3935_NFLWDTH, st->nflwdth_reg);
}
#ifdef CONFIG_PM_SLEEP
@@ -370,6 +397,15 @@ static int as3935_probe(struct spi_device *spi)
return -EINVAL;
}
+ ret = of_property_read_u32(np,
+ "ams,nflwdth", &st->nflwdth_reg);
+ if (!ret && st->nflwdth_reg > AS3935_NFLWDTH_MASK) {
+ dev_err(&spi->dev,
+ "invalid nflwdth setting of %d\n",
+ st->nflwdth_reg);
+ return -EINVAL;
+ }
+
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->channels = as3935_channels;
@@ -384,6 +420,7 @@ static int as3935_probe(struct spi_device *spi)
return -ENOMEM;
st->trig = trig;
+ st->noise_tripped = jiffies - HZ;
trig->dev.parent = indio_dev->dev.parent;
iio_trigger_set_drvdata(trig, indio_dev);
trig->ops = &iio_interrupt_trigger_ops;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b12e58787c3d..1fb72c356e36 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
+ /*
+ * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
+ * mistakenly call the .dump() function.
+ */
+ if (index == RDMA_NL_LS) {
+ if (cb_table[op].doit)
+ return cb_table[op].doit(skb, nlh, extack);
+ return -EINVAL;
+ }
/* FIXME: Convert IWCM to properly handle doit callbacks */
if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
index == RDMA_NL_IWCM) {
struct netlink_dump_control c = {
.dump = cb_table[op].dump,
};
- return netlink_dump_start(nls, skb, nlh, &c);
+ if (c.dump)
+ return netlink_dump_start(nls, skb, nlh, &c);
+ return -EINVAL;
}
if (cb_table[op].doit)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 0e761d079dc4..6d6b092e2da9 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 34dfee555b20..82e0f0d43d55 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -232,9 +232,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
unsigned int trackstick_button = BTN_LEFT;
bool button_mapped = false;
int i;
+ int button_count = min_t(u8, f30->gpioled_count, TRACKSTICK_RANGE_END);
f30->gpioled_key_map = devm_kcalloc(&fn->dev,
- f30->gpioled_count,
+ button_count,
sizeof(f30->gpioled_key_map[0]),
GFP_KERNEL);
if (!f30->gpioled_key_map) {
@@ -242,7 +243,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
return -ENOMEM;
}
- for (i = 0; i < f30->gpioled_count; i++) {
+ for (i = 0; i < button_count; i++) {
if (!rmi_f30_is_valid_button(i, f30->ctrl))
continue;
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index b796e891e2ee..4b8b9d7aa75e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
/* Walk this report and pull out the info we need */
while (i < length) {
- prefix = report[i];
-
- /* Skip over prefix */
- i++;
+ prefix = report[i++];
/* Determine data size and save the data in the proper variable */
- size = PREF_SIZE(prefix);
+ size = (1U << PREF_SIZE(prefix)) >> 1;
+ if (i + size > length) {
+ dev_err(ddev,
+ "Not enough data (need %d, have %d)\n",
+ i + size, length);
+ break;
+ }
+
switch (size) {
case 1:
data = report[i];
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case 2:
data16 = get_unaligned_le16(&report[i]);
break;
- case 3:
- size = 4;
+ case 4:
data32 = get_unaligned_le32(&report[i]);
break;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e8d89343d613..e88395605e32 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -107,6 +107,10 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS (16)
+#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
+
/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size)
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
+ its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid)
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
+ its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
- its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+ its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}
static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
u64 val = its_read_baser(its, baser);
u64 esz = GITS_BASER_ENTRY_SIZE(val);
u64 type = GITS_BASER_TYPE(val);
+ u64 baser_phys, tmp;
u32 alloc_pages;
void *base;
- u64 tmp;
retry_alloc_baser:
alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
@@ -1496,8 +1500,24 @@ retry_alloc_baser:
if (!base)
return -ENOMEM;
+ baser_phys = virt_to_phys(base);
+
+ /* Check if the physical address of the memory is above 48bits */
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+
+ /* 52bit PA is supported only when PageSize=64K */
+ if (psz != SZ_64K) {
+ pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ /* Convert 52bit PA to 48bit field */
+ baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+ }
+
retry_baser:
- val = (virt_to_phys(base) |
+ val = (baser_phys |
(type << GITS_BASER_TYPE_SHIFT) |
((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
@@ -1582,13 +1602,12 @@ retry_baser:
static bool its_parse_indirect_baser(struct its_node *its,
struct its_baser *baser,
- u32 psz, u32 *order)
+ u32 psz, u32 *order, u32 ids)
{
u64 tmp = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(tmp);
u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
- u32 ids = its->device_ids;
u32 new_order = *order;
bool indirect = false;
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its)
continue;
case GITS_BASER_TYPE_DEVICE:
+ indirect = its_parse_indirect_baser(its, baser,
+ psz, &order,
+ its->device_ids);
case GITS_BASER_TYPE_VCPU:
indirect = its_parse_indirect_baser(its, baser,
- psz, &order);
+ psz, &order,
+ ITS_MAX_VPEID_BITS);
break;
}
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = {
static int its_vpe_id_alloc(void)
{
- return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+ return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}
static void its_vpe_id_free(u16 id)
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void)
return -ENOMEM;
}
- BUG_ON(entries != vpe_proxy.dev->nr_ites);
+ BUG_ON(entries > vpe_proxy.dev->nr_ites);
raw_spin_lock_init(&vpe_proxy.lock);
vpe_proxy.next_victim = 0;
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
index bdbb5c0ff7fe..0c085303a583 100644
--- a/drivers/irqchip/irq-tango.c
+++ b/drivers/irqchip/irq-tango.c
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
for (i = 0; i < 2; i++) {
ct[i].chip.irq_ack = irq_gc_ack_set_bit;
ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
- ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+ ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
ct[i].chip.irq_set_type = tangox_irq_set_type;
ct[i].chip.name = gc->domain->name;
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4cd821..b0c80859f746 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
/* enter the selected mode */
mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
- if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK |
- CAN_CTRLMODE_PRESUME_ACK |
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 18cc529fb807..9b18d96ef526 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_LEAF_USB_THROTTLE 77
#define CMD_LEAF_LOG_MESSAGE 106
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
goto warn;
break;
+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->family != KVASER_LEAF)
+ goto warn;
+ break;
+
default:
warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+ if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n", err);
err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ec8aa4562cc9..3b3983a1ffbb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
- char *p = NULL;
const struct e1000_stats *stat = e1000_gstrings_stats;
e1000_update_stats(adapter);
- for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
+ char *p;
+
switch (stat->type) {
case NETDEV_STATS:
p = (char *)netdev + stat->stat_offset;
@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
default:
WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
stat->type, i);
- break;
+ continue;
}
if (stat->sizeof_stat == sizeof(u64))
data[i] = *(u64 *)p;
else
data[i] = *(u32 *)p;
-
- stat++;
}
/* BUG_ON(i != E1000_STATS_LEN); */
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 98375e1e1185..1982f7917a8d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- netif_carrier_off(netdev);
-
/* disable receives in the hardware */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH();
msleep(10);
+ /* Set the carrier off after transmits have been disabled in the
+ * hardware, to avoid race conditions with e1000_watchdog() (which
+ * may be running concurrently to us, checking for the carrier
+ * bit to decide whether it should enable transmits again). Such
+ * a race condition would result in transmission being disabled
+ * in the hardware until the next IFF_DOWN+IFF_UP cycle.
+ */
+ netif_carrier_off(netdev);
+
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index edbc94c4353d..c5cd233c8fee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2111,6 +2111,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (unlikely(i40e_rx_is_programming_status(qword))) {
i40e_clean_programming_status(rx_ring, rx_desc, qword);
+ cleaned_count++;
continue;
}
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@@ -2277,7 +2278,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
goto enable_int;
}
- if (ITR_IS_DYNAMIC(tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b3d730f4d695..e22bce7cdacd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5673,7 +5673,7 @@ dma_error:
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
- if (i--)
+ if (i-- == 0)
i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7f503d35eb1c..38bd2e339e48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8156,29 +8156,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
- tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- while (tx_buffer != first) {
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
-
- if (i--)
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
i += tx_ring->count;
- tx_buffer = &tx_ring->tx_buffer_info[i];
+ i--;
}
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
-
dev_kfree_skb_any(first->skb);
first->skb = NULL;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 97efe4733661..794a3b6aa573 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1167,6 +1167,11 @@ struct mvpp2_bm_pool {
u32 port_map;
};
+#define IS_TSO_HEADER(txq_pcpu, addr) \
+ ((addr) >= (txq_pcpu)->tso_headers_dma && \
+ (addr) < (txq_pcpu)->tso_headers_dma + \
+ (txq_pcpu)->size * TSO_HEADER_SIZE)
+
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
@@ -1534,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
- tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off];
+ tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
if (tcam_data != data)
return false;
return true;
@@ -2609,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
/* place holders only - no ports */
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, false);
- mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
- mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
+ mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
+ mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
/* Set default entries for various types of dsa packets */
@@ -3391,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
struct mvpp2_prs_entry *pe;
int tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return NULL;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -3453,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
if (tid < 0)
return tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return -ENOMEM;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -5321,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
- tx_buf->size, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
+ tx_buf->size, DMA_TO_DEVICE);
if (tx_buf->skb)
dev_kfree_skb_any(tx_buf->skb);
@@ -5609,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tso_headers =
dma_alloc_coherent(port->dev->dev.parent,
- MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
+ txq_pcpu->size * TSO_HEADER_SIZE,
&txq_pcpu->tso_headers_dma,
GFP_KERNEL);
if (!txq_pcpu->tso_headers)
@@ -5623,7 +5629,7 @@ cleanup:
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@@ -5647,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@@ -6212,12 +6218,15 @@ static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
+ struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+
dma_addr_t buf_dma_addr =
mvpp2_txdesc_dma_addr_get(port, desc);
size_t buf_sz =
mvpp2_txdesc_size_get(port, desc);
- dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
- buf_sz, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
+ dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+ buf_sz, DMA_TO_DEVICE);
mvpp2_txq_desc_put(txq);
}
@@ -6489,7 +6498,7 @@ out:
}
/* Finalize TX processing */
- if (txq_pcpu->count >= txq->done_pkts_coal)
+ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
mvpp2_txq_done(port, txq, txq_pcpu);
/* Set the timer in case not all frags were processed */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ff60cf7342ca..fc281712869b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}
-static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
- struct mlx5_core_dev *dev,
- struct mlx5_priv *priv)
+static void delayed_event_release(struct mlx5_device_context *dev_ctx,
+ struct mlx5_priv *priv)
{
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
struct mlx5_delayed_event *de;
struct mlx5_delayed_event *n;
+ struct list_head temp;
- /* stop delaying events */
- priv->is_accum_events = false;
+ INIT_LIST_HEAD(&temp);
+
+ spin_lock_irq(&priv->ctx_lock);
- /* fire all accumulated events before new event comes */
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
+ priv->is_accum_events = false;
+ list_splice_init(&priv->waiting_events_list, &temp);
+ if (!dev_ctx->context)
+ goto out;
+ list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
+
+out:
+ spin_unlock_irq(&priv->ctx_lock);
+
+ list_for_each_entry_safe(de, n, &temp, list) {
list_del(&de->list);
kfree(de);
}
}
-static void cleanup_delayed_evets(struct mlx5_priv *priv)
+/* Accumulate events that can arrive after mlx5_ib calls ib_register_device,
+ * until that interface is added to the events list.
+ */
+static void delayed_event_start(struct mlx5_priv *priv)
{
- struct mlx5_delayed_event *de;
- struct mlx5_delayed_event *n;
-
spin_lock_irq(&priv->ctx_lock);
- priv->is_accum_events = false;
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
- list_del(&de->list);
- kfree(de);
- }
+ priv->is_accum_events = true;
spin_unlock_irq(&priv->ctx_lock);
}
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
return;
dev_ctx->intf = intf;
- /* accumulating events that can come after mlx5_ib calls to
- * ib_register_device, till adding that interface to the events list.
- */
- priv->is_accum_events = true;
+ delayed_event_start(priv);
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
- fire_delayed_event_locked(dev_ctx, dev, priv);
-
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
if (dev_ctx->intf->pfault) {
if (priv->pfault) {
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
}
#endif
spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- /* delete all accumulated events */
- cleanup_delayed_evets(priv);
}
+
+ delayed_event_release(dev_ctx, priv);
+
+ if (!dev_ctx->context)
+ kfree(dev_ctx);
}
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (!dev_ctx)
return;
+ delayed_event_start(priv);
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
- return;
+ goto out;
intf->attach(dev, dev_ctx->context);
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- return;
+ goto out;
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
+
+out:
+ delayed_event_release(dev_ctx, priv);
}
void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
if (priv->is_accum_events)
add_delayed_event(priv, dev, event, param);
+ /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
+ * still in priv->ctx_list. In this case, only notify the dev_ctx if its
+ * ADDED or ATTACHED bit is set.
+ */
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event)
+ if (dev_ctx->intf->event &&
+ (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
+ test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
dev_ctx->intf->event(dev, dev_ctx->context, event, param);
spin_unlock_irqrestore(&priv->ctx_lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c1d384fca4dc..51c4cc00a186 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -41,6 +41,11 @@
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
+enum {
+ MLX5E_VENDOR_TC_GROUP_NUM = 7,
+ MLX5E_LOWEST_PRIO_GROUP = 0,
+};
+
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ u8 tc_group[IEEE_8021QAZ_MAX_TCS];
+ bool is_tc_group_6_exist = false;
+ bool is_zero_bw_ets_tc = false;
int err = 0;
int i;
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
- }
- for (i = 0; i < ets->ets_cap; i++) {
+ err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
+ if (err)
+ return err;
+
err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
if (err)
return err;
+
+ if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
+ tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
+ is_zero_bw_ets_tc = true;
+
+ if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
+ is_tc_group_6_exist = true;
+ }
+
+ /* Report 0% for zero-BW ETS TCs, if any exist */
+ if (is_zero_bw_ets_tc) {
+ for (i = 0; i < ets->ets_cap; i++)
+ if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
+ ets->tc_tx_bw[i] = 0;
+ }
+
+ /* Update tc_tsa based on fw setting */
+ for (i = 0; i < ets->ets_cap; i++) {
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+ else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
+ !is_tc_group_6_exist)
+ priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
}
-
memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
return err;
}
-enum {
- MLX5E_VENDOR_TC_GROUP_NUM = 7,
- MLX5E_ETS_TC_GROUP_NUM = 0,
-};
-
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
bool any_tc_mapped_to_ets = false;
+ bool ets_zero_bw = false;
int strict_group;
int i;
- for (i = 0; i <= max_tc; i++)
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ for (i = 0; i <= max_tc; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
any_tc_mapped_to_ets = true;
+ if (!ets->tc_tx_bw[i])
+ ets_zero_bw = true;
+ }
+ }
- strict_group = any_tc_mapped_to_ets ? 1 : 0;
+ /* strict group has higher priority than ets group */
+ strict_group = MLX5E_LOWEST_PRIO_GROUP;
+ if (any_tc_mapped_to_ets)
+ strict_group++;
+ if (ets_zero_bw)
+ strict_group++;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
tc_group[i] = strict_group++;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
+ tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
+ if (ets->tc_tx_bw[i] && ets_zero_bw)
+ tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
break;
}
}
@@ -146,9 +183,23 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
u8 *tc_group, int max_tc)
{
+ int bw_for_ets_zero_bw_tc = 0;
+ int last_ets_zero_bw_tc = -1;
+ int num_ets_zero_bw = 0;
int i;
for (i = 0; i <= max_tc; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
+ !ets->tc_tx_bw[i]) {
+ num_ets_zero_bw++;
+ last_ets_zero_bw_tc = i;
+ }
+ }
+
+ if (num_ets_zero_bw)
+ bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
+
+ for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i];
+ tc_tx_bw[i] = ets->tc_tx_bw[i] ?
+ ets->tc_tx_bw[i] :
+ bw_for_ets_zero_bw_tc;
break;
}
}
+
+ /* Make sure the total bw for ets zero bw group is 100% */
+ if (last_ets_zero_bw_tc != -1)
+ tc_tx_bw[last_ets_zero_bw_tc] +=
+ MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
+/* If any ETS TC is configured with 0% BW:
+ * Set ETS group #1 for all ETS TCs with non-zero BW; their sum must be 100%.
+ * Set group #0 for all the zero-BW ETS TCs and split the 100% BW equally
+ * between them.
+ * Report both group #0 and #1 as ETS type.
+ * All the TCs in group #0 will be reported with 0% BW.
+ */
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
-
return err;
}
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
/* Validate Bandwidth Sum */
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
- if (!ets->tc_tx_bw[i]) {
- netdev_err(netdev,
- "Failed to validate ETS: BW 0 is illegal\n");
- return -EINVAL;
- }
-
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
bw_sum += ets->tc_tx_bw[i];
- }
- }
if (bw_sum != 0 && bw_sum != 100) {
netdev_err(netdev,
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct ieee_ets ets;
if (pgid >= CEE_DCBX_MAX_PGS) {
netdev_err(netdev,
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
return;
}
- if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
- *bw_pct = 0;
+ mlx5e_dcbnl_ieee_getets(netdev, &ets);
+ *bw_pct = ets.tc_tx_bw[pgid];
}
static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
- memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
-
/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
ets.prio_tc[0] = 1;
ets.prio_tc[1] = 0;
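The en_dcbnl.c changes stop rejecting ETS TCs configured with 0% bandwidth and instead park them in their own group, splitting 100% across them so the hardware still sees a full allocation (MLX5E_MAX_BW_ALLOC / num, with the remainder added to the last zero-BW TC). The standalone sketch below shows only that split; it assumes every entry is an ETS-type TC and uses illustrative names.

#include <stdio.h>

#define MAX_BW_ALLOC 100   /* stands in for MLX5E_MAX_BW_ALLOC */

/* Split 100% evenly across the zero-BW TCs, giving the remainder to the
 * last one so the group still sums to exactly 100%.
 */
static void split_zero_bw(const int tx_bw_in[], int tx_bw_out[], int ntc)
{
	int zero_idx[8], nzero = 0;

	for (int i = 0; i < ntc; i++) {
		tx_bw_out[i] = tx_bw_in[i];
		if (!tx_bw_in[i])
			zero_idx[nzero++] = i;
	}
	if (!nzero)
		return;

	int share = MAX_BW_ALLOC / nzero;
	for (int i = 0; i < nzero; i++)
		tx_bw_out[zero_idx[i]] = share;
	tx_bw_out[zero_idx[nzero - 1]] += MAX_BW_ALLOC % nzero;
}

int main(void)
{
	int in[4] = { 0, 0, 0, 50 };   /* three zero-BW ETS TCs */
	int out[4];

	split_zero_bw(in, out, 4);
	for (int i = 0; i < 4; i++)
		printf("tc%d -> %d%%\n", i, out[i]);   /* 33 33 34 50 */
	return 0;
}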
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1aa2028ed995..9ba1f72060aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
+ struct ip_tunnel_info tun_info;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
void *mod_hdr_actions;
+ int mirred_ifindex;
};
enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct ip_tunnel_info *tun_info,
+ struct net_device *mirred_dev,
+ struct net_device **encap_dev,
+ struct mlx5e_tc_flow *flow);
+
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5_flow_handle *rule;
+ struct net_device *out_dev, *encap_dev = NULL;
+ struct mlx5_flow_handle *rule = NULL;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *out_priv;
int err;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ out_dev = __dev_get_by_index(dev_net(priv->netdev),
+ attr->parse_attr->mirred_ifindex);
+ err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+ out_dev, &encap_dev, flow);
+ if (err) {
+ rule = ERR_PTR(err);
+ if (err != -EAGAIN)
+ goto err_attach_encap;
+ }
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
+ }
+
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err) {
rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
}
- rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
- if (IS_ERR(rule))
- goto err_add_rule;
-
+ /* we get here when either (1) there was no error (rule is still NULL)
+ * or (2) there is an encap action and we got -EAGAIN (no valid neigh)
+ */
+ if (rule != ERR_PTR(-EAGAIN)) {
+ rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+ if (IS_ERR(rule))
+ goto err_add_rule;
+ }
return rule;
err_add_rule:
@@ -361,6 +391,7 @@ err_mod_hdr:
err_add_vlan:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
+err_attach_encap:
return rule;
}
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5e_tc_flow *flow;
int err;
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
mlx5e_rep_queue_neigh_stats_work(priv);
list_for_each_entry(flow, &e->flows, encap) {
- flow->esw_attr->encap_id = e->encap_id;
- flow->rule = mlx5e_tc_add_fdb_flow(priv,
- flow->esw_attr->parse_attr,
- flow);
+ esw_attr = flow->esw_attr;
+ esw_attr->encap_id = e->encap_id;
+ flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow *flow;
- struct mlx5_fc *counter;
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- counter = mlx5_flow_rule_counter(flow->rule);
- mlx5_del_flow_rules(flow->rule);
- mlx5_fc_destroy(priv->mdev, counter);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
}
}
@@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out_dev, *encap_dev = NULL;
+ struct net_device *out_dev;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
- err = mlx5e_attach_encap(priv, info,
- out_dev, &encap_dev, flow);
- if (err && err != -EAGAIN)
- return err;
+ parse_attr->mirred_ifindex = ifindex;
+ parse_attr->tun_info = *info;
+ attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- out_priv = netdev_priv(encap_dev);
- rpriv = out_priv->ppriv;
- attr->out_rep = rpriv->rep;
- attr->parse_attr = parse_attr;
+ /* attr->out_rep is resolved when we handle encap */
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
@@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
- goto err_handle_encap_flow;
+ goto err_free;
flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_free;
+ if (err != -EAGAIN)
+ goto err_free;
}
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ if (err != -EAGAIN)
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
@@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
err_del_rule:
mlx5e_tc_del_flow(priv, flow);
-err_handle_encap_flow:
- if (err == -EAGAIN) {
- err = rhashtable_insert_fast(&tc->ht, &flow->node,
- tc->ht_params);
- if (err)
- mlx5e_tc_del_flow(priv, flow);
- else
- return 0;
- }
-
err_free:
kvfree(parse_attr);
kfree(flow);
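In en_tc.c the encap attach moves into mlx5e_tc_add_fdb_flow(), and -EAGAIN from it now means "neighbour not resolved yet": the flow stays cached without the OFFLOADED flag and is offloaded later from the neighbour-update path. Below is a small sketch of that calling convention, with invented names and a bool standing in for the neighbour state; it is a shape illustration only.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct flow {
	bool offloaded;     /* mirrors MLX5E_TC_FLOW_OFFLOADED */
	bool neigh_valid;   /* stands in for a resolved encap neighbour */
};

/* Returns 0 on success, -EAGAIN when the flow must wait for the neighbour
 * to resolve, any other negative value on a hard failure.
 */
static int add_flow(struct flow *f)
{
	if (!f->neigh_valid)
		return -EAGAIN;   /* keep the flow cached, offload it later */
	f->offloaded = true;
	return 0;
}

static int configure_flow(struct flow *f)
{
	int err = add_flow(f);

	if (err && err != -EAGAIN)
		return err;       /* hard error: the caller frees the flow */

	/* -EAGAIN: the flow stays cached but is not marked offloaded; a
	 * later neighbour-update event re-runs the offload step.
	 */
	printf("flow %s\n", f->offloaded ? "offloaded" : "pending");
	return 0;
}

int main(void)
{
	struct flow pending = { .neigh_valid = false };
	struct flow ready   = { .neigh_valid = true  };

	configure_flow(&pending);
	configure_flow(&ready);
	return 0;
}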
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 185dcac0abe7..1a0e797ad001 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -354,10 +354,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
- spin_lock(&health->wq_lock);
+ spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
- spin_unlock(&health->wq_lock);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
cancel_delayed_work_sync(&dev->priv.health.recover_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1975d4388d4f..e07061f565d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group)
+{
+ u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+ void *ets_tcn_conf;
+ int err;
+
+ err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+ tc_configuration[tc]);
+
+ *tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+ group);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0a5fc9f8545f..de64cedf8b26 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -127,6 +127,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
*/
if (!switchdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
+ if (!nfp_netdev_is_nfp_repr(out_dev))
+ return -EOPNOTSUPP;
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
if (!output->port)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 5efef8001edf..3256e5cbad27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
plat_dat->axi->axi_wr_osr_lmt--;
}
- if (of_property_read_u32(np, "read,read-requests",
+ if (of_property_read_u32(np, "snps,read-requests",
&plat_dat->axi->axi_rd_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 9e616da0745d..645ef949705f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_to_use = 1;
plat->tx_queues_to_use = 1;
+ /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
+ * to always set this, otherwise Queue will be classified as AVB
+ * (because MTL_QUEUE_AVB = 0).
+ */
+ plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+ plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+
rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
if (!rx_node)
return;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 5dea2063dbc8..0bcc07f346c3 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -197,8 +197,8 @@ static int ipvtap_init(void)
{
int err;
- err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
-
+ err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
+ THIS_MODULE);
if (err)
goto out1;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index f62aea2fcfa9..9a10029caf83 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -204,8 +204,8 @@ static int macvtap_init(void)
{
int err;
- err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
-
+ err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
+ THIS_MODULE);
if (err)
goto out1;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 21b71ae947fd..1b10fcc6a58d 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
+ if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+ sk_free(&q->sk);
+ goto err;
+ }
RCU_INIT_POINTER(q->sock.wq, &q->wq);
init_waitqueue_head(&q->wq.wait);
@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
sock_set_flag(&q->sk, SOCK_ZEROCOPY);
- err = -ENOMEM;
- if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
- goto err_array;
-
err = tap_set_queue(tap, file, q);
- if (err)
- goto err_queue;
+ if (err) {
+ /* tap_sock_destruct() will take care of freeing skb_array */
+ goto err_put;
+ }
dev_put(tap->dev);
rtnl_unlock();
return err;
-err_queue:
- skb_array_cleanup(&q->skb_array);
-err_array:
+err_put:
sock_put(&q->sk);
err:
if (tap)
@@ -1249,8 +1249,8 @@ static int tap_list_add(dev_t major, const char *device_name)
return 0;
}
-int tap_create_cdev(struct cdev *tap_cdev,
- dev_t *tap_major, const char *device_name)
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+ const char *device_name, struct module *module)
{
int err;
@@ -1259,6 +1259,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
goto out1;
cdev_init(tap_cdev, &tap_fops);
+ tap_cdev->owner = module;
err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
if (err)
goto out2;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ea29da91ea5a..81d0787a6dab 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1444,6 +1444,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
buflen += SKB_DATA_ALIGN(len + pad);
rcu_read_unlock();
+ alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
@@ -2253,7 +2254,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
err = dev_get_valid_name(net, dev, name);
- if (err)
+ if (err < 0)
goto err_free_dev;
dev_net_set(dev, net);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 52ea80bcd639..3e7a3ac3a362 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -561,6 +561,7 @@ static const struct driver_info wwan_info = {
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
#define UBLOX_VENDOR_ID 0x1546
+#define TPLINK_VENDOR_ID 0x2357
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -813,6 +814,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+ /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
@@ -864,6 +872,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (kernel_ulong_t)&wwan_info,
}, {
+ /* Huawei ME906 and ME909 */
+ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* ZTE modules */
USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 941ece08ba78..d51d9abf7986 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -615,6 +615,7 @@ enum rtl8152_flags {
#define VENDOR_ID_LENOVO 0x17ef
#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
+#define VENDOR_ID_TPLINK 0x2357
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -5319,6 +5320,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
{}
};
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index af075e998944..be49d0f79381 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_abort_aen_ops(ctrl);
/* wait for all io that had to be aborted */
- spin_lock_irqsave(&ctrl->lock, flags);
+ spin_lock_irq(&ctrl->lock);
wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
ctrl->flags &= ~FCCTRL_TERMIO;
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ spin_unlock_irq(&ctrl->lock);
nvme_fc_term_aen_ops(ctrl);
@@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
- int ret, idx;
+ int ret, idx, retry;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->rport = rport;
ctrl->dev = lport->dev;
ctrl->cnum = idx;
+ init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
kref_init(&ctrl->ref);
@@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- ret = nvme_fc_create_association(ctrl);
+ /*
+ * It's possible that transactions used to create the association
+ * may fail. Examples: CreateAssociation LS or CreateIOConnection
+ * LS gets dropped/corrupted/fails; or a frame gets dropped or a
+ * command times out for one of the actions to init the controller
+ * (Connect, Get/Set_Property, Set_Features, etc). Many of these
+ * transport errors (frame drop, LS failure) inherently must kill
+ * the association. The transport is coded so that any command used
+ * to create the association (prior to a LIVE state transition
+ * while NEW or RECONNECTING) will fail if it completes in error or
+ * times out.
+ *
+ * As such: as the connect request was most likely due to a
+ * udev event that discovered the remote port, meaning there is
+ * not an admin or script there to restart if the connect
+ * request fails, retry the initial connection creation up to
+ * three times before giving up and declaring failure.
+ */
+ for (retry = 0; retry < 3; retry++) {
+ ret = nvme_fc_create_association(ctrl);
+ if (!ret)
+ break;
+ }
+
if (ret) {
+ /* couldn't schedule retry - fail out */
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
+
ctrl->ctrl.opts = NULL;
+
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
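The nvme-fc hunk wraps association creation in a bounded retry loop because a udev-triggered connect has nobody around to retry it by hand. A trivial standalone sketch of the same bounded-retry shape, with made-up names and a counter standing in for transient transport failures:

#include <stdio.h>

/* Hypothetical association setup that fails a few times before succeeding. */
static int create_association(int *transient_failures)
{
	if (*transient_failures > 0) {
		(*transient_failures)--;
		return -1;        /* transient failure (dropped LS, timeout, ...) */
	}
	return 0;
}

int main(void)
{
	int transient_failures = 2;
	int ret = -1;

	/* Retry the initial connect a bounded number of times, since there
	 * is no admin or script around to restart it on failure.
	 */
	for (int retry = 0; retry < 3; retry++) {
		ret = create_association(&transient_failures);
		if (!ret)
			break;
	}
	printf(ret ? "connect failed\n" : "connect succeeded\n");
	return ret ? 1 : 0;
}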
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 92a03ff5fb4d..87bac27ec64b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
return;
+ if (nvme_rdma_queue_idx(queue) == 0) {
+ nvme_rdma_free_qe(queue->device->dev,
+ &queue->ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ }
+
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
}
@@ -739,8 +745,6 @@ out:
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset))
+ if (IS_ERR(ctrl->ctrl.admin_tagset)) {
+ error = PTR_ERR(ctrl->ctrl.admin_tagset);
goto out_free_queue;
+ }
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
if (new) {
ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset))
+ if (IS_ERR(ctrl->ctrl.tagset)) {
+ ret = PTR_ERR(ctrl->ctrl.tagset);
goto out_free_io_queues;
+ }
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
if (IS_ERR(ctrl->ctrl.connect_q)) {
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1b208beeef50..645ba7eee35d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ u32 old_sqhd, new_sqhd;
+ u16 sqhd;
+
if (status)
nvmet_set_status(req, status);
- if (req->sq->size)
- req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
- req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
+ if (req->sq->size) {
+ do {
+ old_sqhd = req->sq->sqhd;
+ new_sqhd = (old_sqhd + 1) % req->sq->size;
+ } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
+ old_sqhd);
+ }
+ sqhd = req->sq->sqhd & 0x0000FFFF;
+ req->rsp->sq_head = cpu_to_le16(sqhd);
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
req->rsp->command_id = req->cmd->common.command_id;
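__nvmet_req_complete() now advances sqhd with a cmpxchg() loop so concurrent completions cannot lose an increment, widening the field to u32 and truncating to 16 bits only when reporting. A user-space equivalent of that lock-free wraparound increment, using C11 atomics and illustrative names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Advance a queue-head index modulo the queue size without a lock, using a
 * compare-and-swap loop (the user-space analogue of cmpxchg()).
 */
static uint32_t advance_sqhd(_Atomic uint32_t *sqhd, uint32_t size)
{
	uint32_t old, new;

	do {
		old = atomic_load(sqhd);
		new = (old + 1) % size;
	} while (!atomic_compare_exchange_weak(sqhd, &old, new));

	return new;
}

int main(void)
{
	_Atomic uint32_t sqhd = 0;

	for (int i = 0; i < 5; i++)
		printf("sqhd = %u\n", advance_sqhd(&sqhd, 4));  /* 1 2 3 0 1 */
	return 0;
}

The weak compare-exchange may fail spuriously, which is harmless here because the loop simply recomputes the next index from the current value.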
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7b8e20adf760..87e429bfcd8a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,7 +74,7 @@ struct nvmet_sq {
struct percpu_ref ref;
u16 qid;
u16 size;
- u16 sqhd;
+ u32 sqhd;
struct completion free_done;
struct completion confirm_done;
};
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 73ebad6634a7..89c887ea5557 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -111,6 +111,8 @@
#define MVEBU_COMPHY_CONF6_40B BIT(18)
#define MVEBU_COMPHY_SELECTOR 0x1140
#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4)
+#define MVEBU_COMPHY_PIPE_SELECTOR 0x1144
+#define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4)
#define MVEBU_COMPHY_LANES 6
#define MVEBU_COMPHY_PORTS 3
@@ -468,13 +470,17 @@ static int mvebu_comphy_power_on(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
- int ret;
- u32 mux, val;
+ int ret, mux;
+ u32 val;
mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode);
if (mux < 0)
return -ENOTSUPP;
+ regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+ val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+ regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val);
val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id);
@@ -526,6 +532,10 @@ static int mvebu_comphy_power_off(struct phy *phy)
val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val);
+ regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+ val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+ regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
return 0;
}
@@ -576,8 +586,8 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
return PTR_ERR(priv->regmap);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (!priv->base)
- return -ENOMEM;
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
for_each_available_child_of_node(pdev->dev.of_node, child) {
struct mvebu_comphy_lane *lane;
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index e3baad78521f..721a2a1c97ef 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -27,6 +27,7 @@
/* banks shared by multiple phys */
#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
#define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */
+#define SSUSB_SIFSLV_V1_CHIP 0x300 /* shared by u3 phys */
/* u2 phy bank */
#define SSUSB_SIFSLV_V1_U2PHY_COM 0x000
/* u3/pcie/sata phy banks */
@@ -762,7 +763,7 @@ static void phy_v1_banks_init(struct mtk_tphy *tphy,
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC;
- u3_banks->chip = NULL;
+ u3_banks->chip = tphy->sif_base + SSUSB_SIFSLV_V1_CHIP;
u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA;
break;
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 4d2c57f21d76..a958c9bced01 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -443,14 +443,34 @@ static inline int property_enable(struct rockchip_typec_phy *tcphy,
return regmap_write(tcphy->grf_regs, reg->offset, val | mask);
}
+static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy)
+{
+ u16 tx_ana_ctrl_reg_1;
+
+ /*
+ * Select the polarity of the xcvr:
+ * 1, Reverses the polarity (if TYPEC, pulls up aux_p and pulls
+ * down aux_m)
+ * 0, Normal polarity (if TYPEC, pulls up aux_m and pulls down
+ * aux_p)
+ */
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ if (!tcphy->flip)
+ tx_ana_ctrl_reg_1 |= BIT(12);
+ else
+ tx_ana_ctrl_reg_1 &= ~BIT(12);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+}
+
static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
{
+ u16 tx_ana_ctrl_reg_1;
u16 rdata, rdata2, val;
/* disable txda_cal_latch_en for rewrite the calibration values */
- rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata & 0xdfff;
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 &= ~BIT(13);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
@@ -472,9 +492,8 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
* Activate this signal for 1 clock cycle to sample new calibration
* values.
*/
- rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata | 0x2000;
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(13);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
usleep_range(150, 200);
/* set TX Voltage Level and TX Deemphasis to 0 */
@@ -482,8 +501,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
/* re-enable decap */
writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
- writel(0x2008, tcphy->base + TX_ANA_CTRL_REG_1);
- writel(0x2018, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(3);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(4);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
@@ -494,8 +515,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
/* re-enables Bandgap reference for LDO */
- writel(0x2098, tcphy->base + TX_ANA_CTRL_REG_1);
- writel(0x2198, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(7);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(8);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* re-enables the transmitter pre-driver, driver data selection MUX,
@@ -505,27 +528,26 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
/*
- * BIT 12: Controls auxda_polarity, which selects the polarity of the
- * xcvr:
- * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
- * down aux_m)
- * 0, Normal polarity (if TYPE_C, pulls up aux_m and pulls down
- * aux_p)
+ * Do some magic undocumented stuff, some of which appears to
+ * undo the "re-enables Bandgap reference for LDO" above.
*/
- val = 0xa078;
- if (!tcphy->flip)
- val |= BIT(12);
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(15);
+ tx_ana_ctrl_reg_1 &= ~BIT(8);
+ tx_ana_ctrl_reg_1 &= ~BIT(7);
+ tx_ana_ctrl_reg_1 |= BIT(6);
+ tx_ana_ctrl_reg_1 |= BIT(5);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
/*
- * Controls low_power_swing_en, set the voltage swing of the driver
- * to 400mv. The values below are peak to peak (differential) values.
+ * Controls low_power_swing_en, don't set the voltage swing of the
+ * driver to 400mv. The values below are peak to peak (differential)
+ * values.
*/
- writel(4, tcphy->base + TXDA_COEFF_CALC_CTRL);
+ writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL);
writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
/* Controls tx_high_z_tm_en */
@@ -555,6 +577,7 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
reset_control_deassert(tcphy->tcphy_rst);
property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip);
+ tcphy_dp_aux_set_flip(tcphy);
tcphy_cfg_24m(tcphy);
@@ -685,8 +708,11 @@ static int rockchip_usb3_phy_power_on(struct phy *phy)
if (tcphy->mode == new_mode)
goto unlock_ret;
- if (tcphy->mode == MODE_DISCONNECT)
- tcphy_phy_init(tcphy, new_mode);
+ if (tcphy->mode == MODE_DISCONNECT) {
+ ret = tcphy_phy_init(tcphy, new_mode);
+ if (ret)
+ goto unlock_ret;
+ }
/* wait TCPHY for pipe ready */
for (timeout = 0; timeout < 100; timeout++) {
@@ -760,10 +786,12 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
*/
if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) {
tcphy_phy_deinit(tcphy);
- tcphy_phy_init(tcphy, new_mode);
+ ret = tcphy_phy_init(tcphy, new_mode);
} else if (tcphy->mode == MODE_DISCONNECT) {
- tcphy_phy_init(tcphy, new_mode);
+ ret = tcphy_phy_init(tcphy, new_mode);
}
+ if (ret)
+ goto unlock_ret;
ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
val, val & DP_MODE_A2, 1000,
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 3cbcb2537657..4307bf0013e1 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -454,6 +454,8 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
char *name;
name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
np = of_find_node_by_name(np, name);
kfree(name);
}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 3f6b34febbf1..433af328d981 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -534,8 +534,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
continue;
irq = irq_find_mapping(gc->irqdomain, irqnr + i);
generic_handle_irq(irq);
- /* Clear interrupt */
+
+ /* Clear interrupt.
+ * We must read the pin register again, in case the
+ * value was changed while executing
+ * generic_handle_irq() above.
+ */
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ regval = readl(regs + i);
writel(regval, regs + i);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
ret = IRQ_HANDLED;
}
}
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 3e40d4245512..9c950bbf07ba 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
ret = mcp_read(mcp, MCP_GPIO, &status);
if (ret < 0)
status = 0;
- else
+ else {
+ mcp->cached_gpio = status;
status = !!(status & (1 << offset));
-
- mcp->cached_gpio = status;
+ }
mutex_unlock(&mcp->lock);
return status;
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index bb792a52248b..e03fa31446ca 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,6 +33,7 @@
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/spinlock.h>
#include <asm/intel_pmc_ipc.h>
@@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev {
/* gcr */
void __iomem *gcr_mem_base;
bool has_gcr_regs;
+ spinlock_t gcr_lock;
/* punit */
struct platform_device *punit_dev;
@@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data)
{
int ret;
- mutex_lock(&ipclock);
+ spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0) {
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return ret;
}
*data = readl(ipcdev.gcr_mem_base + offset);
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return 0;
}
@@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data)
{
int ret;
- mutex_lock(&ipclock);
+ spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0) {
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return ret;
}
writel(data, ipcdev.gcr_mem_base + offset);
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return 0;
}
@@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
u32 new_val;
int ret = 0;
- mutex_lock(&ipclock);
+ spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0)
@@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
}
gcr_ipc_unlock:
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return ret;
}
EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
@@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id)
static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- resource_size_t pci_resource;
+ struct intel_pmc_ipc_dev *pmc = &ipcdev;
int ret;
- int len;
- ipcdev.dev = &pci_dev_get(pdev)->dev;
- ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
+ /* Only one PMC is supported */
+ if (pmc->dev)
+ return -EBUSY;
- ret = pci_enable_device(pdev);
+ pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
+
+ spin_lock_init(&ipcdev.gcr_lock);
+
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- ret = pci_request_regions(pdev, "intel_pmc_ipc");
+ ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
if (ret)
return ret;
- pci_resource = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
- if (!pci_resource || !len) {
- dev_err(&pdev->dev, "Failed to get resource\n");
- return -ENOMEM;
- }
+ init_completion(&pmc->cmd_complete);
- init_completion(&ipcdev.cmd_complete);
+ pmc->ipc_base = pcim_iomap_table(pdev)[0];
- if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+ ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc",
+ pmc);
+ if (ret) {
dev_err(&pdev->dev, "Failed to request irq\n");
- return -EBUSY;
+ return ret;
}
- ipcdev.ipc_base = ioremap_nocache(pci_resource, len);
- if (!ipcdev.ipc_base) {
- dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
- free_irq(pdev->irq, &ipcdev);
- ret = -ENOMEM;
- }
+ pmc->dev = &pdev->dev;
- return ret;
-}
+ pci_set_drvdata(pdev, pmc);
-static void ipc_pci_remove(struct pci_dev *pdev)
-{
- free_irq(pdev->irq, &ipcdev);
- pci_release_regions(pdev);
- pci_dev_put(pdev);
- iounmap(ipcdev.ipc_base);
- ipcdev.dev = NULL;
+ return 0;
}
static const struct pci_device_id ipc_pci_ids[] = {
@@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = {
.name = "intel_pmc_ipc",
.id_table = ipc_pci_ids,
.probe = ipc_pci_probe,
- .remove = ipc_pci_remove,
};
static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
@@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE;
+ res->end = res->start + size - 1;
+
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
- if (!request_mem_region(res->start, size, pdev->name)) {
- dev_err(&pdev->dev, "Failed to request ipc resource\n");
- return -EBUSY;
- }
- addr = ioremap_nocache(res->start, size);
- if (!addr) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
- release_mem_region(res->start, size);
- return -ENOMEM;
- }
ipcdev.ipc_base = addr;
ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
@@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
static int ipc_plat_probe(struct platform_device *pdev)
{
- struct resource *res;
int ret;
ipcdev.dev = &pdev->dev;
ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
init_completion(&ipcdev.cmd_complete);
+ spin_lock_init(&ipcdev.gcr_lock);
ipcdev.irq = platform_get_irq(pdev, 0);
if (ipcdev.irq < 0) {
@@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev)
ret = ipc_create_pmc_devices();
if (ret) {
dev_err(&pdev->dev, "Failed to create pmc devices\n");
- goto err_device;
+ return ret;
}
- if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND,
- "intel_pmc_ipc", &ipcdev)) {
+ if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND,
+ "intel_pmc_ipc", &ipcdev)) {
dev_err(&pdev->dev, "Failed to request irq\n");
ret = -EBUSY;
goto err_irq;
@@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev)
return 0;
err_sys:
- free_irq(ipcdev.irq, &ipcdev);
+ devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
err_irq:
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
platform_device_unregister(ipcdev.telemetry_dev);
-err_device:
- iounmap(ipcdev.ipc_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_IPC_INDEX);
- if (res) {
- release_mem_region(res->start,
- PLAT_RESOURCE_IPC_SIZE +
- PLAT_RESOURCE_GCR_SIZE);
- }
+
return ret;
}
static int ipc_plat_remove(struct platform_device *pdev)
{
- struct resource *res;
-
sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
- free_irq(ipcdev.irq, &ipcdev);
+ devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
platform_device_unregister(ipcdev.telemetry_dev);
- iounmap(ipcdev.ipc_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_IPC_INDEX);
- if (res) {
- release_mem_region(res->start,
- PLAT_RESOURCE_IPC_SIZE +
- PLAT_RESOURCE_GCR_SIZE);
- }
ipcdev.dev = NULL;
return 0;
}
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f18b36dd57dd..376a99b7cf5d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
case AXP803_DCDC3:
return !!(reg & BIT(6));
case AXP803_DCDC6:
- return !!(reg & BIT(7));
+ return !!(reg & BIT(5));
}
break;
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index ef2be56460fe..790a4a73ea2c 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
};
#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \
- [RN5T618_##rid] = { \
+ { \
.name = #rid, \
.of_match = of_match_ptr(#rid), \
.regulators_node = of_match_ptr("regulators"), \
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 82ac331d9125..84752152d41f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->next_port_scan = jiffies;
+ adapter->erp_action.adapter = adapter;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;
+ port->erp_action.adapter = adapter;
+ port->erp_action.port = port;
+
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 37408f5f81ce..ec2532ee1822 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
- erp_action->sdev = sdev;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != sdev);
if (!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_port(port);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_adapter(adapter);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+ WARN_ON_ONCE(erp_action->port != NULL);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return NULL;
}
- erp_action->adapter = adapter;
+ WARN_ON_ONCE(erp_action->adapter != adapter);
+ memset(&erp_action->list, 0, sizeof(erp_action->list));
+ memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+ erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+ erp_action->fsf_req_id = 0;
erp_action->action = need;
erp_action->status = act_status;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ec3ddd1d31d5..6cf8732627e0 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
+ zfcp_sdev->erp_action.adapter = adapter;
+ zfcp_sdev->erp_action.sdev = sdev;
+
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
+ zfcp_sdev->erp_action.port = port;
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
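The zfcp hunks set the erp_action back-pointers once at allocation time and replace the old memset() with WARN_ON_ONCE() checks plus selective clearing of the transient fields, so re-arming an action can no longer wipe its owners. A compile-only sketch of that shape (field names illustrative, assert() standing in for WARN_ON_ONCE()):

#include <assert.h>

struct adapter;  /* opaque owners, set up once at object creation */
struct port;

struct erp_action {
	struct adapter *adapter;   /* set once, never cleared */
	struct port *port;         /* set once, never cleared */
	unsigned long fsf_req_id;  /* transient, reset on every setup */
	int step;                  /* transient, reset on every setup */
};

static void erp_action_rearm(struct erp_action *act,
			     struct adapter *adapter, struct port *port)
{
	assert(act->adapter == adapter);   /* owners must already match */
	assert(act->port == port);
	act->fsf_req_id = 0;               /* clear only the per-run state */
	act->step = 0;
}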
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 97d269f16888..1bc623ad3faf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
return -ENOMEM;
aac_fib_init(fibctx);
- mutex_lock(&dev->ioctl_mutex);
- dev->adapter_shutdown = 1;
- mutex_unlock(&dev->ioctl_mutex);
+ if (!dev->adapter_shutdown) {
+ mutex_lock(&dev->ioctl_mutex);
+ dev->adapter_shutdown = 1;
+ mutex_unlock(&dev->ioctl_mutex);
+ }
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 62beb2596466..c9252b138c1f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1551,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac)
{
int i;
+ mutex_lock(&aac->ioctl_mutex);
aac->adapter_shutdown = 1;
- aac_send_shutdown(aac);
+ mutex_unlock(&aac->ioctl_mutex);
if (aac->aif_thread) {
int i;
@@ -1565,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac)
}
kthread_stop(aac->thread);
}
+
+ aac_send_shutdown(aac);
+
aac_adapter_disable_int(aac);
+
if (aac_is_src(aac)) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9abe81021484..4ed3d26ffdde 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
memset(id_ctlr, 0, sizeof(*id_ctlr));
rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
if (!rc)
- if (id_ctlr->configured_logical_drive_count < 256)
+ if (id_ctlr->configured_logical_drive_count < 255)
*nlocals = id_ctlr->configured_logical_drive_count;
else
*nlocals = le16_to_cpu(
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 937209805baf..3bd956d3bc5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@@ -3175,8 +3177,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
-
if (ha->mqenable) {
bool mq = false;
bool startit = false;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9cf6a80fe297..ad3ea24f0885 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
ret = scsi_setup_cmnd(sdev, req);
out:
- if (ret != BLKPREP_OK)
- cmd->flags &= ~SCMD_INITIALIZED;
return scsi_prep_return(q, req, ret);
}
@@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req)
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scatterlist *sg;
- int ret;
scsi_init_command(sdev, cmd);
@@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req)
blk_mq_start_request(req);
- ret = scsi_setup_cmnd(sdev, req);
- if (ret != BLK_STS_OK)
- cmd->flags &= ~SCMD_INITIALIZED;
- return ret;
+ return scsi_setup_cmnd(sdev, req);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0419c2298eab..aa28874e8fb9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val > SG_MAX_QUEUE)
+ if (val >= SG_MAX_QUEUE)
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 6c7d7a460689..568e1c65aa82 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -99,11 +99,6 @@
/* A3700_SPI_IF_TIME_REG */
#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
-/* Flags and macros for struct a3700_spi */
-#define A3700_INSTR_CNT 1
-#define A3700_ADDR_CNT 3
-#define A3700_DUMMY_CNT 1
-
struct a3700_spi {
struct spi_master *master;
void __iomem *base;
@@ -117,9 +112,6 @@ struct a3700_spi {
u8 byte_len;
u32 wait_mask;
struct completion done;
- u32 addr_cnt;
- u32 instr_cnt;
- size_t hdr_cnt;
};
static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
@@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
}
static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
- unsigned int pin_mode)
+ unsigned int pin_mode, bool receiving)
{
u32 val;
@@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
break;
case SPI_NBITS_QUAD:
val |= A3700_SPI_DATA_PIN1;
+ /* RX during address reception uses 4-pin */
+ if (receiving)
+ val |= A3700_SPI_ADDR_PIN;
break;
default:
dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
@@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
- return true;
+ /* Timeout was reached */
+ return false;
}
static bool a3700_spi_transfer_wait(struct spi_device *spi,
@@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
{
- u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0;
+ unsigned int addr_cnt;
u32 val = 0;
/* Clear the header registers */
spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
/* Set header counters */
if (a3700_spi->tx_buf) {
- if (a3700_spi->buf_len <= a3700_spi->instr_cnt) {
- instr_cnt = a3700_spi->buf_len;
- } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt +
- a3700_spi->addr_cnt)) {
- instr_cnt = a3700_spi->instr_cnt;
- addr_cnt = a3700_spi->buf_len - instr_cnt;
- } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) {
- instr_cnt = a3700_spi->instr_cnt;
- addr_cnt = a3700_spi->addr_cnt;
- /* Need to handle the normal write case with 1 byte
- * data
- */
- if (!a3700_spi->tx_buf[instr_cnt + addr_cnt])
- dummy_cnt = a3700_spi->buf_len - instr_cnt -
- addr_cnt;
+ /*
+ * When TX data is not 4-byte aligned, unexpected bytes will be
+ * shifted out of the SPI output register, since it always shifts
+ * out whole 4-byte words. This might cause an incorrect transaction
+ * with some devices. To avoid that, use the SPI header count feature
+ * to transfer up to 3 bytes of data first, so the rest of the data
+ * is 4-byte aligned.
+ */
+ addr_cnt = a3700_spi->buf_len % 4;
+ if (addr_cnt) {
+ val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+ << A3700_SPI_ADDR_CNT_BIT;
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+ /* Update the buffer length to be transferred */
+ a3700_spi->buf_len -= addr_cnt;
+
+ /* transfer 1~3 bytes through address count */
+ val = 0;
+ while (addr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
}
- val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
- << A3700_SPI_INSTR_CNT_BIT);
- val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
- << A3700_SPI_ADDR_CNT_BIT);
- val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
- << A3700_SPI_DUMMY_CNT_BIT);
}
- spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
-
- /* Update the buffer length to be transferred */
- a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
-
- /* Set Instruction */
- val = 0;
- while (instr_cnt--) {
- val = (val << 8) | a3700_spi->tx_buf[0];
- a3700_spi->tx_buf++;
- }
- spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
-
- /* Set Address */
- val = 0;
- while (addr_cnt--) {
- val = (val << 8) | a3700_spi->tx_buf[0];
- a3700_spi->tx_buf++;
- }
- spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
}
static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
@@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
{
u32 val;
- int i = 0;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = 0;
- if (a3700_spi->buf_len >= 4) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
- spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
-
- a3700_spi->buf_len -= 4;
- a3700_spi->tx_buf += 4;
- } else {
- /*
- * If the remained buffer length is less than 4-bytes,
- * we should pad the write buffer with all ones. So that
- * it avoids overwrite the unexpected bytes following
- * the last one.
- */
- val = GENMASK(31, 0);
- while (a3700_spi->buf_len) {
- val &= ~(0xff << (8 * i));
- val |= *a3700_spi->tx_buf++ << (8 * i);
- i++;
- a3700_spi->buf_len--;
-
- spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
- val);
- }
- break;
- }
+ val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+ a3700_spi->buf_len -= 4;
+ a3700_spi->tx_buf += 4;
}
return 0;
@@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi->rx_buf = xfer->rx_buf;
a3700_spi->buf_len = xfer->len;
- /* SPI transfer headers */
- a3700_spi_header_set(a3700_spi);
-
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
else if (xfer->rx_buf)
nbits = xfer->rx_nbits;
- a3700_spi_pin_mode_set(a3700_spi, nbits);
+ a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
+
+ /* Flush the FIFOs */
+ a3700_spi_fifo_flush(a3700_spi);
+
+ /* Transfer first bytes of data when buffer is not 4-byte aligned */
+ a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
/* Set read data length */
@@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
dev_err(&spi->dev, "wait wfifo empty timed out\n");
return -ETIMEDOUT;
}
- } else {
- /*
- * If the instruction in SPI_INSTR does not require data
- * to be written to the SPI device, wait until SPI_RDY
- * is 1 for the SPI interface to be in idle.
- */
- if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
- dev_err(&spi->dev, "wait xfer ready timed out\n");
- return -ETIMEDOUT;
- }
+ }
+
+ if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+ dev_err(&spi->dev, "wait xfer ready timed out\n");
+ return -ETIMEDOUT;
}
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
@@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
memset(spi, 0, sizeof(struct a3700_spi));
spi->master = master;
- spi->instr_cnt = A3700_INSTR_CNT;
- spi->addr_cnt = A3700_ADDR_CNT;
- spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
- A3700_DUMMY_CNT;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(dev, res);
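The spi-armada-3700 hunks above all serve one idea: the controller shifts data out only as whole 32-bit words, so a transfer whose length is not a multiple of four sends its leading 1-3 bytes through the header (address count) register and the rest as aligned words. Below is a stand-alone user-space sketch of that split; the buffer contents are invented and the register writes are mocked with printf, so it only models the arithmetic, not the real driver.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Model of the 4-byte alignment split done by a3700_spi_header_set(). */
static void send_unaligned_head(const uint8_t **buf, size_t *len)
{
	size_t head = *len % 4;		/* 0..3 leading bytes */
	uint32_t val = 0;

	if (!head)
		return;

	*len -= head;
	while (head--)
		val = (val << 8) | *(*buf)++;
	printf("ADDR register  <- 0x%08x\n", val);	/* stands in for spireg_write() */
}

static void send_aligned_body(const uint8_t *buf, size_t len)
{
	while (len >= 4) {
		/* little-endian assembly, matching cpu_to_le32() on LE hosts */
		uint32_t word = buf[0] | buf[1] << 8 | buf[2] << 16 |
				(uint32_t)buf[3] << 24;
		printf("DATA_OUT register <- 0x%08x\n", word);
		buf += 4;
		len -= 4;
	}
}

int main(void)
{
	const uint8_t tx[] = { 0x9f, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	const uint8_t *p = tx;
	size_t len = sizeof(tx);

	send_unaligned_head(&p, &len);	/* 7 % 4 == 3 bytes go out first */
	send_aligned_body(p, len);	/* remaining 4 bytes go out as one word */
	return 0;
}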
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 6ef6c44f39f5..a172ab299e80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
goto qspi_probe_err;
}
} else {
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[CHIP_SELECT])) {
ret = PTR_ERR(qspi->base[CHIP_SELECT]);
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
}
@@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!qspi->dev_ids) {
ret = -ENOMEM;
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
for (val = 0; val < num_irqs; val++) {
@@ -1369,8 +1369,9 @@ qspi_reg_err:
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
- spi_master_put(master);
kfree(qspi->dev_ids);
+qspi_resource_err:
+ spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
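The label reshuffle in bcm_qspi_probe() restores the usual rule for goto-based unwinding: each failure jumps to a label that frees only what has already been acquired, in reverse order, and the controller reference is dropped last. A minimal user-space model of that pattern follows; all names and failure points are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

static int fake_probe(int fail_at)
{
	void *master, *dev_ids = NULL;
	int ret = 0;

	master = malloc(16);		/* stands in for spi_alloc_master() */
	if (!master)
		return -1;

	if (fail_at == 1) {		/* e.g. missing memory resource */
		ret = -1;
		goto resource_err;	/* dev_ids not allocated yet */
	}

	dev_ids = calloc(4, sizeof(int));
	if (!dev_ids) {
		ret = -1;
		goto resource_err;
	}

	if (fail_at == 2) {		/* e.g. IRQ registration failure */
		ret = -1;
		goto probe_err;		/* dev_ids must be freed */
	}

	printf("probe ok\n");
	/* a real probe would keep these; freed here only to keep the sketch leak-free */
	free(dev_ids);
	free(master);
	return 0;

probe_err:
	free(dev_ids);
resource_err:
	free(master);			/* stands in for spi_master_put() */
	return ret;
}

int main(void)
{
	fake_probe(0);
	fake_probe(1);
	fake_probe(2);
	return 0;
}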
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 680cdf549506..ba9743fa2326 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
* no need to check it there.
* However, we need to ensure the following calculations.
*/
- if ((div < SPI_MBR_DIV_MIN) &&
- (div > SPI_MBR_DIV_MAX))
+ if (div < SPI_MBR_DIV_MIN ||
+ div > SPI_MBR_DIV_MAX)
return -EINVAL;
/* Determine the first power of 2 greater than or equal to div */
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6e65524cbfd9..e8b5a5e21b2e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -45,7 +45,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
-#define SPI_DYN_FIRST_BUS_NUM 0
static DEFINE_IDR(spi_master_idr);
@@ -2086,7 +2085,7 @@ int spi_register_controller(struct spi_controller *ctlr)
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
int status = -ENODEV;
- int id;
+ int id, first_dynamic;
if (!dev)
return -ENODEV;
@@ -2116,9 +2115,15 @@ int spi_register_controller(struct spi_controller *ctlr)
}
}
if (ctlr->bus_num < 0) {
+ first_dynamic = of_alias_get_highest_id("spi");
+ if (first_dynamic < 0)
+ first_dynamic = 0;
+ else
+ first_dynamic++;
+
mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0,
- GFP_KERNEL);
+ id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
+ 0, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id;
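The spi_register_controller() change starts dynamic bus numbering one past the highest spi alias found in the device tree, so a controller probing late cannot take a number that an alias still reserves for another one. The numbering rule itself is tiny; a user-space model (the alias lookup and the IDR are replaced by plain values):

#include <stdio.h>

/* Dynamic IDs begin one past the highest device-tree alias, or at 0 when
 * no alias exists (alias id < 0). */
static int first_dynamic_id(int highest_alias_id)
{
	return highest_alias_id < 0 ? 0 : highest_alias_id + 1;
}

int main(void)
{
	/* e.g. aliases { spi0 = &spi_a; spi2 = &spi_b; } -> highest id 2 */
	printf("no aliases   -> start at %d\n", first_dynamic_id(-1));
	printf("highest is 2 -> start at %d\n", first_dynamic_id(2));
	return 0;
}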
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 1691760339da..02573c517d9d 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -172,7 +172,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
reg_address);
goto error_ret;
}
- *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) |
+ *val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
(st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
error_ret:
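The ade7759 fix adds a cast because st->rx[2] is a u8 that is promoted to a signed int before the shift; with bit 7 set, the shifted value lands in the sign bit and then sign-extends when widened to 64 bits, smearing ones across the upper half of the 40-bit register value. A stand-alone demonstration of the difference with invented buffer contents (the "bad" value models the old promotion without relying on undefined behaviour):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t rx[6] = { 0x00, 0x01, 0x80, 0x00, 0x00, 0x00 };

	/* What the old expression effectively produced: rx[2] << 24 done in
	 * (signed) int, then sign-extended on widening to 64 bits. */
	int32_t promoted = (int32_t)((uint32_t)rx[2] << 24);
	uint64_t bad = ((uint64_t)rx[1] << 32) | (uint64_t)(int64_t)promoted |
		       ((uint64_t)rx[3] << 16) | ((uint64_t)rx[4] << 8) | rx[5];

	/* Fixed: widen to u64 before shifting, as the patch does. */
	uint64_t good = ((uint64_t)rx[1] << 32) | ((uint64_t)rx[2] << 24) |
			((uint64_t)rx[3] << 16) | ((uint64_t)rx[4] << 8) | rx[5];

	printf("buggy: 0x%016llx\n", (unsigned long long)bad);
	printf("fixed: 0x%016llx\n", (unsigned long long)good);
	return 0;
}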
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 5f3d8f2339e3..4be864dbd41c 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -390,8 +390,7 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
__func__, instance);
instance->alsa_stream = alsa_stream;
alsa_stream->instance = instance;
- ret = 0; // xxx todo -1;
- goto err_free_mem;
+ return 0;
}
/* Initialize and create a VCHI connection */
@@ -401,16 +400,15 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
__func__, ret);
- ret = -EIO;
- goto err_free_mem;
+ return -EIO;
}
ret = vchi_connect(NULL, 0, vchi_instance);
if (ret) {
LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
__func__, ret);
- ret = -EIO;
- goto err_free_mem;
+ kfree(vchi_instance);
+ return -EIO;
}
initted = 1;
}
@@ -421,19 +419,16 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
if (IS_ERR(instance)) {
LOG_ERR("%s: failed to initialize audio service\n", __func__);
- ret = PTR_ERR(instance);
- goto err_free_mem;
+ /* vchi_instance is retained for use next time. */
+ return PTR_ERR(instance);
}
instance->alsa_stream = alsa_stream;
alsa_stream->instance = instance;
LOG_DBG(" success !\n");
- ret = 0;
-err_free_mem:
- kfree(vchi_instance);
- return ret;
+ return 0;
}
int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5e056064259c..18c923a4c16e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1832,6 +1832,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 68b54bd88d1e..883549ee946c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -960,10 +960,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
- length = cap->bLength;
- if (total_len < length)
+ if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+ dev->bos->desc->bNumDeviceCaps = i;
break;
+ }
+ length = cap->bLength;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
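usb_get_bos_descriptor() now refuses to trust a capability header unless the remaining buffer can hold both the fixed header and the advertised bLength, and records how many capabilities were actually parsed. The same defensive walk, reduced to a user-space sketch over a byte array (descriptor layout simplified, values invented):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct cap_hdr {			/* simplified stand-in for usb_dev_cap_header */
	uint8_t bLength;
	uint8_t bDescriptorType;
};

/* Returns how many capabilities could be parsed safely out of 'claimed'. */
static unsigned parse_caps(const uint8_t *buf, size_t total_len, unsigned claimed)
{
	unsigned i;

	for (i = 0; i < claimed; i++) {
		const struct cap_hdr *cap = (const struct cap_hdr *)buf;

		/* Stop (reporting the truncated count) rather than read
		 * past the end of the buffer. */
		if (total_len < sizeof(*cap) || total_len < cap->bLength)
			break;

		buf += cap->bLength;
		total_len -= cap->bLength;
	}
	return i;
}

int main(void)
{
	/* Two well-formed 4-byte caps, then one that lies about its length. */
	uint8_t bos[] = { 4, 0x10, 0, 0,  4, 0x10, 0, 0,  40, 0x10, 0, 0 };

	printf("parsed %u of 3 claimed capabilities\n",
	       parse_caps(bos, sizeof(bos), 3));
	return 0;
}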
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4664e543cf2f..e9326f31db8d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1576,11 +1576,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
totlen += isopkt[u].length;
}
u *= sizeof(struct usb_iso_packet_descriptor);
- if (totlen <= uurb->buffer_length)
- uurb->buffer_length = totlen;
- else
- WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
- totlen, uurb->buffer_length);
+ uurb->buffer_length = totlen;
break;
default:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b5c733613823..e9ce6bb0b22d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2710,13 +2710,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
- /* bomb out completely if the connection bounced. A USB 3.0
- * connection may bounce if multiple warm resets were issued,
+ /* Retry if connect change is set but status is still connected.
+ * A USB 3.0 connection may bounce if multiple warm resets were issued,
* but the device may have successfully re-connected. Ignore it.
*/
if (!hub_is_superspeed(hub->hdev) &&
- (portchange & USB_PORT_STAT_C_CONNECTION))
- return -ENOTCONN;
+ (portchange & USB_PORT_STAT_C_CONNECTION)) {
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ return -EAGAIN;
+ }
if (!(portstatus & USB_PORT_STAT_ENABLE))
return -EBUSY;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 82806e311202..a6aaf2f193a4 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* MIDI keyboard WORLDE MINI */
+ { USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index da9158f171cb..a2336deb5e36 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -420,14 +420,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_free_command(xhci, cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto cmd_cleanup;
+ }
+
+ ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+ i, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, command);
+ goto cmd_cleanup;
}
- xhci_queue_stop_endpoint(xhci, command, slot_id, i,
- suspend);
}
}
- xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto cmd_cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -439,6 +450,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
}
+
+cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a9443651ce0f..82c746e2d85c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1309,6 +1309,7 @@ static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
struct xhci_command *cur_cmd, *tmp_cmd;
+ xhci->current_cmd = NULL;
list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}
@@ -2579,15 +2580,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
(struct xhci_generic_trb *) ep_trb);
/*
- * No-op TRB should not trigger interrupts.
- * If ep_trb is a no-op TRB, it means the
- * corresponding TD has been cancelled. Just ignore
- * the TD.
+ * No-op TRB could trigger interrupts in a case where
+ * a URB was killed and a STALL_ERROR happens right
+ * after the endpoint ring stopped. Reset the halted
+ * endpoint. Otherwise, the endpoint remains stalled
+ * indefinitely.
*/
if (trb_is_noop(ep_trb)) {
- xhci_dbg(xhci,
- "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n",
- slot_id, ep_index);
+ if (trb_comp_code == COMP_STALL_ERROR ||
+ xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+ trb_comp_code))
+ xhci_cleanup_halted_endpoint(xhci, slot_id,
+ ep_index,
+ ep_ring->stream_id,
+ td, ep_trb,
+ EP_HARD_RESET);
goto cleanup;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ee198ea47f49..51535ba2bcd4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4805,7 +4805,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
*/
hcd->has_tt = 1;
} else {
- if (xhci->sbrn == 0x31) {
+ /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+ if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
hcd->speed = HCD_USB31;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 029692053dd3..ff5a1a8989d5 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -906,7 +906,7 @@ b_host:
*/
if (int_usb & MUSB_INTR_RESET) {
handled = IRQ_HANDLED;
- if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_active(musb)) {
/*
* When BABBLE happens what we can depends on which
* platform MUSB is running, because some platforms
@@ -916,9 +916,7 @@ b_host:
* drop the session.
*/
dev_err(musb->controller, "Babble\n");
-
- if (is_host_active(musb))
- musb_recover_from_babble(musb);
+ musb_recover_from_babble(musb);
} else {
musb_dbg(musb, "BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
@@ -1861,22 +1859,22 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_INVALID_VBUS_91:
- if (musb->quirk_retries--) {
+ if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
"Poll devctl on invalid vbus, assume no session");
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
-
+ musb->quirk_retries--;
return;
}
/* fall through */
case MUSB_QUIRK_A_DISCONNECT_19:
- if (musb->quirk_retries--) {
+ if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
"Poll devctl on possible host mode disconnect");
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
-
+ musb->quirk_retries--;
return;
}
if (!musb->session)
@@ -2681,8 +2679,15 @@ static int musb_suspend(struct device *dev)
musb_platform_disable(musb);
musb_disable_interrupts(musb);
+
+ musb->flush_irq_work = true;
+ while (flush_delayed_work(&musb->irq_work))
+ ;
+ musb->flush_irq_work = false;
+
if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
WARN_ON(!list_empty(&musb->pending_list));
spin_lock_irqsave(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index c748f4ac1154..20f4614178d9 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -428,6 +428,8 @@ struct musb {
unsigned test_mode:1;
unsigned softconnect:1;
+ unsigned flush_irq_work:1;
+
u8 address;
u8 test_mode_nr;
u16 ackpend; /* ep0 */
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ba255280a624..1ec0a4947b6b 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -26,15 +26,28 @@
#define MUSB_DMA_NUM_CHANNELS 15
+#define DA8XX_USB_MODE 0x10
+#define DA8XX_USB_AUTOREQ 0x14
+#define DA8XX_USB_TEARDOWN 0x1c
+
+#define DA8XX_DMA_NUM_CHANNELS 4
+
struct cppi41_dma_controller {
struct dma_controller controller;
- struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
- struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct cppi41_dma_channel *rx_channel;
+ struct cppi41_dma_channel *tx_channel;
struct hrtimer early_tx;
struct list_head early_tx_list;
u32 rx_mode;
u32 tx_mode;
u32 auto_req;
+
+ u32 tdown_reg;
+ u32 autoreq_reg;
+
+ void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode);
+ u8 num_channels;
};
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
@@ -349,6 +362,32 @@ static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
}
}
+static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->controller.musb;
+ unsigned int shift;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ old_mode = controller->tx_mode;
+ port = cppi41_channel->port_num;
+
+ shift = (port - 1) * 4;
+ if (!cppi41_channel->is_tx)
+ shift += 16;
+ new_mode = old_mode & ~(3 << shift);
+ new_mode |= mode << shift;
+
+ if (new_mode == old_mode)
+ return;
+ controller->tx_mode = new_mode;
+ musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
+}
+
+
static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
unsigned mode)
{
@@ -364,8 +403,8 @@ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
if (new_mode == old_mode)
return;
controller->auto_req = new_mode;
- musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ,
- new_mode);
+ musb_writel(controller->controller.musb->ctrl_base,
+ controller->autoreq_reg, new_mode);
}
static bool cppi41_configure_channel(struct dma_channel *channel,
@@ -373,6 +412,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
dma_addr_t dma_addr, u32 len)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct dma_chan *dc = cppi41_channel->dc;
struct dma_async_tx_descriptor *dma_desc;
enum dma_transfer_direction direction;
@@ -398,7 +438,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), len);
/* gen rndis */
- cppi41_set_dma_mode(cppi41_channel,
+ controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_GEN_RNDIS);
/* auto req */
@@ -407,14 +447,15 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
} else {
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), 0);
- cppi41_set_dma_mode(cppi41_channel,
+ controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel,
EP_MODE_AUTOREQ_NONE);
}
} else {
/* fallback mode */
- cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+ controller->set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
len = min_t(u32, packet_sz, len);
}
@@ -445,7 +486,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
struct cppi41_dma_channel *cppi41_channel = NULL;
u8 ch_num = hw_ep->epnum - 1;
- if (ch_num >= MUSB_DMA_NUM_CHANNELS)
+ if (ch_num >= controller->num_channels)
return NULL;
if (is_tx)
@@ -581,12 +622,13 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
do {
if (is_tx)
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ musb_writel(musb->ctrl_base, controller->tdown_reg,
+ tdbit);
ret = dmaengine_terminate_all(cppi41_channel->dc);
} while (ret == -EAGAIN);
if (is_tx) {
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_TXPKTRDY) {
@@ -604,7 +646,7 @@ static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
struct dma_chan *dc;
int i;
- for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
+ for (i = 0; i < ctrl->num_channels; i++) {
dc = ctrl->tx_channel[i].dc;
if (dc)
dma_release_channel(dc);
@@ -656,7 +698,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
goto err;
ret = -EINVAL;
- if (port > MUSB_DMA_NUM_CHANNELS || !port)
+ if (port > controller->num_channels || !port)
goto err;
if (is_tx)
cppi41_channel = &controller->tx_channel[port - 1];
@@ -697,6 +739,8 @@ void cppi41_dma_controller_destroy(struct dma_controller *c)
hrtimer_cancel(&controller->early_tx);
cppi41_dma_controller_stop(controller);
+ kfree(controller->rx_channel);
+ kfree(controller->tx_channel);
kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
@@ -705,6 +749,7 @@ struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
struct cppi41_dma_controller *controller;
+ int channel_size;
int ret = 0;
if (!musb->controller->parent->of_node) {
@@ -727,12 +772,37 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
controller->controller.is_compatible = cppi41_is_compatible;
controller->controller.musb = musb;
+ if (musb->io.quirks & MUSB_DA8XX) {
+ controller->tdown_reg = DA8XX_USB_TEARDOWN;
+ controller->autoreq_reg = DA8XX_USB_AUTOREQ;
+ controller->set_dma_mode = da8xx_set_dma_mode;
+ controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
+ } else {
+ controller->tdown_reg = USB_TDOWN;
+ controller->autoreq_reg = USB_CTRL_AUTOREQ;
+ controller->set_dma_mode = cppi41_set_dma_mode;
+ controller->num_channels = MUSB_DMA_NUM_CHANNELS;
+ }
+
+ channel_size = controller->num_channels *
+ sizeof(struct cppi41_dma_channel);
+ controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->rx_channel)
+ goto rx_channel_alloc_fail;
+ controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->tx_channel)
+ goto tx_channel_alloc_fail;
+
ret = cppi41_dma_controller_start(controller);
if (ret)
goto plat_get_fail;
return &controller->controller;
plat_get_fail:
+ kfree(controller->tx_channel);
+tx_channel_alloc_fail:
+ kfree(controller->rx_channel);
+rx_channel_alloc_fail:
kfree(controller);
kzalloc_fail:
if (ret == -EPROBE_DEFER)
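The cppi41 changes turn compile-time constants into per-platform parameters: register offsets, a set_dma_mode callback and the channel count are chosen once at controller creation, and the channel arrays are allocated to match. A compact user-space sketch of that parameterization pattern; the structure loosely mirrors the patch, the non-DA8XX offsets are invented, and register writes are mocked with printf.

#include <stdio.h>
#include <stdlib.h>

struct chan { int busy; };

struct ctrl {
	unsigned tdown_reg, autoreq_reg;	/* per-platform register offsets */
	unsigned num_channels;
	void (*set_dma_mode)(struct ctrl *c, unsigned mode);
	struct chan *tx, *rx;			/* sized per platform */
};

static void default_set_mode(struct ctrl *c, unsigned mode)
{
	(void)c;
	printf("default mode write: %u\n", mode);
}

static void da8xx_set_mode(struct ctrl *c, unsigned mode)
{
	(void)c;
	printf("da8xx mode write:   %u\n", mode);
}

static struct ctrl *ctrl_create(int is_da8xx)
{
	struct ctrl *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	if (is_da8xx) {
		c->tdown_reg = 0x1c;
		c->autoreq_reg = 0x14;
		c->set_dma_mode = da8xx_set_mode;
		c->num_channels = 4;
	} else {
		c->tdown_reg = 0xd8;		/* invented offsets for the sketch */
		c->autoreq_reg = 0xd0;
		c->set_dma_mode = default_set_mode;
		c->num_channels = 15;
	}
	c->tx = calloc(c->num_channels, sizeof(*c->tx));
	c->rx = calloc(c->num_channels, sizeof(*c->rx));
	if (!c->tx || !c->rx) {
		free(c->tx);
		free(c->rx);
		free(c);
		return NULL;
	}
	return c;
}

int main(void)
{
	struct ctrl *c = ctrl_create(1);

	if (!c)
		return 1;
	c->set_dma_mode(c, 2);
	printf("channels: %u\n", c->num_channels);
	free(c->tx);
	free(c->rx);
	free(c);
	return 0;
}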
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index c9a09b5bb6e5..dc353e24d53c 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -297,6 +297,8 @@ static int sunxi_musb_exit(struct musb *musb)
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
+ devm_usb_put_phy(glue->dev, glue->xceiv);
+
return 0;
}
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index cc84da8dbb84..14511d6a7d44 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -45,6 +45,7 @@ struct metrousb_private {
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */
{ }, /* Terminating entry. */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 82360594fa8e..57efbd3b053b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
mutex_unlock(&priv->lock);
if (use_ptemod) {
+ map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
set_grant_ptes_as_special, NULL);
}
#endif
- map->pages_vm_start = vma->vm_start;
}
return 0;
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e89136ab851e..b437fccd4e62 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -57,7 +57,7 @@ static int register_balloon(struct device *dev);
static void watch_target(struct xenbus_watch *watch,
const char *path, const char *token)
{
- unsigned long long new_target;
+ unsigned long long new_target, static_max;
int err;
static bool watch_fired;
static long target_diff;
@@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch,
* pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
*/
new_target >>= PAGE_SHIFT - 10;
- if (watch_fired) {
- balloon_set_new_target(new_target - target_diff);
- return;
+
+ if (!watch_fired) {
+ watch_fired = true;
+ err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
+ &static_max);
+ if (err != 1)
+ static_max = new_target;
+ else
+ static_max >>= PAGE_SHIFT - 10;
+ target_diff = xen_pv_domain() ? 0
+ : static_max - balloon_stats.target_pages;
}
- watch_fired = true;
- target_diff = new_target - balloon_stats.target_pages;
+ balloon_set_new_target(new_target - target_diff);
}
static struct xenbus_watch target_watch = {
.node = "memory/target",
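The xen-balloon change samples static-max once, on the first watch event, converts it from KiB to pages with the same PAGE_SHIFT - 10 shift, and keeps the difference to the current target as a fixed offset applied to every later target update (zero for PV domains). The arithmetic in isolation, with invented numbers:

#include <stdio.h>

#define PAGE_SHIFT 12				/* 4 KiB pages for this example */
#define KIB_TO_PAGES(k) ((k) >> (PAGE_SHIFT - 10))

int main(void)
{
	int is_pv = 0;				/* HVM domain in this example */
	unsigned long long static_max_kib = 4194304;	/* 4 GiB read from xenstore */
	unsigned long long current_target_pages = 1000000;

	unsigned long long static_max = KIB_TO_PAGES(static_max_kib);
	/* Fixed offset, computed once on the first watch event. */
	long long target_diff = is_pv ? 0 :
		(long long)(static_max - current_target_pages);

	/* Every later "memory/target" write is adjusted by that offset. */
	unsigned long long new_target_kib = 2097152;	/* toolstack asks for 2 GiB */
	unsigned long long new_target = KIB_TO_PAGES(new_target_kib);

	printf("balloon target = %lld pages\n",
	       (long long)new_target - target_diff);
	return 0;
}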
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 157fe59fbabe..1978a8cb1cb1 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1991,6 +1991,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+ spin_unlock(&ci->i_ceph_lock);
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
}
@@ -2008,8 +2009,10 @@ retry:
mutex_lock(&session->s_mutex);
goto retry;
}
- if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+ if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+ spin_unlock(&ci->i_ceph_lock);
goto out;
+ }
flushing = __mark_caps_flushing(inode, session, true,
&flush_tid, &oldest_flush_tid);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index f7243617316c..d5b2e12b5d02 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -5,9 +5,14 @@ config CIFS
select CRYPTO
select CRYPTO_MD4
select CRYPTO_MD5
+ select CRYPTO_SHA256
+ select CRYPTO_CMAC
select CRYPTO_HMAC
select CRYPTO_ARC4
+ select CRYPTO_AEAD2
+ select CRYPTO_CCM
select CRYPTO_ECB
+ select CRYPTO_AES
select CRYPTO_DES
help
This is the client VFS module for the SMB3 family of NAS protocols,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de5b2e1fcce5..e185b2853eab 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,7 +661,9 @@ struct TCP_Server_Info {
#endif
unsigned int max_read;
unsigned int max_write;
- __u8 preauth_hash[512];
+#ifdef CONFIG_CIFS_SMB311
+ __u8 preauth_sha_hash[64]; /* save initial negprot hash */
+#endif /* 3.1.1 */
struct delayed_work reconnect; /* reconnect workqueue job */
struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
unsigned long echo_interval;
@@ -849,7 +851,9 @@ struct cifs_ses {
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
- __u8 preauth_hash[512];
+#ifdef CONFIG_CIFS_SMB311
+ __u8 preauth_sha_hash[64];
+#endif /* 3.1.1 */
};
static inline bool
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 7ca9808a0daa..62c88dfed57b 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
{STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
{STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
- {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
+ {STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"},
{STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
{STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
{STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0dafdbae1f8c..bdb963d0ba32 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -522,6 +522,7 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
struct smb2_file_full_ea_info *smb2_data;
+ int ea_buf_size = SMB2_MIN_EA_BUF;
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
@@ -541,14 +542,32 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
- smb2_data = kzalloc(SMB2_MAX_EA_BUF, GFP_KERNEL);
- if (smb2_data == NULL) {
- SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
- return -ENOMEM;
+ while (1) {
+ smb2_data = kzalloc(ea_buf_size, GFP_KERNEL);
+ if (smb2_data == NULL) {
+ SMB2_close(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid);
+ return -ENOMEM;
+ }
+
+ rc = SMB2_query_eas(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid,
+ ea_buf_size, smb2_data);
+
+ if (rc != -E2BIG)
+ break;
+
+ kfree(smb2_data);
+ ea_buf_size <<= 1;
+
+ if (ea_buf_size > SMB2_MAX_EA_BUF) {
+ cifs_dbg(VFS, "EA size is too large\n");
+ SMB2_close(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid);
+ return -ENOMEM;
+ }
}
- rc = SMB2_query_eas(xid, tcon, fid.persistent_fid, fid.volatile_fid,
- smb2_data);
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
if (!rc)
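smb2_query_eas() now retries with a doubled buffer whenever the server reports a buffer overflow (mapped to -E2BIG by the smb2maperror.c hunk above), starting at SMB2_MIN_EA_BUF and giving up beyond SMB2_MAX_EA_BUF. The retry pattern on its own, as a user-space sketch with a fake query function standing in for SMB2_query_eas():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define MIN_BUF 2048
#define MAX_BUF 65536

/* Fake server: the reply needs 9000 bytes, smaller buffers get -E2BIG. */
static int fake_query(char *buf, size_t size)
{
	if (size < 9000)
		return -E2BIG;
	memset(buf, 0, size);
	return 0;
}

int main(void)
{
	size_t size = MIN_BUF;
	char *buf;
	int rc;

	for (;;) {
		buf = calloc(1, size);
		if (!buf)
			return 1;

		rc = fake_query(buf, size);
		if (rc != -E2BIG)
			break;			/* success or a hard error */

		free(buf);
		size <<= 1;			/* double and retry */
		if (size > MAX_BUF) {
			fprintf(stderr, "EA size is too large\n");
			return 1;
		}
	}

	printf("query %s with a %zu-byte buffer\n",
	       rc ? "failed" : "succeeded", size);
	free(buf);
	return 0;
}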
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 6f0e6343c15e..5331631386a2 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -648,7 +648,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
int rc = 0;
struct validate_negotiate_info_req vneg_inbuf;
- struct validate_negotiate_info_rsp *pneg_rsp;
+ struct validate_negotiate_info_rsp *pneg_rsp = NULL;
u32 rsplen;
u32 inbuflen; /* max of 4 dialects */
@@ -727,8 +727,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
rsplen);
/* relax check since Mac returns max bufsize allowed on ioctl */
- if (rsplen > CIFSMaxBufSize)
- return -EIO;
+ if ((rsplen > CIFSMaxBufSize)
+ || (rsplen < sizeof(struct validate_negotiate_info_rsp)))
+ goto err_rsp_free;
}
/* check validate negotiate info response matches what we got earlier */
@@ -747,10 +748,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
/* validate negotiate successful */
cifs_dbg(FYI, "validate negotiate info successful\n");
+ kfree(pneg_rsp);
return 0;
vneg_out:
cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
+err_rsp_free:
+ kfree(pneg_rsp);
return -EIO;
}
@@ -1255,7 +1259,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
- struct kvec rsp_iov;
+ struct kvec rsp_iov = { NULL, 0 };
int rc = 0;
int resp_buftype;
int unc_path_len;
@@ -1372,7 +1376,7 @@ tcon_exit:
return rc;
tcon_error_exit:
- if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
}
goto tcon_exit;
@@ -1975,6 +1979,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
} else
iov[0].iov_len = get_rfc1002_length(req) + 4;
+ /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
+ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
+ req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
@@ -2191,9 +2198,13 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
- /* 4 for rfc1002 length field and 1 for Buffer */
- req->InputBufferOffset =
- cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
+
+ /*
+ * We do not use the input buffer (do not send extra byte)
+ */
+ req->InputBufferOffset = 0;
+ inc_rfc1001_len(req, -1);
+
req->OutputBufferLength = cpu_to_le32(output_len);
iov[0].iov_base = (char *)req;
@@ -2233,12 +2244,12 @@ qinf_exit:
}
int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- struct smb2_file_full_ea_info *data)
+ u64 persistent_fid, u64 volatile_fid,
+ int ea_buf_size, struct smb2_file_full_ea_info *data)
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0,
- SMB2_MAX_EA_BUF,
+ ea_buf_size,
sizeof(struct smb2_file_full_ea_info),
(void **)&data,
NULL);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6c9653a130c8..c2ec934be968 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -832,7 +832,7 @@ struct smb2_flush_rsp {
/* Channel field for read and write: exactly one of following flags can be set*/
#define SMB2_CHANNEL_NONE 0x00000000
#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */
+#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */
/* SMB2 read request without RFC1001 length at the beginning */
struct smb2_read_plain_req {
@@ -1178,7 +1178,8 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
char FileName[0]; /* Name to be assigned to new link */
} __packed; /* level 11 Set */
-#define SMB2_MAX_EA_BUF 2048
+#define SMB2_MIN_EA_BUF 2048
+#define SMB2_MAX_EA_BUF 65536
struct smb2_file_full_ea_info { /* encoding of response for level 15 */
__le32 next_entry_offset;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 003217099ef3..e9ab5227e7a8 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -134,6 +134,7 @@ extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
+ int ea_buf_size,
struct smb2_file_full_ea_info *data);
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 67367cf1f8cd..99493946e2f9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses)
return generate_smb3signingkey(ses, &triplet);
}
+#ifdef CONFIG_CIFS_SMB311
int
generate_smb311signingkey(struct cifs_ses *ses)
@@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses)
struct derivation *d;
d = &triplet.signing;
- d->label.iov_base = "SMB2AESCMAC";
- d->label.iov_len = 12;
- d->context.iov_base = "SmbSign";
- d->context.iov_len = 8;
+ d->label.iov_base = "SMBSigningKey";
+ d->label.iov_len = 14;
+ d->context.iov_base = ses->preauth_sha_hash;
+ d->context.iov_len = 64;
d = &triplet.encryption;
- d->label.iov_base = "SMB2AESCCM";
- d->label.iov_len = 11;
- d->context.iov_base = "ServerIn ";
- d->context.iov_len = 10;
+ d->label.iov_base = "SMBC2SCipherKey";
+ d->label.iov_len = 16;
+ d->context.iov_base = ses->preauth_sha_hash;
+ d->context.iov_len = 64;
d = &triplet.decryption;
- d->label.iov_base = "SMB2AESCCM";
- d->label.iov_len = 11;
- d->context.iov_base = "ServerOut";
- d->context.iov_len = 10;
+ d->label.iov_base = "SMBS2CCipherKey";
+ d->label.iov_len = 16;
+ d->context.iov_base = ses->preauth_sha_hash;
+ d->context.iov_len = 64;
return generate_smb3signingkey(ses, &triplet);
}
+#endif /* 311 */
int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 622081b97426..24967382a7b1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1308,7 +1308,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
*/
over = !dir_emit(ctx, dirent->name, dirent->namelen,
dirent->ino, dirent->type);
- ctx->pos = dirent->off;
+ if (!over)
+ ctx->pos = dirent->off;
}
buf += reclen;
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index a619addecafc..321511ed8c42 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -598,18 +598,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
return true;
}
-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry)
+struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
+ struct dentry *index)
{
struct dentry *lowerdentry = ovl_dentry_lower(dentry);
struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
struct inode *inode;
+ /* Already indexed or could be indexed on copy up? */
+ bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
+
+ if (WARN_ON(upperdentry && indexed && !lowerdentry))
+ return ERR_PTR(-EIO);
if (!realinode)
realinode = d_inode(lowerdentry);
- if (!S_ISDIR(realinode->i_mode) &&
- (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) {
- struct inode *key = d_inode(lowerdentry ?: upperdentry);
+ /*
+ * Copy up origin (lower) may exist for non-indexed upper, but we must
+ * not use lower as hash key in that case.
+ * Hash inodes that are or could be indexed by origin inode and
+ * non-indexed upper inodes that could be hard linked by upper inode.
+ */
+ if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
+ struct inode *key = d_inode(indexed ? lowerdentry :
+ upperdentry);
unsigned int nlink;
inode = iget5_locked(dentry->d_sb, (unsigned long) key,
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 654bea1a5ac9..a12dc10bf726 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack,
* be treated as stale (i.e. after unlink of the overlay inode).
* We don't know the verification rules for directory and whiteout
* index entries, because they have not been implemented yet, so return
- * EROFS if those entries are found to avoid corrupting an index that
- * was created by a newer kernel.
+ * EINVAL if those entries are found to abort the mount to avoid
+ * corrupting an index that was created by a newer kernel.
*/
- err = -EROFS;
+ err = -EINVAL;
if (d_is_dir(index) || ovl_is_whiteout(index))
goto fail;
- err = -EINVAL;
if (index->d_name.len < sizeof(struct ovl_fh)*2)
goto fail;
@@ -507,6 +506,10 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
if (IS_ERR(index)) {
err = PTR_ERR(index);
+ if (err == -ENOENT) {
+ index = NULL;
+ goto out;
+ }
pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
"overlayfs: mount with '-o index=off' to disable inodes index.\n",
d_inode(origin)->i_ino, name.len, name.name,
@@ -516,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
inode = d_inode(index);
if (d_is_negative(index)) {
- if (upper && d_inode(origin)->i_nlink > 1) {
- pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n",
- d_inode(origin)->i_ino);
- goto fail;
- }
-
- dput(index);
- index = NULL;
+ goto out_dput;
} else if (upper && d_inode(upper) != inode) {
- pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n",
- index, inode->i_ino, d_inode(upper)->i_ino);
- goto fail;
+ goto out_dput;
} else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) {
/*
@@ -547,6 +541,11 @@ out:
kfree(name.name);
return index;
+out_dput:
+ dput(index);
+ index = NULL;
+ goto out;
+
fail:
dput(index);
index = ERR_PTR(-EIO);
@@ -635,6 +634,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
if (d.redirect) {
+ err = -ENOMEM;
upperredirect = kstrdup(d.redirect, GFP_KERNEL);
if (!upperredirect)
goto out_put_upper;
@@ -709,7 +709,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
upperdentry = dget(index);
if (upperdentry || ctr) {
- inode = ovl_get_inode(dentry, upperdentry);
+ inode = ovl_get_inode(dentry, upperdentry, index);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free_oe;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index c706a6f99928..d9a0edd4e57e 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -286,7 +286,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
bool ovl_is_private_xattr(const char *name);
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry);
+struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
+ struct dentry *index);
static inline void ovl_copyattr(struct inode *from, struct inode *to)
{
to->i_uid = from->i_uid;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 0f85ee9c3268..698b74dd750e 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -1021,13 +1021,12 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
break;
}
err = ovl_verify_index(index, lowerstack, numlower);
- if (err) {
- if (err == -EROFS)
- break;
+ /* Clean up stale and orphan index entries */
+ if (err && (err == -ESTALE || err == -ENOENT))
err = ovl_cleanup(dir, index);
- if (err)
- break;
- }
+ if (err)
+ break;
+
dput(index);
index = NULL;
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 092d150643c1..f5738e96a052 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
{
struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL);
+ if (!oi)
+ return NULL;
+
oi->cache = NULL;
oi->redirect = NULL;
oi->version = 0;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 56d0e526870c..6526ef0e2a23 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -237,11 +237,13 @@ xfs_file_dax_read(
if (!count)
return 0; /* skip atime */
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
return -EAGAIN;
+ } else {
xfs_ilock(ip, XFS_IOLOCK_SHARED);
}
+
ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -259,9 +261,10 @@ xfs_file_buffered_aio_read(
trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
return -EAGAIN;
+ } else {
xfs_ilock(ip, XFS_IOLOCK_SHARED);
}
ret = generic_file_read_iter(iocb, to);
@@ -552,9 +555,10 @@ xfs_file_dio_aio_write(
iolock = XFS_IOLOCK_SHARED;
}
- if (!xfs_ilock_nowait(ip, iolock)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, iolock))
return -EAGAIN;
+ } else {
xfs_ilock(ip, iolock);
}
@@ -606,9 +610,10 @@ xfs_file_dax_write(
size_t count;
loff_t pos;
- if (!xfs_ilock_nowait(ip, iolock)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, iolock))
return -EAGAIN;
+ } else {
xfs_ilock(ip, iolock);
}
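All four xfs hunks apply the same restructuring: check IOCB_NOWAIT first, use the trylock only on that path and fail with -EAGAIN if it cannot be taken immediately, and take the plain blocking lock otherwise. The locking shape, modelled in user space with a pthread mutex (compile with -pthread; error codes simplified):

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;

/* Take the lock according to the nowait flag: trylock and bail only when
 * the caller asked for non-blocking behaviour. */
static int lock_for_io(bool nowait)
{
	if (nowait) {
		if (pthread_mutex_trylock(&iolock))
			return -1;	/* -EAGAIN in the kernel code */
	} else {
		pthread_mutex_lock(&iolock);
	}
	return 0;
}

int main(void)
{
	if (!lock_for_io(true)) {
		printf("nowait path got the lock\n");
		pthread_mutex_unlock(&iolock);
	}
	lock_for_io(false);
	printf("blocking path got the lock\n");
	pthread_mutex_unlock(&iolock);
	return 0;
}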
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index d1b5173ad8f0..d8ca8270f62d 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -77,8 +77,8 @@ void tap_del_queues(struct tap_dev *tap);
int tap_get_minor(dev_t major, struct tap_dev *tap);
void tap_free_minor(dev_t major, struct tap_dev *tap);
int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
- dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+ const char *device_name, struct module *module);
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
#endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d4728bf6a537..5ad10948ea95 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1ea576c8126f..14b74f22d43c 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -372,6 +372,8 @@
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys) \
+ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index c57d4b7de3a8..c59af8ab753a 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct);
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 032b55909145..6737a8c9e8c6 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -27,16 +27,17 @@ enum pm_qos_flags_status {
PM_QOS_FLAGS_ALL,
};
-#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_DEFAULT_VALUE (-1)
+#define PM_QOS_LATENCY_ANY S32_MAX
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
-#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 82b171e1aa0b..da803dfc7a39 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -231,7 +231,7 @@ struct sctp_datahdr {
__be32 tsn;
__be16 stream;
__be16 ssn;
- __be32 ppid;
+ __u32 ppid;
__u8 payload[0];
};
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
struct sctp_strreset_outreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u32 response_seq;
- __u32 send_reset_at_tsn;
- __u16 list_of_streams[0];
+ __be32 request_seq;
+ __be32 response_seq;
+ __be32 send_reset_at_tsn;
+ __be16 list_of_streams[0];
};
struct sctp_strreset_inreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u16 list_of_streams[0];
+ __be32 request_seq;
+ __be16 list_of_streams[0];
};
struct sctp_strreset_tsnreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
+ __be32 request_seq;
};
struct sctp_strreset_addstrm {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u16 number_of_streams;
- __u16 reserved;
+ __be32 request_seq;
+ __be16 number_of_streams;
+ __be16 reserved;
};
enum {
@@ -752,16 +752,16 @@ enum {
struct sctp_strreset_resp {
struct sctp_paramhdr param_hdr;
- __u32 response_seq;
- __u32 result;
+ __be32 response_seq;
+ __be32 result;
};
struct sctp_strreset_resptsn {
struct sctp_paramhdr param_hdr;
- __u32 response_seq;
- __u32 result;
- __u32 senders_next_tsn;
- __u32 receivers_next_tsn;
+ __be32 response_seq;
+ __be32 result;
+ __be32 senders_next_tsn;
+ __be32 receivers_next_tsn;
};
#endif /* __LINUX_SCTP_H__ */
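The sctp.h hunks retype on-the-wire fields from __u16/__u32 to __be16/__be32 so sparse can flag any access that skips the endianness conversion; the values themselves still have to pass through helpers such as cpu_to_be32()/be32_to_cpu() (htonl()/ntohl() in user space). A small illustration of the convention using a simplified structure:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htonl/ntohl for the user-space sketch */

struct strreset_tsnreq {	/* simplified stand-in for the wire structure */
	uint32_t request_seq;	/* big-endian on the wire */
};

int main(void)
{
	struct strreset_tsnreq req;
	uint32_t host_seq = 0x12345678;

	req.request_seq = htonl(host_seq);	/* build: host -> wire */
	printf("wire field holds 0x%08x\n", req.request_seq);
	printf("parsed back:     0x%08x\n", ntohl(req.request_seq));
	return 0;
}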
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 73e97a08d3d0..cf30f5022472 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -9,13 +9,16 @@
/*
* Simple wait queues
*
- * While these are very similar to the other/complex wait queues (wait.h) the
- * most important difference is that the simple waitqueue allows for
- * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
- * times.
+ * While these are very similar to regular wait queues (wait.h) the most
+ * important difference is that the simple waitqueue allows for deterministic
+ * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
*
- * In order to make this so, we had to drop a fair number of features of the
- * other waitqueue code; notably:
+ * Mainly, this is accomplished by two things. Firstly, by not allowing
+ * swake_up_all to be called with IRQs disabled, and by dropping the lock
+ * upon every wakeup, giving a higher-priority task a chance to run.
+ *
+ * Secondly, we had to drop a fair number of features of the other waitqueue
+ * code; notably:
*
* - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
@@ -24,12 +27,14 @@
* - the exclusive mode; because this requires preserving the list order
* and this is hard.
*
- * - custom wake functions; because you cannot give any guarantees about
- * random code.
- *
- * As a side effect of this; the data structures are slimmer.
+ * - custom wake callback functions; because you cannot give any guarantees
+ * about random code. This also allows swait to be used in RT, such that
+ * raw spinlock can be used for the swait queue head.
*
- * One would recommend using this wait queue where possible.
+ * As a side effect of these, the data structures are slimmer, albeit more ad hoc.
+ * For all the above, note that simple wait queues should _only_ be used under
+ * very specific realtime constraints -- it is best to stick with the regular
+ * wait queues in most cases.
*/
struct task_struct;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 8b237e4afee6..be7c0fab3478 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -159,6 +159,7 @@ static void fq_tin_enqueue(struct fq *fq,
fq_flow_get_default_t get_default_func)
{
struct fq_flow *flow;
+ bool oom;
lockdep_assert_held(&fq->lock);
@@ -180,8 +181,8 @@ static void fq_tin_enqueue(struct fq *fq,
}
__skb_queue_tail(&flow->queue, skb);
-
- if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
+ oom = (fq->memory_usage > fq->memory_limit);
+ while (fq->backlog > fq->limit || oom) {
flow = list_first_entry_or_null(&fq->backlogs,
struct fq_flow,
backlogchain);
@@ -196,8 +197,10 @@ static void fq_tin_enqueue(struct fq *fq,
flow->tin->overlimit++;
fq->overlimit++;
- if (fq->memory_usage > fq->memory_limit)
+ if (oom) {
fq->overmemory++;
+ oom = (fq->memory_usage > fq->memory_limit);
+ }
}
}
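fq_tin_enqueue() now sheds load in a loop: packets are dropped from the flow with the longest backlog until both the packet limit and the memory limit are satisfied, with the memory condition re-evaluated after every drop. A toy model of that loop over plain counters (all numbers invented):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned backlog = 120, limit = 100;		/* packets */
	unsigned mem = 5000, mem_limit = 4096;		/* bytes */
	unsigned pkt_size = 100, dropped = 0;

	bool oom = mem > mem_limit;
	while (backlog > limit || oom) {
		/* drop one packet from the longest-backlog flow */
		backlog--;
		mem -= pkt_size;
		dropped++;
		if (oom)
			oom = mem > mem_limit;	/* re-check after each drop */
	}

	printf("dropped %u packets, backlog=%u mem=%u\n", dropped, backlog, mem);
	return 0;
}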
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index c49938d1481a..2135c9ba6ac3 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -133,6 +133,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+ return rcu_dereference_check(ireq->ireq_opt,
+ refcount_read(&ireq->req.rsk_refcnt) > 0);
+}
+
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 04caa246e747..bf73e1675519 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -2,6 +2,7 @@
#define __NET_PKT_CLS_H
#include <linux/pkt_cls.h>
+#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
@@ -28,6 +29,7 @@ struct tcf_block_ext_info {
};
struct tcf_block_cb;
+bool tcf_queue_work(struct work_struct *work);
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 031dffd5836c..07c179dab478 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -273,6 +274,7 @@ struct tcf_block {
struct net *net;
struct Qdisc *q;
struct list_head cb_list;
+ struct work_struct work;
};
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 13cc4963e905..70fb397f65b0 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
struct sctp_fwdtsn_skip *skiplist);
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
- __u16 stream_num, __u16 *stream_list,
+ __u16 stream_num, __be16 *stream_list,
bool out, bool in);
struct sctp_chunk *sctp_make_strreset_tsnreq(
const struct sctp_association *asoc);
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index b8c86ec1a8f5..231dc42f1da6 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags,
- __u16 stream_num, __u16 *stream_list, gfp_t gfp);
+ __u16 stream_num, __be16 *stream_list, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
const struct sctp_association *asoc, __u16 flags,
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 7dc131d62ad5..d96b59f45eba 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -74,10 +74,9 @@ struct strparser {
u32 unrecov_intr : 1;
struct sk_buff **skb_nextp;
- struct timer_list msg_timer;
struct sk_buff *skb_head;
unsigned int need_bytes;
- struct delayed_work delayed_work;
+ struct delayed_work msg_timer_work;
struct work_struct work;
struct strp_stats stats;
struct strp_callbacks cb;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index aa1cc90fdc02..a2510cdef4b5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -816,6 +816,7 @@ struct tcp_skb_cb {
__u32 key;
__u32 flags;
struct bpf_map *map;
+ void *data_end;
} bpf;
};
};
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f650346aaa1a..0b7b54d898bd 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -645,7 +645,7 @@ union bpf_attr {
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
- * Return: SK_REDIRECT
+ * Return: SK_PASS
*
* int bpf_sock_map_update(skops, map, key, flags)
* @skops: pointer to bpf_sock_ops
@@ -887,8 +887,8 @@ struct xdp_md {
};
enum sk_action {
- SK_ABORTED = 0,
- SK_DROP,
+ SK_DROP = 0,
+ SK_PASS,
SK_REDIRECT,
};
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 6cd7d416ca40..48c84a84721e 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -378,7 +378,7 @@ struct sctp_remote_error {
__u16 sre_type;
__u16 sre_flags;
__u32 sre_length;
- __u16 sre_error;
+ __be16 sre_error;
sctp_assoc_t sre_assoc_id;
__u8 sre_data[0];
};
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e75805..856de39d0b89 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
#define SPIDEV_H
#include <linux/types.h>
+#include <linux/ioctl.h>
/* User space versions of kernel symbols for SPI clocking modes,
* matching <linux/spi/spi.h>
diff --git a/init/Kconfig b/init/Kconfig
index 78cb2461012e..3c1faaa2af4a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1033,7 +1033,7 @@ endif
choice
prompt "Compiler optimization level"
- default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+ default CC_OPTIMIZE_FOR_PERFORMANCE
config CC_OPTIMIZE_FOR_PERFORMANCE
bool "Optimize for performance"
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index eef843c3b419..de8f66cad882 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -96,6 +96,14 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
+/* compute the linear packet data range [data, data_end) for skb when
+ * sk_skb type programs are in use.
+ */
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
+
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -117,7 +125,8 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
preempt_enable();
skb->sk = NULL;
- return rc;
+ return rc == SK_PASS ?
+ (TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
}
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
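With SK_DROP now 0 and SK_PASS 1 in the UAPI enum, an sk_skb program returns only pass or drop, and smap_verdict_func() derives the redirect case from whether the program stored a redirect map in the skb control block. The mapping, isolated into a tiny helper (the cb field access is replaced by a boolean for the sketch):

#include <stdio.h>
#include <stdbool.h>

enum sk_action { SK_DROP = 0, SK_PASS, SK_REDIRECT };

/* Internal verdict derived from the program's return code and whether it
 * called the redirect helper (which records a map/key for the skb). */
static enum sk_action verdict(enum sk_action rc, bool redirect_map_set)
{
	if (rc != SK_PASS)
		return SK_DROP;
	return redirect_map_set ? SK_REDIRECT : SK_PASS;
}

int main(void)
{
	printf("%d %d %d\n",
	       verdict(SK_PASS, true),		/* redirect */
	       verdict(SK_PASS, false),		/* pass */
	       verdict(SK_DROP, false));	/* drop */
	return 0;
}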
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d851df22f5c5..04892a82f6ac 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -632,6 +632,11 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
__cpuhp_kick_ap(st);
}
+ /*
+ * Clean up the leftovers so the next hotplug operation won't use stale
+ * data.
+ */
+ st->node = st->last = NULL;
return ret;
}
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 5270a54b9fa4..c26c5bb6b491 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
}
/**
- * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
+ * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
* @d: irq_data
+ *
+ * This generic implementation of the irq_mask_ack method is for chips
+ * with separate enable/disable registers instead of a single mask
+ * register and where a pending interrupt is acknowledged by setting a
+ * bit.
+ *
+ * Note: This is the only permutation currently used. Similar generic
+ * functions should be added here if other permutations are required.
*/
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
- irq_reg_writel(gc, mask, ct->regs.mask);
+ irq_reg_writel(gc, mask, ct->regs.disable);
+ *ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 64d0edf428f8..a2dccfe1acec 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,7 @@ enum {
* attach_mutex to avoid changing binding state while
* worker_attach_to_pool() is in progress.
*/
+ POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
/* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
/* L: hash of busy workers */
/* see manage_workers() for details on the two manager mutexes */
- struct mutex manager_arb; /* manager arbitration */
struct worker *manager; /* L: purely informational */
struct mutex attach_mutex; /* attach/detach exclusion */
struct list_head workers; /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
- bool managing = mutex_is_locked(&pool->manager_arb);
+ bool managing = pool->flags & POOL_MANAGER_ACTIVE;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- /*
- * Anyone who successfully grabs manager_arb wins the arbitration
- * and becomes the manager. mutex_trylock() on pool->manager_arb
- * failure while holding pool->lock reliably indicates that someone
- * else is managing the pool and the worker which failed trylock
- * can proceed to executing work items. This means that anyone
- * grabbing manager_arb is responsible for actually performing
- * manager duties. If manager_arb is grabbed and released without
- * actual management, the pool may stall indefinitely.
- */
- if (!mutex_trylock(&pool->manager_arb))
+ if (pool->flags & POOL_MANAGER_ACTIVE)
return false;
+
+ pool->flags |= POOL_MANAGER_ACTIVE;
pool->manager = worker;
maybe_create_worker(pool);
pool->manager = NULL;
- mutex_unlock(&pool->manager_arb);
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+ wake_up(&wq_manager_wait);
return true;
}
@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool)
setup_timer(&pool->mayday_timer, pool_mayday_timeout,
(unsigned long)pool);
- mutex_init(&pool->manager_arb);
mutex_init(&pool->attach_mutex);
INIT_LIST_HEAD(&pool->workers);
@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool)
hash_del(&pool->hash_node);
/*
- * Become the manager and destroy all workers. Grabbing
- * manager_arb prevents @pool's workers from blocking on
- * attach_mutex.
+ * Become the manager and destroy all workers. This prevents
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
*/
- mutex_lock(&pool->manager_arb);
-
spin_lock_irq(&pool->lock);
+ wait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool)
if (pool->detach_completion)
wait_for_completion(pool->detach_completion);
- mutex_unlock(&pool->manager_arb);
-
/* shut down the timers */
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 155c55d8db5f..4e53be8bc590 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
- /* Otherwise we can just insert a new node ahead of the old
- * one.
+ /* Otherwise all the old leaves cluster in the same slot, but
+ * the new leaf wants to go into a different slot - so we
+ * create a new node (n0) to hold the new leaf and a pointer to
+ * a new node (n1) holding all the old leaves.
+ *
+ * This can be done by falling through to the node splitting
+ * path.
*/
- goto present_leaves_cluster_but_not_new_leaf;
+ pr_devel("present leaves cluster but not new leaf\n");
}
split_node:
pr_devel("split node\n");
- /* We need to split the current node; we know that the node doesn't
- * simply contain a full set of leaves that cluster together (it
- * contains meta pointers and/or non-clustering leaves).
+ /* We need to split the current node. The node must contain anything
+ * from a single leaf (in the one leaf case, this leaf will cluster
+ * with the new leaf) and the rest meta-pointers, to all leaves, some
+ * of which may cluster.
+ *
+ * It won't contain the case in which all the current leaves plus the
+ * new leaf want to cluster in the same slot.
*
* We need to expel at least two leaves out of a set consisting of the
- * leaves in the node and the new leaf.
+ * leaves in the node and the new leaf. The current meta pointers can
+ * just be copied as they shouldn't cluster with any of the leaves.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
-present_leaves_cluster_but_not_new_leaf:
- /* All the old leaves cluster in the same slot, but the new leaf wants
- * to go into a different slot, so we create a new node to hold the new
- * leaf and a pointer to a new node holding all the old leaves.
- */
- pr_devel("present leaves cluster but not new leaf\n");
-
- new_n0->back_pointer = node->back_pointer;
- new_n0->parent_slot = node->parent_slot;
- new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
- new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
- new_n1->parent_slot = edit->segment_cache[0];
- new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
- edit->adjust_count_on = new_n0;
-
- for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
- new_n1->slots[i] = node->slots[i];
-
- new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
- edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
- edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
- edit->set[0].to = assoc_array_node_to_ptr(new_n0);
- edit->excised_meta[0] = assoc_array_node_to_ptr(node);
- pr_devel("<--%s() = ok [insert node before]\n", __func__);
- return true;
-
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
diff --git a/net/core/filter.c b/net/core/filter.c
index b79c44cc8145..721c30889033 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1845,14 +1845,15 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+ /* If the user passes invalid input, drop the packet. */
if (unlikely(flags))
- return SK_ABORTED;
+ return SK_DROP;
tcb->bpf.key = key;
tcb->bpf.flags = flags;
tcb->bpf.map = map;
- return SK_REDIRECT;
+ return SK_PASS;
}
struct sock *do_sk_redirect_map(struct sk_buff *skb)
@@ -4475,6 +4476,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
return insn - insn_buf;
}
+static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog, u32 *target_size)
+{
+ struct bpf_insn *insn = insn_buf;
+ int off;
+
+ switch (si->off) {
+ case offsetof(struct __sk_buff, data_end):
+ off = si->off;
+ off -= offsetof(struct __sk_buff, data_end);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct tcp_skb_cb, bpf.data_end);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
+ si->src_reg, off);
+ break;
+ default:
+ return bpf_convert_ctx_access(type, si, insn_buf, prog,
+ target_size);
+ }
+
+ return insn - insn_buf;
+}
+
const struct bpf_verifier_ops sk_filter_verifier_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
@@ -4565,7 +4591,7 @@ const struct bpf_prog_ops sock_ops_prog_ops = {
const struct bpf_verifier_ops sk_skb_verifier_ops = {
.get_func_proto = sk_skb_func_proto,
.is_valid_access = sk_skb_is_valid_access,
- .convert_ctx_access = bpf_convert_ctx_access,
+ .convert_ctx_access = sk_skb_convert_ctx_access,
.gen_prologue = sk_skb_prologue,
};
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 0490916864f9..e65fcb45c3f6 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -495,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
ireq->ir_rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- rcu_dereference(ireq->ireq_opt));
+ ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index de91c48b6806..4d1ccf87c59c 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -486,14 +486,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
if (!ethernet)
return -EINVAL;
ethernet_dev = of_find_net_device_by_node(ethernet);
+ if (!ethernet_dev)
+ return -EPROBE_DEFER;
} else {
ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
+ if (!ethernet_dev)
+ return -EPROBE_DEFER;
dev_put(ethernet_dev);
}
- if (!ethernet_dev)
- return -EPROBE_DEFER;
-
if (!dst->cpu_dp) {
dst->cpu_dp = port;
dst->cpu_dp->master = ethernet_dev;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ca03a1dcbc8f..4ca46dc08e63 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -541,7 +541,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
struct ip_options_rcu *opt;
struct rtable *rt;
- opt = rcu_dereference(ireq->ireq_opt);
+ opt = ireq_opt_deref(ireq);
+
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 1e47818e38c7..c891235b4966 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
static int ipip_err(struct sk_buff *skb, u32 info)
{
-
-/* All the routers (except for Linux) return only
- 8 bytes of packet payload. It means, that precise relaying of
- ICMP in the real Internet is absolutely infeasible.
- */
+ /* All the routers (except for Linux) return only
+ * 8 bytes of packet payload. It means that precise relaying of
+ * ICMP in the real Internet is absolutely infeasible.
+ */
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
const struct iphdr *iph = (const struct iphdr *)skb->data;
- struct ip_tunnel *t;
- int err;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
+ struct ip_tunnel *t;
+ int err = 0;
+
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ switch (code) {
+ case ICMP_SR_FAILED:
+ /* Impossible event. */
+ goto out;
+ default:
+ /* All others are translated to HOST_UNREACH.
+ * rfc2003 contains "deep thoughts" about NET_UNREACH,
+ * I believe they are just ether pollution. --ANK
+ */
+ break;
+ }
+ break;
+
+ case ICMP_TIME_EXCEEDED:
+ if (code != ICMP_EXC_TTL)
+ goto out;
+ break;
+
+ case ICMP_REDIRECT:
+ break;
+
+ default:
+ goto out;
+ }
- err = -ENOENT;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->daddr, iph->saddr, 0);
- if (!t)
+ if (!t) {
+ err = -ENOENT;
goto out;
+ }
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, iph->protocol, 0);
- err = 0;
+ ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
+ iph->protocol, 0);
goto out;
}
if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
- iph->protocol, 0);
- err = 0;
+ ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
goto out;
}
- if (t->parms.iph.daddr == 0)
+ if (t->parms.iph.daddr == 0) {
+ err = -ENOENT;
goto out;
+ }
- err = 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7c1dae6493c3..0162c577bb9c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -881,7 +881,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- rcu_dereference(ireq->ireq_opt));
+ ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index aab6e7145013..a69a34f57330 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -781,8 +781,10 @@ static void tcp_tsq_handler(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (tp->lost_out > tp->retrans_out &&
- tp->snd_cwnd > tcp_packets_in_flight(tp))
+ tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+ tcp_mstamp_refresh(tp);
tcp_xmit_retransmit_queue(sk);
+ }
tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
0, GFP_ATOMIC);
@@ -2307,6 +2309,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
sent_pkts = 0;
+ tcp_mstamp_refresh(tp);
if (!push_one) {
/* Do MTU probing. */
result = tcp_mtu_probe(sk);
@@ -2318,7 +2321,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
max_segs = tcp_tso_segs(sk, mss_now);
- tcp_mstamp_refresh(tp);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
@@ -2911,8 +2913,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
-ENOBUFS;
} tcp_skb_tsorted_restore(skb);
- if (!err)
+ if (!err) {
tcp_update_skb_after_send(tp, skb);
+ tcp_rate_skb_sent(sk, skb);
+ }
} else {
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 241841fb479c..3e10c51e7e0c 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case ICMPV6_DEST_UNREACH:
net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
t->parms.name);
- break;
+ if (code != ICMPV6_PORT_UNREACH)
+ break;
+ return;
case ICMPV6_TIME_EXCEED:
if (code == ICMPV6_EXC_HOPLIMIT) {
net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
t->parms.name);
+ break;
}
- break;
+ return;
case ICMPV6_PARAMPROB:
teli = 0;
if (code == ICMPV6_HDR_FIELD)
@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
t->parms.name);
}
- break;
+ return;
case ICMPV6_PKT_TOOBIG:
mtu = be32_to_cpu(info) - offset - t->tun_hlen;
if (t->dev->type == ARPHRD_ETHER)
@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
- break;
+ return;
}
if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
__u32 *pmtu, __be16 proto)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
- __be16 protocol = (dev->type == ARPHRD_ETHER) ?
- htons(ETH_P_TEB) : proto;
+ struct dst_entry *dst = skb_dst(skb);
+ __be16 protocol;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
tunnel->o_seqno++;
/* Push GRE header. */
+ protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+ /* TooBig packet may have updated dst->dev's mtu */
+ if (dst && dst_mtu(dst) > dst->dev->mtu)
+ dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a354f1939e49..fb15d3b97cb2 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2727,12 +2727,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
if (!ieee80211_sdata_running(sdata))
return -ENETDOWN;
- if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
- ret = drv_set_bitrate_mask(local, sdata, mask);
- if (ret)
- return ret;
- }
-
/*
* If active validate the setting and reject it if it doesn't leave
* at least one basic rate usable, since we really have to be able
@@ -2748,6 +2742,12 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return -EINVAL;
}
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+ ret = drv_set_bitrate_mask(local, sdata, mask);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < NUM_NL80211_BANDS; i++) {
struct ieee80211_supported_band *sband = wiphy->bands[i];
int j;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ae995c8480db..938049395f90 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
+#include <crypto/algapi.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
ieee80211_key_free_common(key);
}
+static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_key *old,
+ struct ieee80211_key *new)
+{
+ u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
+ u8 *tk_old, *tk_new;
+
+ if (!old || new->conf.keylen != old->conf.keylen)
+ return false;
+
+ tk_old = old->conf.key;
+ tk_new = new->conf.key;
+
+ /*
+ * In station mode, don't compare the TX MIC key, as it's never used
+ * and offloaded rekeying may not care to send it to the host. This
+ * is the case in iwlwifi, for example.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
+ new->conf.keylen == WLAN_KEY_LEN_TKIP &&
+ !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
+ memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
+ memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+ memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+ tk_old = tkip_old;
+ tk_new = tkip_new;
+ }
+
+ return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
+}
+
int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
@@ -634,8 +668,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
* Silently accept key re-installation without really installing the
* new version of the key to avoid nonce reuse or replay issues.
*/
- if (old_key && key->conf.keylen == old_key->conf.keylen &&
- !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) {
+ if (ieee80211_key_identical(sdata, old_key, key)) {
ieee80211_key_free_unused(key);
ret = 0;
goto out;
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 6ab39dbcca01..8557a1cae041 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -661,13 +661,15 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
}
}
- rds_ib_set_wr_signal_state(ic, send, 0);
+ rds_ib_set_wr_signal_state(ic, send, false);
/*
* Always signal the last one if we're stopping due to flow control.
*/
- if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
- send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
+ rds_ib_set_wr_signal_state(ic, send, true);
+ send->s_wr.send_flags |= IB_SEND_SOLICITED;
+ }
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
@@ -705,11 +707,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
- if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- prev->s_wr.send_flags |= IB_SEND_SIGNALED;
- nr_sig++;
- }
+ if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
+ nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
ic->i_data_op = NULL;
}
@@ -792,6 +791,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
send->s_atomic_wr.swap_mask = 0;
}
+ send->s_wr.send_flags = 0;
nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
send->s_atomic_wr.wr.num_sge = 1;
send->s_atomic_wr.wr.next = NULL;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ec986ae52808..a9f9a2ccc664 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -264,6 +264,7 @@ static int __init sample_init_module(void)
static void __exit sample_cleanup_module(void)
{
+ rcu_barrier();
tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0e96cdae9995..d9d54b367d23 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -77,6 +77,8 @@ out:
}
EXPORT_SYMBOL(register_tcf_proto_ops);
+static struct workqueue_struct *tc_filter_wq;
+
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t;
@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
* tcf_proto_ops's destroy() handler.
*/
rcu_barrier();
+ flush_workqueue(tc_filter_wq);
write_lock(&cls_mod_lock);
list_for_each_entry(t, &tcf_proto_base, head) {
@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
+bool tcf_queue_work(struct work_struct *work)
+{
+ return queue_work(tc_filter_wq, work);
+}
+EXPORT_SYMBOL(tcf_queue_work);
+
/* Select new prio value from the range, managed by kernel. */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
@@ -308,27 +317,30 @@ int tcf_block_get(struct tcf_block **p_block,
}
EXPORT_SYMBOL(tcf_block_get);
-void tcf_block_put_ext(struct tcf_block *block,
- struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
- struct tcf_block_ext_info *ei)
+static void tcf_block_put_final(struct work_struct *work)
{
+ struct tcf_block *block = container_of(work, struct tcf_block, work);
struct tcf_chain *chain, *tmp;
- if (!block)
- return;
-
- tcf_block_offload_unbind(block, q, ei);
-
- /* XXX: Standalone actions are not allowed to jump to any chain, and
- * bound actions should be all removed after flushing. However,
- * filters are destroyed in RCU callbacks, we have to hold the chains
- * first, otherwise we would always race with RCU callbacks on this list
- * without proper locking.
- */
+ /* At this point, all the chains should have refcnt == 1. */
+ rtnl_lock();
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ tcf_chain_put(chain);
+ rtnl_unlock();
+ kfree(block);
+}
- /* Wait for existing RCU callbacks to cool down. */
- rcu_barrier();
+/* XXX: Standalone actions are not allowed to jump to any chain, and bound
+ * actions should be all removed after flushing. However, filters are destroyed
+ * in RCU callbacks, we have to hold the chains first, otherwise we would
+ * always race with RCU callbacks on this list without proper locking.
+ */
+static void tcf_block_put_deferred(struct work_struct *work)
+{
+ struct tcf_block *block = container_of(work, struct tcf_block, work);
+ struct tcf_chain *chain;
+ rtnl_lock();
/* Hold a refcnt for all chains, except 0, in case they are gone. */
list_for_each_entry(chain, &block->chain_list, list)
if (chain->index)
@@ -338,13 +350,31 @@ void tcf_block_put_ext(struct tcf_block *block,
list_for_each_entry(chain, &block->chain_list, list)
tcf_chain_flush(chain);
- /* Wait for RCU callbacks to release the reference count. */
+ INIT_WORK(&block->work, tcf_block_put_final);
+ /* Wait for RCU callbacks to release the reference count and make
+ * sure their work items have been queued before this.
+ */
rcu_barrier();
+ tcf_queue_work(&block->work);
+ rtnl_unlock();
+}
- /* At this point, all the chains should have refcnt == 1. */
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
- tcf_chain_put(chain);
- kfree(block);
+void tcf_block_put_ext(struct tcf_block *block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct tcf_block_ext_info *ei)
+{
+ if (!block)
+ return;
+
+ tcf_block_offload_unbind(block, q, ei);
+
+ INIT_WORK(&block->work, tcf_block_put_deferred);
+ /* Wait for existing RCU callbacks to cool down and make sure their
+ * work items have been queued before this. We cannot flush pending
+ * work here because we are holding the RTNL lock.
+ */
+ rcu_barrier();
+ tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put_ext);
@@ -354,6 +384,7 @@ void tcf_block_put(struct tcf_block *block)
tcf_block_put_ext(block, NULL, block->q, &ei);
}
+
EXPORT_SYMBOL(tcf_block_put);
struct tcf_block_cb {
@@ -1051,6 +1082,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
#ifdef CONFIG_NET_CLS_ACT
LIST_HEAD(actions);
+ ASSERT_RTNL();
tcf_exts_to_list(exts, &actions);
tcf_action_destroy(&actions, TCA_ACT_UNBIND);
kfree(exts->actions);
@@ -1229,6 +1261,10 @@ EXPORT_SYMBOL(tc_setup_cb_call);
static int __init tc_filter_init(void)
{
+ tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
+ if (!tc_filter_wq)
+ return -ENOMEM;
+
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
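tcf_queue_work() and the ordered tc_filter_workqueue added above are the infrastructure for the per-classifier conversions that follow; each one moves its teardown out of the RCU callback (softirq context, which cannot take RTNL) into a work item that can. A rough sketch of the shared pattern, with illustrative struct and function names:

struct foo_filter {
	struct tcf_exts exts;
	union {
		struct work_struct work;
		struct rcu_head rcu;	/* reusable once the grace period ends */
	};
};

static void foo_delete_filter_work(struct work_struct *work)
{
	struct foo_filter *f = container_of(work, struct foo_filter, work);

	rtnl_lock();			/* tcf_exts_destroy() now asserts RTNL */
	tcf_exts_destroy(&f->exts);
	rtnl_unlock();
	kfree(f);
}

static void foo_delete_filter(struct rcu_head *head)
{
	struct foo_filter *f = container_of(head, struct foo_filter, rcu);

	INIT_WORK(&f->work, foo_delete_filter_work);
	tcf_queue_work(&f->work);
}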
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 700b345b07f9..871351358c10 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -35,7 +35,10 @@ struct basic_filter {
struct tcf_result res;
struct tcf_proto *tp;
struct list_head link;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -84,15 +87,26 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
-static void basic_delete_filter(struct rcu_head *head)
+static void basic_delete_filter_work(struct work_struct *work)
{
- struct basic_filter *f = container_of(head, struct basic_filter, rcu);
+ struct basic_filter *f = container_of(work, struct basic_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
+ rtnl_unlock();
+
kfree(f);
}
+static void basic_delete_filter(struct rcu_head *head)
+{
+ struct basic_filter *f = container_of(head, struct basic_filter, rcu);
+
+ INIT_WORK(&f->work, basic_delete_filter_work);
+ tcf_queue_work(&f->work);
+}
+
static void basic_destroy(struct tcf_proto *tp)
{
struct basic_head *head = rtnl_dereference(tp->root);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 0f8b51061c39..5f701c8670a2 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -50,7 +50,10 @@ struct cls_bpf_prog {
struct sock_filter *bpf_ops;
const char *bpf_name;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -269,9 +272,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
kfree(prog);
}
+static void cls_bpf_delete_prog_work(struct work_struct *work)
+{
+ struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
+
+ rtnl_lock();
+ __cls_bpf_delete_prog(prog);
+ rtnl_unlock();
+}
+
static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
- __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
+ struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
+
+ INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
+ tcf_queue_work(&prog->work);
}
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d48452f87975..a97e069bee89 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -23,7 +23,10 @@ struct cls_cgroup_head {
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
+static void cls_cgroup_destroy_work(struct work_struct *work)
+{
+ struct cls_cgroup_head *head = container_of(work,
+ struct cls_cgroup_head,
+ work);
+ rtnl_lock();
+ tcf_exts_destroy(&head->exts);
+ tcf_em_tree_destroy(&head->ematches);
+ kfree(head);
+ rtnl_unlock();
+}
+
static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
struct cls_cgroup_head *head = container_of(root,
struct cls_cgroup_head,
rcu);
- tcf_exts_destroy(&head->exts);
- tcf_em_tree_destroy(&head->ematches);
- kfree(head);
+ INIT_WORK(&head->work, cls_cgroup_destroy_work);
+ tcf_queue_work(&head->work);
}
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6b29cef90bec..9c08fcdaf41c 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -57,7 +57,10 @@ struct flow_filter {
u32 divisor;
u32 baseclass;
u32 hashrnd;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static inline u32 addr_fold(void *addr)
@@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
-static void flow_destroy_filter(struct rcu_head *head)
+static void flow_destroy_filter_work(struct work_struct *work)
{
- struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+ struct flow_filter *f = container_of(work, struct flow_filter, work);
+ rtnl_lock();
del_timer_sync(&f->perturb_timer);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
kfree(f);
+ rtnl_unlock();
+}
+
+static void flow_destroy_filter(struct rcu_head *head)
+{
+ struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+
+ INIT_WORK(&f->work, flow_destroy_filter_work);
+ tcf_queue_work(&f->work);
}
static int flow_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 16f58abaa697..35cb6d684e44 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -87,7 +87,11 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
+ struct net_device *hw_dev;
};
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
@@ -189,12 +193,22 @@ static int fl_init(struct tcf_proto *tp)
return 0;
}
-static void fl_destroy_filter(struct rcu_head *head)
+static void fl_destroy_filter_work(struct work_struct *work)
{
- struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+ struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void fl_destroy_filter(struct rcu_head *head)
+{
+ struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+
+ INIT_WORK(&f->work, fl_destroy_filter_work);
+ tcf_queue_work(&f->work);
}
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index aa1e1f3bd20c..5908f56f76da 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -47,7 +47,10 @@ struct fw_filter {
#endif /* CONFIG_NET_CLS_IND */
struct tcf_exts exts;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static u32 fw_hash(u32 handle)
@@ -122,12 +125,22 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
-static void fw_delete_filter(struct rcu_head *head)
+static void fw_delete_filter_work(struct work_struct *work)
{
- struct fw_filter *f = container_of(head, struct fw_filter, rcu);
+ struct fw_filter *f = container_of(work, struct fw_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void fw_delete_filter(struct rcu_head *head)
+{
+ struct fw_filter *f = container_of(head, struct fw_filter, rcu);
+
+ INIT_WORK(&f->work, fw_delete_filter_work);
+ tcf_queue_work(&f->work);
}
static void fw_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 70e78d74f6d3..95dc997873e8 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -21,7 +21,10 @@ struct cls_mall_head {
struct tcf_result res;
u32 handle;
u32 flags;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -41,13 +44,23 @@ static int mall_init(struct tcf_proto *tp)
return 0;
}
+static void mall_destroy_work(struct work_struct *work)
+{
+ struct cls_mall_head *head = container_of(work, struct cls_mall_head,
+ work);
+ rtnl_lock();
+ tcf_exts_destroy(&head->exts);
+ kfree(head);
+ rtnl_unlock();
+}
+
static void mall_destroy_rcu(struct rcu_head *rcu)
{
struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
rcu);
- tcf_exts_destroy(&head->exts);
- kfree(head);
+ INIT_WORK(&head->work, mall_destroy_work);
+ tcf_queue_work(&head->work);
}
static void mall_destroy_hw_filter(struct tcf_proto *tp,
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 9ddde65915d2..4b14ccd8b8f2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -57,7 +57,10 @@ struct route4_filter {
u32 handle;
struct route4_bucket *bkt;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
-static void route4_delete_filter(struct rcu_head *head)
+static void route4_delete_filter_work(struct work_struct *work)
{
- struct route4_filter *f = container_of(head, struct route4_filter, rcu);
+ struct route4_filter *f = container_of(work, struct route4_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void route4_delete_filter(struct rcu_head *head)
+{
+ struct route4_filter *f = container_of(head, struct route4_filter, rcu);
+
+ INIT_WORK(&f->work, route4_delete_filter_work);
+ tcf_queue_work(&f->work);
}
static void route4_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b1f6ed48bc72..bdbc541787f8 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -97,7 +97,10 @@ struct rsvp_filter {
u32 handle;
struct rsvp_session *sess;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
@@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
-static void rsvp_delete_filter_rcu(struct rcu_head *head)
+static void rsvp_delete_filter_work(struct work_struct *work)
{
- struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+ struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void rsvp_delete_filter_rcu(struct rcu_head *head)
+{
+ struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+
+ INIT_WORK(&f->work, rsvp_delete_filter_work);
+ tcf_queue_work(&f->work);
}
static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index d732b5474a4d..d6abfa6757f2 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -28,14 +28,20 @@
struct tcindex_filter_result {
struct tcf_exts exts;
struct tcf_result res;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
struct tcindex_filter {
u16 key;
struct tcindex_filter_result result;
struct tcindex_filter __rcu *next;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
@@ -136,12 +142,34 @@ static int tcindex_init(struct tcf_proto *tp)
return 0;
}
+static void tcindex_destroy_rexts_work(struct work_struct *work)
+{
+ struct tcindex_filter_result *r;
+
+ r = container_of(work, struct tcindex_filter_result, work);
+ rtnl_lock();
+ tcf_exts_destroy(&r->exts);
+ rtnl_unlock();
+}
+
static void tcindex_destroy_rexts(struct rcu_head *head)
{
struct tcindex_filter_result *r;
r = container_of(head, struct tcindex_filter_result, rcu);
- tcf_exts_destroy(&r->exts);
+ INIT_WORK(&r->work, tcindex_destroy_rexts_work);
+ tcf_queue_work(&r->work);
+}
+
+static void tcindex_destroy_fexts_work(struct work_struct *work)
+{
+ struct tcindex_filter *f = container_of(work, struct tcindex_filter,
+ work);
+
+ rtnl_lock();
+ tcf_exts_destroy(&f->result.exts);
+ kfree(f);
+ rtnl_unlock();
}
static void tcindex_destroy_fexts(struct rcu_head *head)
@@ -149,8 +177,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
struct tcindex_filter *f = container_of(head, struct tcindex_filter,
rcu);
- tcf_exts_destroy(&f->result.exts);
- kfree(f);
+ INIT_WORK(&f->work, tcindex_destroy_fexts_work);
+ tcf_queue_work(&f->work);
}
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 9ff17159fb61..86145867b424 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -69,7 +69,10 @@ struct tc_u_knode {
u32 __percpu *pcpu_success;
#endif
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
/* The 'sel' field MUST be the last field in structure to allow for
* tc_u32_keys allocated at end of structure.
*/
@@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
* this the u32_delete_key_rcu variant does not free the percpu
* statistics.
*/
+static void u32_delete_key_work(struct work_struct *work)
+{
+ struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
+
+ rtnl_lock();
+ u32_destroy_key(key->tp, key, false);
+ rtnl_unlock();
+}
+
static void u32_delete_key_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
- u32_destroy_key(key->tp, key, false);
+ INIT_WORK(&key->work, u32_delete_key_work);
+ tcf_queue_work(&key->work);
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
@@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu)
* for the variant that should be used with keys return from
* u32_init_knode()
*/
+static void u32_delete_key_freepf_work(struct work_struct *work)
+{
+ struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
+
+ rtnl_lock();
+ u32_destroy_key(key->tp, key, true);
+ rtnl_unlock();
+}
+
static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
- u32_destroy_key(key->tp, key, true);
+ INIT_WORK(&key->work, u32_delete_key_freepf_work);
+ tcf_queue_work(&key->work);
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a9ac912f1d67..b6c4f536876b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -301,6 +301,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
+ if (!handle)
+ return NULL;
q = qdisc_match_from_root(dev->qdisc, handle);
if (q)
goto out;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 34f10e75f3b9..621b5ca3fd1c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -794,7 +794,7 @@ hit:
struct sctp_hash_cmp_arg {
const union sctp_addr *paddr;
const struct net *net;
- u16 lport;
+ __be16 lport;
};
static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
@@ -820,37 +820,37 @@ out:
return err;
}
-static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
+static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
const struct sctp_transport *t = data;
const union sctp_addr *paddr = &t->ipaddr;
const struct net *net = sock_net(t->asoc->base.sk);
- u16 lport = htons(t->asoc->base.bind_addr.port);
- u32 addr;
+ __be16 lport = htons(t->asoc->base.bind_addr.port);
+ __u32 addr;
if (paddr->sa.sa_family == AF_INET6)
addr = jhash(&paddr->v6.sin6_addr, 16, seed);
else
- addr = paddr->v4.sin_addr.s_addr;
+ addr = (__force __u32)paddr->v4.sin_addr.s_addr;
- return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
+ return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
(__force __u32)lport, net_hash_mix(net), seed);
}
-static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed)
+static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
{
const struct sctp_hash_cmp_arg *x = data;
const union sctp_addr *paddr = x->paddr;
const struct net *net = x->net;
- u16 lport = x->lport;
- u32 addr;
+ __be16 lport = x->lport;
+ __u32 addr;
if (paddr->sa.sa_family == AF_INET6)
addr = jhash(&paddr->v6.sin6_addr, 16, seed);
else
- addr = paddr->v4.sin_addr.s_addr;
+ addr = (__force __u32)paddr->v4.sin_addr.s_addr;
- return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
+ return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
(__force __u32)lport, net_hash_mix(net), seed);
}
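The __be16/__force changes above are endianness annotations for sparse; the sctp hunks that follow also fix real byte-order bugs of the same class (e.g. crr_id and a_rwnd now get htonl()). A small userspace C illustration of the bug class, assuming a little-endian host:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	unsigned short host_port = 8080;	/* host byte order  */
	unsigned short net_port  = htons(8080);	/* network byte order */

	/* 0x1f90 vs 0x901f on little-endian: comparing or mixing them is
	 * exactly the mistake the __be16 typing makes visible at build time.
	 */
	printf("host=0x%04x net=0x%04x -> %s\n", host_port, net_port,
	       host_port == net_port ? "equal" : "differ");
	return 0;
}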
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 51c488769590..a6dfa86c0201 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -738,7 +738,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
/* Was this packet marked by Explicit Congestion Notification? */
static int sctp_v6_is_ce(const struct sk_buff *skb)
{
- return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20);
+ return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
}
/* Dump the v6 addr to the seq file. */
@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
net = sock_net(&opt->inet.sk);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
- if (!dev ||
- !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
+ if (!dev || !(opt->inet.freebind ||
+ net->ipv6.sysctl.ip_nonlocal_bind ||
+ ipv6_chk_addr(net, &addr->v6.sin6_addr,
+ dev, 0))) {
rcu_read_unlock();
return 0;
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ca8f196b6c6c..514465b03829 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2854,7 +2854,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = flags;
param.param_hdr.length = htons(paramlen + addr_param_len);
- param.crr_id = i;
+ param.crr_id = htonl(i);
sctp_addto_chunk(retval, paramlen, &param);
sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -2867,7 +2867,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = SCTP_PARAM_DEL_IP;
param.param_hdr.length = htons(paramlen + addr_param_len);
- param.crr_id = i;
+ param.crr_id = htonl(i);
sctp_addto_chunk(retval, paramlen, &param);
sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -3591,7 +3591,7 @@ static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc,
*/
struct sctp_chunk *sctp_make_strreset_req(
const struct sctp_association *asoc,
- __u16 stream_num, __u16 *stream_list,
+ __u16 stream_num, __be16 *stream_list,
bool out, bool in)
{
struct sctp_strreset_outreq outreq;
@@ -3788,7 +3788,8 @@ bool sctp_verify_reconf(const struct sctp_association *asoc,
{
struct sctp_reconf_chunk *hdr;
union sctp_params param;
- __u16 last = 0, cnt = 0;
+ __be16 last = 0;
+ __u16 cnt = 0;
hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
sctp_walk_params(param, hdr, params) {
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1c2699b424af..df94d77401e7 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1629,12 +1629,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_INIT_FAILED:
- sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
+ sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
break;
case SCTP_CMD_ASSOC_FAILED:
sctp_cmd_assoc_failed(commands, asoc, event_type,
- subtype, chunk, cmd->obj.err);
+ subtype, chunk, cmd->obj.u32);
break;
case SCTP_CMD_INIT_COUNTER_INC:
@@ -1702,8 +1702,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
case SCTP_CMD_PROCESS_CTSN:
/* Dummy up a SACK for processing. */
sackh.cum_tsn_ack = cmd->obj.be32;
- sackh.a_rwnd = asoc->peer.rwnd +
- asoc->outqueue.outstanding_bytes;
+ sackh.a_rwnd = htonl(asoc->peer.rwnd +
+ asoc->outqueue.outstanding_bytes);
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
chunk->subh.sack_hdr = &sackh;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c75acdf71a6f..b029757bea03 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -171,6 +171,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
sk_mem_charge(sk, chunk->skb->truesize);
}
+static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+{
+ skb_orphan(chunk->skb);
+}
+
+static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+ void (*cb)(struct sctp_chunk *))
+
+{
+ struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_transport *t;
+ struct sctp_chunk *chunk;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+ list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->retransmit, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->sacked, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->abandoned, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->out_chunk_list, list)
+ cb(chunk);
+}
+
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
@@ -8379,7 +8409,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+ sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
sctp_assoc_migrate(assoc, newsk);
+ sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 5ea33a2c453b..03764fc2b652 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -261,6 +261,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
__u16 i, str_nums, *str_list;
struct sctp_chunk *chunk;
int retval = -EINVAL;
+ __be16 *nstr_list;
bool out, in;
if (!asoc->peer.reconf_capable ||
@@ -291,13 +292,18 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
if (str_list[i] >= stream->incnt)
goto out;
+ nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
+ if (!nstr_list) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < str_nums; i++)
- str_list[i] = htons(str_list[i]);
+ nstr_list[i] = htons(str_list[i]);
- chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in);
+ chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
- for (i = 0; i < str_nums; i++)
- str_list[i] = ntohs(str_list[i]);
+ kfree(nstr_list);
if (!chunk) {
retval = -ENOMEM;
@@ -442,7 +448,7 @@ out:
}
static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
- struct sctp_association *asoc, __u32 resp_seq,
+ struct sctp_association *asoc, __be32 resp_seq,
__be16 type)
{
struct sctp_chunk *chunk = asoc->strreset_chunk;
@@ -482,8 +488,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
{
struct sctp_strreset_outreq *outreq = param.v;
struct sctp_stream *stream = &asoc->stream;
- __u16 i, nums, flags = 0, *str_p = NULL;
__u32 result = SCTP_STRRESET_DENIED;
+ __u16 i, nums, flags = 0;
+ __be16 *str_p = NULL;
__u32 request_seq;
request_seq = ntohl(outreq->request_seq);
@@ -576,8 +583,9 @@ struct sctp_chunk *sctp_process_strreset_inreq(
struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_chunk *chunk = NULL;
- __u16 i, nums, *str_p;
__u32 request_seq;
+ __u16 i, nums;
+ __be16 *str_p;
request_seq = ntohl(inreq->request_seq);
if (TSN_lt(asoc->strreset_inseq, request_seq) ||
@@ -897,7 +905,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
struct sctp_strreset_outreq *outreq;
- __u16 *str_p;
+ __be16 *str_p;
outreq = (struct sctp_strreset_outreq *)req;
str_p = outreq->list_of_streams;
@@ -922,7 +930,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
nums, str_p, GFP_ATOMIC);
} else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
struct sctp_strreset_inreq *inreq;
- __u16 *str_p;
+ __be16 *str_p;
/* if the result is performed, it's impossible for inreq */
if (result == SCTP_STRRESET_PERFORMED)
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 67abc0194f30..5447228bf1a0 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -847,7 +847,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags, __u16 stream_num,
- __u16 *stream_list, gfp_t gfp)
+ __be16 *stream_list, gfp_t gfp)
{
struct sctp_stream_reset_event *sreset;
struct sctp_ulpevent *event;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index d4ea46a5f233..c5fda15ba319 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -49,7 +49,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
{
/* Unrecoverable error in receive */
- del_timer(&strp->msg_timer);
+ cancel_delayed_work(&strp->msg_timer_work);
if (strp->stopped)
return;
@@ -68,7 +68,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
static void strp_start_timer(struct strparser *strp, long timeo)
{
if (timeo)
- mod_timer(&strp->msg_timer, timeo);
+ mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
}
/* Lower lock held */
@@ -319,7 +319,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
eaten += (cand_len - extra);
/* Hurray, we have a new message! */
- del_timer(&strp->msg_timer);
+ cancel_delayed_work(&strp->msg_timer_work);
strp->skb_head = NULL;
STRP_STATS_INCR(strp->stats.msgs);
@@ -450,9 +450,10 @@ static void strp_work(struct work_struct *w)
do_strp_work(container_of(w, struct strparser, work));
}
-static void strp_msg_timeout(unsigned long arg)
+static void strp_msg_timeout(struct work_struct *w)
{
- struct strparser *strp = (struct strparser *)arg;
+ struct strparser *strp = container_of(w, struct strparser,
+ msg_timer_work.work);
/* Message assembly timed out */
STRP_STATS_INCR(strp->stats.msg_timeouts);
@@ -505,9 +506,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
- setup_timer(&strp->msg_timer, strp_msg_timeout,
- (unsigned long)strp);
-
+ INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout);
INIT_WORK(&strp->work, strp_work);
return 0;
@@ -532,7 +531,7 @@ void strp_done(struct strparser *strp)
{
WARN_ON(!strp->stopped);
- del_timer_sync(&strp->msg_timer);
+ cancel_delayed_work_sync(&strp->msg_timer_work);
cancel_work_sync(&strp->work);
if (strp->skb_head) {
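The strparser hunks above replace the message-assembly timer with a delayed work item, so the timeout handler runs on strp_wq in process context like the rest of the receive path rather than in timer (softirq) context. A minimal sketch of the same timer_list -> delayed_work conversion, with illustrative names:

#include <linux/workqueue.h>

struct my_parser {
	struct delayed_work timeout_work;
};

static void my_timeout(struct work_struct *w)
{
	struct my_parser *p = container_of(w, struct my_parser,
					   timeout_work.work);
	/* Runs in workqueue (process) context, serialized with other
	 * work queued on the same workqueue.
	 */
	(void)p;
}

static void my_parser_init(struct my_parser *p)
{
	INIT_DELAYED_WORK(&p->timeout_work, my_timeout);
}

static void my_parser_arm(struct my_parser *p, unsigned long timeo)
{
	mod_delayed_work(system_wq, &p->timeout_work, timeo);
}

static void my_parser_stop(struct my_parser *p)
{
	cancel_delayed_work_sync(&p->timeout_work);
}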
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4b00302e1867..6160d17a31c4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1333,7 +1333,7 @@ void xprt_release(struct rpc_task *task)
rpc_count_iostats(task, task->tk_client->cl_metrics);
spin_lock(&xprt->recv_lock);
if (!list_empty(&req->rq_list)) {
- list_del(&req->rq_list);
+ list_del_init(&req->rq_list);
xprt_wait_on_pinned_rqst(req);
}
spin_unlock(&xprt->recv_lock);
@@ -1444,6 +1444,23 @@ out:
return xprt;
}
+static void xprt_destroy_cb(struct work_struct *work)
+{
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, task_cleanup);
+
+ rpc_xprt_debugfs_unregister(xprt);
+ rpc_destroy_wait_queue(&xprt->binding);
+ rpc_destroy_wait_queue(&xprt->pending);
+ rpc_destroy_wait_queue(&xprt->sending);
+ rpc_destroy_wait_queue(&xprt->backlog);
+ kfree(xprt->servername);
+ /*
+ * Tear down transport state and free the rpc_xprt
+ */
+ xprt->ops->destroy(xprt);
+}
+
/**
* xprt_destroy - destroy an RPC transport, killing off all requests.
* @xprt: transport to destroy
@@ -1453,22 +1470,19 @@ static void xprt_destroy(struct rpc_xprt *xprt)
{
dprintk("RPC: destroying transport %p\n", xprt);
- /* Exclude transport connect/disconnect handlers */
+ /*
+ * Exclude transport connect/disconnect handlers and autoclose
+ */
wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
del_timer_sync(&xprt->timer);
- rpc_xprt_debugfs_unregister(xprt);
- rpc_destroy_wait_queue(&xprt->binding);
- rpc_destroy_wait_queue(&xprt->pending);
- rpc_destroy_wait_queue(&xprt->sending);
- rpc_destroy_wait_queue(&xprt->backlog);
- cancel_work_sync(&xprt->task_cleanup);
- kfree(xprt->servername);
/*
- * Tear down transport state and free the rpc_xprt
+ * Destroy sockets etc. from the system workqueue so they can
+ * safely flush receive work running on rpciod.
*/
- xprt->ops->destroy(xprt);
+ INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
+ schedule_work(&xprt->task_cleanup);
}
static void xprt_destroy_kref(struct kref *kref)
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 4d9679701a6d..384c84e83462 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
+ if (!net_eq(sock_net(sk), net))
+ goto out;
err = sock_diag_check_cookie(sk, req->udiag_cookie);
if (err)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f38ed490e42b..29bf453a0d5f 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
return -EOPNOTSUPP;
if (wdev->current_bss) {
- if (!prev_bssid)
- return -EALREADY;
- if (prev_bssid &&
- !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
- return -ENOTCONN;
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -1106,11 +1101,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
- if (WARN_ON(wdev->connect_keys)) {
- kzfree(wdev->connect_keys);
- wdev->connect_keys = NULL;
+ /*
+ * If we have an ssid_len, we're trying to connect or are
+ * already connected, so reject a new SSID unless it's the
+ * same (which is the case for re-association).
+ */
+ if (wdev->ssid_len &&
+ (wdev->ssid_len != connect->ssid_len ||
+ memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+ return -EALREADY;
+
+ /*
+ * If connected, reject (re-)association unless prev_bssid
+ * matches the current BSSID.
+ */
+ if (wdev->current_bss) {
+ if (!prev_bssid)
+ return -EALREADY;
+ if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+ return -ENOTCONN;
}
+ /*
+ * Reject if we're in the process of connecting with WEP;
+ * this case isn't very interesting and trying to handle
+ * it would make the code much more complex.
+ */
+ if (wdev->connect_keys)
+ return -EINPROGRESS;
+
cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
rdev->wiphy.ht_capa_mod_mask);
@@ -1161,7 +1180,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
if (err) {
wdev->connect_keys = NULL;
- wdev->ssid_len = 0;
+ /*
+ * This could be reassoc getting refused, don't clear
+ * ssid_len in that case.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
return err;
}
@@ -1188,6 +1212,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
else if (wdev->ssid_len)
err = rdev_disconnect(rdev, dev, reason);
+ /*
+ * Clear ssid_len unless we actually were fully connected,
+ * in which case cfg80211_disconnected() will take care of
+ * this later.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
+
return err;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4838329bb43a..b669c624a1ec 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1572,6 +1572,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto put_states;
}
+ if (!dst_prev)
+ dst0 = dst1;
+ else
+ /* Ref count is taken during xfrm_alloc_dst()
+ * No need to do dst_clone() on dst1
+ */
+ dst_prev->child = dst1;
+
if (xfrm[i]->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(xfrm[i],
xfrm_af2proto(family));
@@ -1583,14 +1591,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
} else
inner_mode = xfrm[i]->inner_mode;
- if (!dst_prev)
- dst0 = dst1;
- else
- /* Ref count is taken during xfrm_alloc_dst()
- * No need to do dst_clone() on dst1
- */
- dst_prev->child = dst1;
-
xdst->route = dst;
dst_copy_metrics(dst1, dst);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b997f1395357..e44a0fed48dd 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1693,32 +1693,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
}
+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+ BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+ xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+ return 0;
+}
+
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct xfrm_dump_info info;
- BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
- sizeof(cb->args) - sizeof(cb->args[0]));
-
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
- if (!cb->args[0]) {
- cb->args[0] = 1;
- xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
- }
-
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
@@ -2474,6 +2476,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
@@ -2487,6 +2490,7 @@ static const struct xfrm_link {
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+ .start = xfrm_dump_policy_start,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2539,6 +2543,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct netlink_dump_control c = {
+ .start = link->start,
.dump = link->dump,
.done = link->done,
};
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 446beb7ac48d..5522692100ba 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,7 +78,7 @@ static int simple_thread_fn(void *arg)
}
static DEFINE_MUTEX(thread_mutex);
-static bool simple_thread_cnt;
+static int simple_thread_cnt;
int foo_bar_reg(void)
{
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 16923ba4b5b1..756d14f0d763 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -97,7 +97,6 @@ vmlinux.o: FORCE
$(call cmd,kernel-mod)
# Declare generated files as targets for modpost
-$(symverfile): __modpost ;
$(modules:.ko=.mod.c): __modpost ;
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index c6ad9b1585a1..57263f2f8f2f 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -105,7 +105,6 @@ int main(void)
DEVID_FIELD(input_device_id, sndbit);
DEVID_FIELD(input_device_id, ffbit);
DEVID_FIELD(input_device_id, swbit);
- DEVID_FIELD(input_device_id, propbit);
DEVID(eisa_device_id);
DEVID_FIELD(eisa_device_id, sig);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 2b9395501d81..6ef6e63f96fd 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -761,7 +761,7 @@ static void do_input(char *alias,
sprintf(alias + strlen(alias), "%X,*", i);
}
-/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwXprX where X is comma-separated %02X. */
+/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */
static int do_input_entry(const char *filename, void *symval,
char *alias)
{
@@ -779,7 +779,6 @@ static int do_input_entry(const char *filename, void *symval,
DEF_FIELD_ADDR(symval, input_device_id, sndbit);
DEF_FIELD_ADDR(symval, input_device_id, ffbit);
DEF_FIELD_ADDR(symval, input_device_id, swbit);
- DEF_FIELD_ADDR(symval, input_device_id, propbit);
sprintf(alias, "input:");
@@ -817,9 +816,6 @@ static int do_input_entry(const char *filename, void *symval,
sprintf(alias + strlen(alias), "w*");
if (flags & INPUT_DEVICE_ID_MATCH_SWBIT)
do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
- sprintf(alias + strlen(alias), "pr*");
- if (flags & INPUT_DEVICE_ID_MATCH_PROPBIT)
- do_input(alias, *propbit, 0, INPUT_DEVICE_ID_PROP_MAX);
return 1;
}
ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index d5b291e94264..9cdec70d72b8 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -1,6 +1,5 @@
#
# Generated include files
#
-net_names.h
capability_names.h
rlim_names.h
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index dafdd387d42b..81a34426d024 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -4,44 +4,11 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
- resource.o secid.o file.o policy_ns.o label.o mount.o net.o
+ resource.o secid.o file.o policy_ns.o label.o mount.o
apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
-clean-files := capability_names.h rlim_names.h net_names.h
+clean-files := capability_names.h rlim_names.h
-# Build a lower case string table of address family names
-# Transform lines from
-# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
-# #define AF_INET 2 /* Internet IP Protocol */
-# to
-# [1] = "local",
-# [2] = "inet",
-#
-# and build the securityfs entries for the mapping.
-# Transforms lines from
-# #define AF_INET 2 /* Internet IP Protocol */
-# to
-# #define AA_SFS_AF_MASK "local inet"
-quiet_cmd_make-af = GEN $@
-cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
- sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
- 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
- echo "};" >> $@ ;\
- printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\
- sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
- 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\
- $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
-
-# Build a lower case string table of sock type names
-# Transform lines from
-# SOCK_STREAM = 1,
-# to
-# [1] = "stream",
-quiet_cmd_make-sock = GEN $@
-cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\
- sed $^ >>$@ -r -n \
- -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
- echo "};" >> $@
# Build a lower case string table of capability names
# Transforms lines from
@@ -94,7 +61,6 @@ cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
$(obj)/capability.o : $(obj)/capability_names.h
-$(obj)/net.o : $(obj)/net_names.h
$(obj)/resource.o : $(obj)/rlim_names.h
$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
$(src)/Makefile
@@ -102,8 +68,3 @@ $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
$(src)/Makefile
$(call cmd,make-rlim)
-$(obj)/net_names.h : $(srctree)/include/linux/socket.h \
- $(srctree)/include/linux/net.h \
- $(src)/Makefile
- $(call cmd,make-af)
- $(call cmd,make-sock)
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 518d5928661b..caaf51dda648 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -2202,7 +2202,6 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("policy", aa_sfs_entry_policy),
AA_SFS_DIR("domain", aa_sfs_entry_domain),
AA_SFS_DIR("file", aa_sfs_entry_file),
- AA_SFS_DIR("network", aa_sfs_entry_network),
AA_SFS_DIR("mount", aa_sfs_entry_mount),
AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index db80221891c6..3382518b87fa 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -21,7 +21,6 @@
#include "include/context.h"
#include "include/file.h"
#include "include/match.h"
-#include "include/net.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/label.h"
@@ -567,32 +566,6 @@ static int __file_path_perm(const char *op, struct aa_label *label,
return error;
}
-static int __file_sock_perm(const char *op, struct aa_label *label,
- struct aa_label *flabel, struct file *file,
- u32 request, u32 denied)
-{
- struct socket *sock = (struct socket *) file->private_data;
- int error;
-
- AA_BUG(!sock);
-
- /* revalidation due to label out of date. No revocation at this time */
- if (!denied && aa_label_is_subset(flabel, label))
- return 0;
-
- /* TODO: improve to skip profiles cached in flabel */
- error = aa_sock_file_perm(label, op, request, sock);
- if (denied) {
- /* TODO: improve to skip profiles checked above */
- /* check every profile in file label to is cached */
- last_error(error, aa_sock_file_perm(flabel, op, request, sock));
- }
- if (!error)
- update_file_ctx(file_ctx(file), label, request);
-
- return error;
-}
-
/**
* aa_file_perm - do permission revalidation check & audit for @file
* @op: operation being checked
@@ -637,9 +610,6 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
error = __file_path_perm(op, label, flabel, file, request,
denied);
- else if (S_ISSOCK(file_inode(file)->i_mode))
- error = __file_sock_perm(op, label, flabel, file, request,
- denied);
done:
rcu_read_unlock();
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index ff4316e1068d..620e81169659 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -121,29 +121,21 @@ struct apparmor_audit_data {
/* these entries require a custom callback fn */
struct {
struct aa_label *peer;
- union {
- struct {
- kuid_t ouid;
- const char *target;
- } fs;
- struct {
- int type, protocol;
- struct sock *peer_sk;
- void *addr;
- int addrlen;
- } net;
- int signal;
- struct {
- int rlim;
- unsigned long max;
- } rlim;
- };
+ struct {
+ const char *target;
+ kuid_t ouid;
+ } fs;
};
struct {
struct aa_profile *profile;
const char *ns;
long pos;
} iface;
+ int signal;
+ struct {
+ int rlim;
+ unsigned long max;
+ } rlim;
struct {
const char *src_name;
const char *type;
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
deleted file mode 100644
index 140c8efcf364..000000000000
--- a/security/apparmor/include/net.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * AppArmor security module
- *
- * This file contains AppArmor network mediation definitions.
- *
- * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2017 Canonical Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- */
-
-#ifndef __AA_NET_H
-#define __AA_NET_H
-
-#include <net/sock.h>
-#include <linux/path.h>
-
-#include "apparmorfs.h"
-#include "label.h"
-#include "perms.h"
-#include "policy.h"
-
-#define AA_MAY_SEND AA_MAY_WRITE
-#define AA_MAY_RECEIVE AA_MAY_READ
-
-#define AA_MAY_SHUTDOWN AA_MAY_DELETE
-
-#define AA_MAY_CONNECT AA_MAY_OPEN
-#define AA_MAY_ACCEPT 0x00100000
-
-#define AA_MAY_BIND 0x00200000
-#define AA_MAY_LISTEN 0x00400000
-
-#define AA_MAY_SETOPT 0x01000000
-#define AA_MAY_GETOPT 0x02000000
-
-#define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
- AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \
- AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \
- AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT)
-
-#define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
- AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\
- AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \
- AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \
- AA_MAY_MPROT)
-
-#define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \
- AA_MAY_ACCEPT)
-struct aa_sk_ctx {
- struct aa_label *label;
- struct aa_label *peer;
- struct path path;
-};
-
-#define SK_CTX(X) ((X)->sk_security)
-#define SOCK_ctx(X) SOCK_INODE(X)->i_security
-#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
- struct lsm_network_audit NAME ## _net = { .sk = (SK), \
- .family = (F)}; \
- DEFINE_AUDIT_DATA(NAME, \
- ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \
- LSM_AUDIT_DATA_NONE, \
- OP); \
- NAME.u.net = &(NAME ## _net); \
- aad(&NAME)->net.type = (T); \
- aad(&NAME)->net.protocol = (P)
-
-#define DEFINE_AUDIT_SK(NAME, OP, SK) \
- DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
- (SK)->sk_protocol)
-
-/* struct aa_net - network confinement data
- * @allow: basic network families permissions
- * @audit: which network permissions to force audit
- * @quiet: which network permissions to quiet rejects
- */
-struct aa_net {
- u16 allow[AF_MAX];
- u16 audit[AF_MAX];
- u16 quiet[AF_MAX];
-};
-
-
-extern struct aa_sfs_entry aa_sfs_entry_network[];
-
-void audit_net_cb(struct audit_buffer *ab, void *va);
-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
- u32 request, u16 family, int type);
-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
- int type, int protocol);
-static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
- struct common_audit_data *sa,
- u32 request,
- struct sock *sk)
-{
- return aa_profile_af_perm(profile, sa, request, sk->sk_family,
- sk->sk_type);
-}
-int aa_sk_perm(const char *op, u32 request, struct sock *sk);
-
-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
- struct socket *sock);
-
-
-static inline void aa_free_net_rules(struct aa_net *new)
-{
- /* NOP */
-}
-
-#endif /* __AA_NET_H */
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index af04d5a7d73d..2b27bb79aec4 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -135,10 +135,9 @@ extern struct aa_perms allperms;
void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
-void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
- u32 mask);
+void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
- u32 chrsmask, const char * const *names, u32 namesmask);
+ u32 chrsmask, const char **names, u32 namesmask);
void aa_apply_modes_to_perms(struct aa_profile *profile,
struct aa_perms *perms);
void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 4364088a0b9e..17fe41a9cac3 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -30,7 +30,6 @@
#include "file.h"
#include "lib.h"
#include "label.h"
-#include "net.h"
#include "perms.h"
#include "resource.h"
@@ -112,7 +111,6 @@ struct aa_data {
* @policy: general match rules governing policy
* @file: The set of rules governing basic file access and domain transitions
* @caps: capabilities for the profile
- * @net: network controls for the profile
* @rlimits: rlimits for the profile
*
* @dents: dentries for the profiles file entries in apparmorfs
@@ -150,7 +148,6 @@ struct aa_profile {
struct aa_policydb policy;
struct aa_file_rules file;
struct aa_caps caps;
- struct aa_net net;
struct aa_rlimit rlimits;
struct aa_loaddata *rawdata;
@@ -223,16 +220,6 @@ static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile,
return 0;
}
-static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
- u16 AF) {
- unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
- u16 be_af = cpu_to_be16(AF);
-
- if (!state)
- return 0;
- return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2);
-}
-
/**
* aa_get_profile - increment refcount on profile @p
* @p: profile (MAYBE NULL)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 8818621b5d95..08ca26bcca77 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -211,8 +211,7 @@ void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask)
*str = '\0';
}
-void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
- u32 mask)
+void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask)
{
const char *fmt = "%s";
unsigned int i, perm = 1;
@@ -230,7 +229,7 @@ void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
}
void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
- u32 chrsmask, const char * const *names, u32 namesmask)
+ u32 chrsmask, const char **names, u32 namesmask)
{
char str[33];
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 72b915dfcaf7..1346ee5be04f 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -33,7 +33,6 @@
#include "include/context.h"
#include "include/file.h"
#include "include/ipc.h"
-#include "include/net.h"
#include "include/path.h"
#include "include/label.h"
#include "include/policy.h"
@@ -737,368 +736,6 @@ static int apparmor_task_kill(struct task_struct *target, struct siginfo *info,
return error;
}
-/**
- * apparmor_sk_alloc_security - allocate and attach the sk_security field
- */
-static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
-{
- struct aa_sk_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), flags);
- if (!ctx)
- return -ENOMEM;
-
- SK_CTX(sk) = ctx;
-
- return 0;
-}
-
-/**
- * apparmor_sk_free_security - free the sk_security field
- */
-static void apparmor_sk_free_security(struct sock *sk)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
-
- SK_CTX(sk) = NULL;
- aa_put_label(ctx->label);
- aa_put_label(ctx->peer);
- path_put(&ctx->path);
- kfree(ctx);
-}
-
-/**
- * apparmor_clone_security - clone the sk_security field
- */
-static void apparmor_sk_clone_security(const struct sock *sk,
- struct sock *newsk)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
- struct aa_sk_ctx *new = SK_CTX(newsk);
-
- new->label = aa_get_label(ctx->label);
- new->peer = aa_get_label(ctx->peer);
- new->path = ctx->path;
- path_get(&new->path);
-}
-
-static int aa_sock_create_perm(struct aa_label *label, int family, int type,
- int protocol)
-{
- AA_BUG(!label);
- AA_BUG(in_interrupt());
-
- return aa_af_perm(label, OP_CREATE, AA_MAY_CREATE, family, type,
- protocol);
-}
-
-
-/**
- * apparmor_socket_create - check perms before creating a new socket
- */
-static int apparmor_socket_create(int family, int type, int protocol, int kern)
-{
- struct aa_label *label;
- int error = 0;
-
- label = begin_current_label_crit_section();
- if (!(kern || unconfined(label)))
- error = aa_sock_create_perm(label, family, type, protocol);
- end_current_label_crit_section(label);
-
- return error;
-}
-
-/**
- * apparmor_socket_post_create - setup the per-socket security struct
- *
- * Note:
- * - kernel sockets currently labeled unconfined but we may want to
- * move to a special kernel label
- * - socket may not have sk here if created with sock_create_lite or
- * sock_alloc. These should be accept cases which will be handled in
- * sock_graft.
- */
-static int apparmor_socket_post_create(struct socket *sock, int family,
- int type, int protocol, int kern)
-{
- struct aa_label *label;
-
- if (kern) {
- struct aa_ns *ns = aa_get_current_ns();
-
- label = aa_get_label(ns_unconfined(ns));
- aa_put_ns(ns);
- } else
- label = aa_get_current_label();
-
- if (sock->sk) {
- struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
-
- aa_put_label(ctx->label);
- ctx->label = aa_get_label(label);
- }
- aa_put_label(label);
-
- return 0;
-}
-
-/**
- * apparmor_socket_bind - check perms before bind addr to socket
- */
-static int apparmor_socket_bind(struct socket *sock,
- struct sockaddr *address, int addrlen)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!address);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk);
-}
-
-/**
- * apparmor_socket_connect - check perms before connecting @sock to @address
- */
-static int apparmor_socket_connect(struct socket *sock,
- struct sockaddr *address, int addrlen)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!address);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk);
-}
-
-/**
- * apparmor_socket_list - check perms before allowing listen
- */
-static int apparmor_socket_listen(struct socket *sock, int backlog)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk);
-}
-
-/**
- * apparmor_socket_accept - check perms before accepting a new connection.
- *
- * Note: while @newsock is created and has some information, the accept
- * has not been done.
- */
-static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!newsock);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk);
-}
-
-static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
- struct msghdr *msg, int size)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!msg);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(op, request, sock->sk);
-}
-
-/**
- * apparmor_socket_sendmsg - check perms before sending msg to another socket
- */
-static int apparmor_socket_sendmsg(struct socket *sock,
- struct msghdr *msg, int size)
-{
- return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
-}
-
-/**
- * apparmor_socket_recvmsg - check perms before receiving a message
- */
-static int apparmor_socket_recvmsg(struct socket *sock,
- struct msghdr *msg, int size, int flags)
-{
- return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
-}
-
-/* revaliation, get/set attr, shutdown */
-static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(op, request, sock->sk);
-}
-
-/**
- * apparmor_socket_getsockname - check perms before getting the local address
- */
-static int apparmor_socket_getsockname(struct socket *sock)
-{
- return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
-}
-
-/**
- * apparmor_socket_getpeername - check perms before getting remote address
- */
-static int apparmor_socket_getpeername(struct socket *sock)
-{
- return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
-}
-
-/* revaliation, get/set attr, opt */
-static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
- int level, int optname)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(op, request, sock->sk);
-}
-
-/**
- * apparmor_getsockopt - check perms before getting socket options
- */
-static int apparmor_socket_getsockopt(struct socket *sock, int level,
- int optname)
-{
- return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
- level, optname);
-}
-
-/**
- * apparmor_setsockopt - check perms before setting socket options
- */
-static int apparmor_socket_setsockopt(struct socket *sock, int level,
- int optname)
-{
- return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
- level, optname);
-}
-
-/**
- * apparmor_socket_shutdown - check perms before shutting down @sock conn
- */
-static int apparmor_socket_shutdown(struct socket *sock, int how)
-{
- return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
-}
-
-/**
- * apparmor_socket_sock_recv_skb - check perms before associating skb to sk
- *
- * Note: can not sleep may be called with locks held
- *
- * dont want protocol specific in __skb_recv_datagram()
- * to deny an incoming connection socket_sock_rcv_skb()
- */
-static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
- return 0;
-}
-
-
-static struct aa_label *sk_peer_label(struct sock *sk)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
-
- if (ctx->peer)
- return ctx->peer;
-
- return ERR_PTR(-ENOPROTOOPT);
-}
-
-/**
- * apparmor_socket_getpeersec_stream - get security context of peer
- *
- * Note: for tcp only valid if using ipsec or cipso on lan
- */
-static int apparmor_socket_getpeersec_stream(struct socket *sock,
- char __user *optval,
- int __user *optlen,
- unsigned int len)
-{
- char *name;
- int slen, error = 0;
- struct aa_label *label;
- struct aa_label *peer;
-
- label = begin_current_label_crit_section();
- peer = sk_peer_label(sock->sk);
- if (IS_ERR(peer)) {
- error = PTR_ERR(peer);
- goto done;
- }
- slen = aa_label_asxprint(&name, labels_ns(label), peer,
- FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
- FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
- /* don't include terminating \0 in slen, it breaks some apps */
- if (slen < 0) {
- error = -ENOMEM;
- } else {
- if (slen > len) {
- error = -ERANGE;
- } else if (copy_to_user(optval, name, slen)) {
- error = -EFAULT;
- goto out;
- }
- if (put_user(slen, optlen))
- error = -EFAULT;
-out:
- kfree(name);
-
- }
-
-done:
- end_current_label_crit_section(label);
-
- return error;
-}
-
-/**
- * apparmor_socket_getpeersec_dgram - get security label of packet
- * @sock: the peer socket
- * @skb: packet data
- * @secid: pointer to where to put the secid of the packet
- *
- * Sets the netlabel socket state on sk from parent
- */
-static int apparmor_socket_getpeersec_dgram(struct socket *sock,
- struct sk_buff *skb, u32 *secid)
-
-{
- /* TODO: requires secid support */
- return -ENOPROTOOPT;
-}
-
-/**
- * apparmor_sock_graft - Initialize newly created socket
- * @sk: child sock
- * @parent: parent socket
- *
- * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
- * just set sk security information off of current creating process label
- * Labeling of sk for accept case - probably should be sock based
- * instead of task, because of the case where an implicitly labeled
- * socket is shared by different tasks.
- */
-static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
-
- if (!ctx->label)
- ctx->label = aa_get_current_label();
-}
-
static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1133,30 +770,6 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
- LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
- LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
- LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
-
- LSM_HOOK_INIT(socket_create, apparmor_socket_create),
- LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
- LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
- LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
- LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
- LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
- LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
- LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
- LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
- LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
- LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
- LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
- LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
- LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
- LSM_HOOK_INIT(socket_getpeersec_stream,
- apparmor_socket_getpeersec_stream),
- LSM_HOOK_INIT(socket_getpeersec_dgram,
- apparmor_socket_getpeersec_dgram),
- LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
-
LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
LSM_HOOK_INIT(cred_free, apparmor_cred_free),
LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
deleted file mode 100644
index 33d54435f8d6..000000000000
--- a/security/apparmor/net.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * AppArmor security module
- *
- * This file contains AppArmor network mediation
- *
- * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2017 Canonical Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- */
-
-#include "include/apparmor.h"
-#include "include/audit.h"
-#include "include/context.h"
-#include "include/label.h"
-#include "include/net.h"
-#include "include/policy.h"
-
-#include "net_names.h"
-
-
-struct aa_sfs_entry aa_sfs_entry_network[] = {
- AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
- { }
-};
-
-static const char * const net_mask_names[] = {
- "unknown",
- "send",
- "receive",
- "unknown",
-
- "create",
- "shutdown",
- "connect",
- "unknown",
-
- "setattr",
- "getattr",
- "setcred",
- "getcred",
-
- "chmod",
- "chown",
- "chgrp",
- "lock",
-
- "mmap",
- "mprot",
- "unknown",
- "unknown",
-
- "accept",
- "bind",
- "listen",
- "unknown",
-
- "setopt",
- "getopt",
- "unknown",
- "unknown",
-
- "unknown",
- "unknown",
- "unknown",
- "unknown",
-};
-
-
-/* audit callback for net specific fields */
-void audit_net_cb(struct audit_buffer *ab, void *va)
-{
- struct common_audit_data *sa = va;
-
- audit_log_format(ab, " family=");
- if (address_family_names[sa->u.net->family])
- audit_log_string(ab, address_family_names[sa->u.net->family]);
- else
- audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family);
- audit_log_format(ab, " sock_type=");
- if (sock_type_names[aad(sa)->net.type])
- audit_log_string(ab, sock_type_names[aad(sa)->net.type]);
- else
- audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type);
- audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
-
- if (aad(sa)->request & NET_PERMS_MASK) {
- audit_log_format(ab, " requested_mask=");
- aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
- net_mask_names, NET_PERMS_MASK);
-
- if (aad(sa)->denied & NET_PERMS_MASK) {
- audit_log_format(ab, " denied_mask=");
- aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
- net_mask_names, NET_PERMS_MASK);
- }
- }
- if (aad(sa)->peer) {
- audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
- FLAGS_NONE, GFP_ATOMIC);
- }
-}
-
-
-/* Generic af perm */
-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
- u32 request, u16 family, int type)
-{
- struct aa_perms perms = { };
-
- AA_BUG(family >= AF_MAX);
- AA_BUG(type < 0 || type >= SOCK_MAX);
-
- if (profile_unconfined(profile))
- return 0;
-
- perms.allow = (profile->net.allow[family] & (1 << type)) ?
- ALL_PERMS_MASK : 0;
- perms.audit = (profile->net.audit[family] & (1 << type)) ?
- ALL_PERMS_MASK : 0;
- perms.quiet = (profile->net.quiet[family] & (1 << type)) ?
- ALL_PERMS_MASK : 0;
- aa_apply_modes_to_perms(profile, &perms);
-
- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
-}
-
-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
- int type, int protocol)
-{
- struct aa_profile *profile;
- DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
-
- return fn_for_each_confined(label, profile,
- aa_profile_af_perm(profile, &sa, request, family,
- type));
-}
-
-static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
- struct sock *sk)
-{
- struct aa_profile *profile;
- DEFINE_AUDIT_SK(sa, op, sk);
-
- AA_BUG(!label);
- AA_BUG(!sk);
-
- if (unconfined(label))
- return 0;
-
- return fn_for_each_confined(label, profile,
- aa_profile_af_sk_perm(profile, &sa, request, sk));
-}
-
-int aa_sk_perm(const char *op, u32 request, struct sock *sk)
-{
- struct aa_label *label;
- int error;
-
- AA_BUG(!sk);
- AA_BUG(in_interrupt());
-
- /* TODO: switch to begin_current_label ???? */
- label = begin_current_label_crit_section();
- error = aa_label_sk_perm(label, op, request, sk);
- end_current_label_crit_section(label);
-
- return error;
-}
-
-
-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
- struct socket *sock)
-{
- AA_BUG(!label);
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
-
- return aa_label_sk_perm(label, op, request, sock->sk);
-}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 5a2aec358322..4ede87c30f8b 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -275,19 +275,6 @@ fail:
return 0;
}
-static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name)
-{
- if (unpack_nameX(e, AA_U16, name)) {
- if (!inbounds(e, sizeof(u16)))
- return 0;
- if (data)
- *data = le16_to_cpu(get_unaligned((__le16 *) e->pos));
- e->pos += sizeof(u16);
- return 1;
- }
- return 0;
-}
-
static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
if (unpack_nameX(e, AA_U32, name)) {
@@ -597,7 +584,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
struct aa_profile *profile = NULL;
const char *tmpname, *tmpns = NULL, *name = NULL;
const char *info = "failed to unpack profile";
- size_t size = 0, ns_len;
+ size_t ns_len;
struct rhashtable_params params = { 0 };
char *key = NULL;
struct aa_data *data;
@@ -730,38 +717,6 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
}
- size = unpack_array(e, "net_allowed_af");
- if (size) {
-
- for (i = 0; i < size; i++) {
- /* discard extraneous rules that this kernel will
- * never request
- */
- if (i >= AF_MAX) {
- u16 tmp;
-
- if (!unpack_u16(e, &tmp, NULL) ||
- !unpack_u16(e, &tmp, NULL) ||
- !unpack_u16(e, &tmp, NULL))
- goto fail;
- continue;
- }
- if (!unpack_u16(e, &profile->net.allow[i], NULL))
- goto fail;
- if (!unpack_u16(e, &profile->net.audit[i], NULL))
- goto fail;
- if (!unpack_u16(e, &profile->net.quiet[i], NULL))
- goto fail;
- }
- if (!unpack_nameX(e, AA_ARRAYEND, NULL))
- goto fail;
- }
- if (VERSION_LT(e->version, v7)) {
- /* pre v7 policy always allowed these */
- profile->net.allow[AF_UNIX] = 0xffff;
- profile->net.allow[AF_NETLINK] = 0xffff;
- }
-
if (unpack_nameX(e, AA_STRUCT, "policydb")) {
/* generic policy dfa - optional and may be NULL */
info = "failed to unpack policydb";
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0ce71111b4e3..546d515f3c1f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0215:
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
case 0x10ec0282:
@@ -911,6 +912,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
{ 0x10ec0275, 0x1028, 0, "ALC3260" },
{ 0x10ec0899, 0x1028, 0, "ALC3861" },
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+ { 0x10ec0236, 0x1028, 0, "ALC3204" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
{ 0x10ec0225, 0x1028, 0, "ALC3253" },
{ 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3930,6 +3932,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0255_1);
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
@@ -4028,6 +4031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
};
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -4160,6 +4164,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
break;
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4256,6 +4261,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4366,6 +4372,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4451,6 +4458,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4705,6 +4713,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, alc255fw);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, alc256fw);
break;
@@ -6419,6 +6428,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x1b, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170150},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -6806,6 +6823,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
+ case 0x10ec0236:
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
spec->shutup = alc256_shutup;
@@ -7857,6 +7875,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index f650346aaa1a..4a4b6e78c977 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -887,8 +887,8 @@ struct xdp_md {
};
enum sk_action {
- SK_ABORTED = 0,
- SK_DROP,
+ SK_DROP = 0,
+ SK_PASS,
SK_REDIRECT,
};
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a0c518ecf085..c0e26ad1fa7e 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -267,12 +267,13 @@ static int decode_instructions(struct objtool_file *file)
&insn->immediate,
&insn->stack_op);
if (ret)
- return ret;
+ goto err;
if (!insn->type || insn->type > INSN_LAST) {
WARN_FUNC("invalid instruction type %d",
insn->sec, insn->offset, insn->type);
- return -1;
+ ret = -1;
+ goto err;
}
hash_add(file->insn_hash, &insn->hash, insn->offset);
@@ -296,6 +297,10 @@ static int decode_instructions(struct objtool_file *file)
}
return 0;
+
+err:
+ free(insn);
+ return ret;
}
/*
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e397453e5a46..63526f4416ea 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -8,8 +8,8 @@ perf-record - Run a command and record its profile into perf.data
SYNOPSIS
--------
[verse]
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+'perf record' [-e <EVENT> | --event=EVENT] [-a] <command>
+'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
DESCRIPTION
-----------
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 462fc755092e..7a84d73324e3 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,6 +10,9 @@
. $(dirname $0)/lib/probe.sh
+ld=$(realpath /lib64/ld*.so.* | uniq)
+libc=$(echo $ld | sed 's/ld/libc/g')
+
trace_libc_inet_pton_backtrace() {
idx=0
expected[0]="PING.*bytes"
@@ -18,8 +21,8 @@ trace_libc_inet_pton_backtrace() {
expected[3]=".*packets transmitted.*"
expected[4]="rtt min.*"
expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
- expected[6]=".*inet_pton[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
- expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
+ expected[6]=".*inet_pton[[:space:]]\($libc\)$"
+ expected[7]="getaddrinfo[[:space:]]\($libc\)$"
expected[8]=".*\(.*/bin/ping.*\)$"
perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
@@ -35,7 +38,7 @@ trace_libc_inet_pton_backtrace() {
}
skip_if_no_perf_probe && \
-perf probe -q /lib64/libc-*.so inet_pton && \
+perf probe -q $libc inet_pton && \
trace_libc_inet_pton_backtrace
err=$?
rm -f ${file}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index ddb2c6fbdf91..db79017a6e56 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -532,7 +532,7 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
- list_del(&format->list);
+ list_del_init(&format->list);
}
void perf_hpp__cancel_cumulate(void)
@@ -606,6 +606,13 @@ next:
static void fmt_free(struct perf_hpp_fmt *fmt)
{
+ /*
+ * At this point fmt should be completely
+ * unhooked, if not it's a bug.
+ */
+ BUG_ON(!list_empty(&fmt->list));
+ BUG_ON(!list_empty(&fmt->sort_list));
+
if (fmt->free)
fmt->free(fmt);
}
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index c42edeac451f..dcfdafdc2f1c 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -8,6 +8,9 @@
%{
#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include "../perf.h"
#include "parse-events.h"
#include "parse-events-bison.h"
@@ -53,9 +56,8 @@ static int str(yyscan_t scanner, int token)
return token;
}
-static bool isbpf(yyscan_t scanner)
+static bool isbpf_suffix(char *text)
{
- char *text = parse_events_get_text(scanner);
int len = strlen(text);
if (len < 2)
@@ -68,6 +70,17 @@ static bool isbpf(yyscan_t scanner)
return false;
}
+static bool isbpf(yyscan_t scanner)
+{
+ char *text = parse_events_get_text(scanner);
+ struct stat st;
+
+ if (!isbpf_suffix(text))
+ return false;
+
+ return stat(text, &st) == 0;
+}
+
/*
* This function is called when the parser gets two kind of input:
*
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a7ebd9fe8e40..76ab0709a20c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -374,6 +374,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->mmap2 = process_event_stub;
if (tool->comm == NULL)
tool->comm = process_event_stub;
+ if (tool->namespaces == NULL)
+ tool->namespaces = process_event_stub;
if (tool->fork == NULL)
tool->fork = process_event_stub;
if (tool->exit == NULL)
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
index 4ba726c90870..54af60462130 100644
--- a/tools/perf/util/xyarray.h
+++ b/tools/perf/util/xyarray.h
@@ -23,12 +23,12 @@ static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
static inline int xyarray__max_y(struct xyarray *xy)
{
- return xy->max_x;
+ return xy->max_y;
}
static inline int xyarray__max_x(struct xyarray *xy)
{
- return xy->max_y;
+ return xy->max_x;
}
#endif /* _PERF_XYARRAY_H_ */
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 4c5a481a850c..d6e1c02ddcfe 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -26,7 +26,7 @@ endif
ifneq ($(OUTPUT),)
# check that the output directory actually exists
-OUTDIR := $(realpath $(OUTPUT))
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 9dc8f078a83c..1e8b6116ba3c 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -1,7 +1,7 @@
ifneq ($(O),)
ifeq ($(origin O), command line)
- ABSOLUTE_O := $(realpath $(O))
- dummy := $(if $(ABSOLUTE_O),,$(error O=$(O) does not exist))
+ dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
+ ABSOLUTE_O := $(shell cd $(O) ; pwd)
OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
COMMAND_O := O=$(ABSOLUTE_O)
ifeq ($(objtree),)
@@ -12,7 +12,7 @@ endif
# check that the output directory actually exists
ifneq ($(OUTPUT),)
-OUTDIR := $(realpath $(OUTPUT))
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index c727b96a59b0..5fa02d86b35f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -17,5 +17,26 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
+ },
+ {
+ "id": "d052",
+ "name": "Add 1M filters with the same action",
+ "category": [
+ "filter",
+ "flower"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
+ ],
+ "cmdUnderTest": "$TC -b $BATCH_FILE",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm $BATCH_FILE"
+ ]
}
-]
\ No newline at end of file
+]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index d2391df2cf7d..5508730ee1ea 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -88,7 +88,7 @@ def prepare_env(cmdlist):
exit(1)
-def test_runner(filtered_tests):
+def test_runner(filtered_tests, args):
"""
Driver function for the unit tests.
@@ -105,6 +105,8 @@ def test_runner(filtered_tests):
for tidx in testlist:
result = True
tresult = ""
+ if "flower" in tidx["category"] and args.device == None:
+ continue
print("Test " + tidx["id"] + ": " + tidx["name"])
prepare_env(tidx["setup"])
(p, procout) = exec_cmd(tidx["cmdUnderTest"])
@@ -152,6 +154,10 @@ def ns_create():
exec_cmd(cmd, False)
cmd = 'ip -s $NS link set $DEV1 up'
exec_cmd(cmd, False)
+ cmd = 'ip link set $DEV2 netns $NS'
+ exec_cmd(cmd, False)
+ cmd = 'ip -s $NS link set $DEV2 up'
+ exec_cmd(cmd, False)
def ns_destroy():
@@ -211,7 +217,8 @@ def set_args(parser):
help='Execute the single test case with specified ID')
parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
help='Generate ID numbers for new test cases')
- return parser
+ parser.add_argument('-d', '--device',
+ help='Execute the test case in flower category')
return parser
@@ -225,6 +232,8 @@ def check_default_settings(args):
if args.path != None:
NAMES['TC'] = args.path
+ if args.device != None:
+ NAMES['DEV2'] = args.device
if not os.path.isfile(NAMES['TC']):
print("The specified tc path " + NAMES['TC'] + " does not exist.")
exit(1)
@@ -381,14 +390,17 @@ def set_operation_mode(args):
if (len(alltests) == 0):
print("Cannot find a test case with ID matching " + target_id)
exit(1)
- catresults = test_runner(alltests)
+ catresults = test_runner(alltests, args)
print("All test results: " + "\n\n" + catresults)
elif (len(target_category) > 0):
+ if (target_category == "flower") and args.device == None:
+ print("Please specify a NIC device (-d) to run category flower")
+ exit(1)
if (target_category not in ucat):
print("Specified category is not present in this file.")
exit(1)
else:
- catresults = test_runner(testcases[target_category])
+ catresults = test_runner(testcases[target_category], args)
print("Category " + target_category + "\n\n" + catresults)
ns_destroy()
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
new file mode 100755
index 000000000000..707c6bfef689
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+
+"""
+tdc_batch.py - a script to generate TC batch file
+
+Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
+"""
+
+import argparse
+
+parser = argparse.ArgumentParser(description='TC batch file generator')
+parser.add_argument("device", help="device name")
+parser.add_argument("file", help="batch file name")
+parser.add_argument("-n", "--number", type=int,
+ help="how many lines in batch file")
+parser.add_argument("-o", "--skip_sw",
+ help="skip_sw (offload), by default skip_hw",
+ action="store_true")
+parser.add_argument("-s", "--share_action",
+ help="all filters share the same action",
+ action="store_true")
+parser.add_argument("-p", "--prio",
+ help="all filters have different prio",
+ action="store_true")
+args = parser.parse_args()
+
+device = args.device
+file = open(args.file, 'w')
+
+number = 1
+if args.number:
+ number = args.number
+
+skip = "skip_hw"
+if args.skip_sw:
+ skip = "skip_sw"
+
+share_action = ""
+if args.share_action:
+ share_action = "index 1"
+
+prio = "prio 1"
+if args.prio:
+ prio = ""
+ if number > 0x4000:
+ number = 0x4000
+
+index = 0
+for i in range(0x100):
+ for j in range(0x100):
+ for k in range(0x100):
+ mac = ("%02x:%02x:%02x" % (i, j, k))
+ src_mac = "e4:11:00:" + mac
+ dst_mac = "e4:12:00:" + mac
+ cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s "
+ "src_mac %s dst_mac %s action drop %s" %
+ (device, prio, skip, src_mac, dst_mac, share_action))
+ file.write("%s\n" % cmd)
+ index += 1
+ if index >= number:
+ file.close()
+ exit(0)
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index 01087375a7c3..b6352515c1b5 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -12,6 +12,8 @@ NAMES = {
# Name of veth devices to be created for the namespace
'DEV0': 'v0p0',
'DEV1': 'v0p1',
+ 'DEV2': '',
+ 'BATCH_FILE': './batch.txt',
# Name of the namespace to use
'NS': 'tcut'
}