author    | Takashi Iwai <tiwai@suse.de> | 2019-02-28 13:30:55 +0100
committer | Takashi Iwai <tiwai@suse.de> | 2019-02-28 13:30:55 +0100
commit    | 70395a96bd882d8dba669f99b5cec0008690accd (patch)
tree      | 37f3a4c2cf35828f153f3ce59c3daa4f39241c32
parent    | f97a0944a72b26a2bece72516294e112a890f98a (diff)
parent    | 3146089d235b304e665dc551472ae9cb9ab58572 (diff)
Merge tag 'asoc-v5.1-2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next
ASoC: More changes for v5.1
Another batch of changes for ASoC, no big core changes - it's mainly
small fixes and improvements for individual drivers.
- A big refresh and cleanup of the Samsung drivers, fixing a number of
  issues, which allows the drivers to be used with a wider range of
  userspaces.
- Fixes for the Intel drivers to make them more standard, so they are
  less likely to get bitten by core issues.
- New driver for Cirrus Logic CS35L36.
1035 files changed, 13554 insertions, 5682 deletions
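For reference, the merge subject above corresponds to pulling the ASoC tag from Mark Brown's sound tree into the local for-next branch. A minimal sketch, assuming an existing Linux checkout with a for-next branch (branch and remote URL are taken from the merge subject, everything else is illustrative)::

    git checkout for-next
    git pull https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound asoc-v5.1-2

The resulting merge commit records the two parents shown in the header: the previous for-next head and the tagged ASoC commit.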
@@ -842,10 +842,9 @@ D: ax25-utils maintainer. N: Helge Deller E: deller@gmx.de -E: hdeller@redhat.de -D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver -S: Schimmelsrain 1 -S: D-69231 Rauenberg +W: http://www.parisc-linux.org/ +D: PA-RISC Linux architecture maintainer +D: LASI-, ASP-, WAX-, LCD/LED-driver S: Germany N: Jean Delvare @@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape S: South Africa N: Grant Grundler -E: grundler@parisc-linux.org +E: grantgrundler@gmail.com W: http://obmouse.sourceforge.net/ W: http://www.parisc-linux.org/ D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver @@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206 S: USA N: Kyle McMartin -E: kyle@parisc-linux.org +E: kyle@mcmartin.ca D: Linux/PARISC hacker D: AD1889 sound driver S: Ottawa, Canada @@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct S: Cupertino, CA 95014 S: USA -N: Thibaut Varene -E: T-Bone@parisc-linux.org -W: http://www.parisc-linux.org/~varenet/ -P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063 +N: Thibaut Varène +E: hacks+kernel@slashdirt.org +W: http://hacks.slashdirt.org/ D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there D: AD1889 sound driver -S: Paris, France +S: France N: Heikki Vatiainen E: hessu@cs.tut.fi diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io index 9b642669cb16..169fe08a649b 100644 --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io @@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ cpld3_version Date: November 2018 -KernelVersion: 4.21 +KernelVersion: 5.0 Contact: Vadim Pasternak <vadimpmellanox.com> Description: These files show with which CPLD versions have been burned on LED board. @@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ jtag_enable Date: November 2018 -KernelVersion: 4.21 +KernelVersion: 5.0 Contact: Vadim Pasternak <vadimpmellanox.com> Description: These files enable and disable the access to the JTAG domain. By default access to the JTAG domain is disabled. @@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ reset_voltmon_upgrade_fail Date: November 2018 -KernelVersion: 4.21 +KernelVersion: 5.0 Contact: Vadim Pasternak <vadimpmellanox.com> Description: These files show the system reset cause, as following: ComEx power fail, reset from ComEx, system platform reset, reset diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst index 0797eec76be1..47e577264198 100644 --- a/Documentation/admin-guide/README.rst +++ b/Documentation/admin-guide/README.rst @@ -1,9 +1,9 @@ .. _readme: -Linux kernel release 4.x <http://kernel.org/> +Linux kernel release 5.x <http://kernel.org/> ============================================= -These are the release notes for Linux version 4. Read them carefully, +These are the release notes for Linux version 5. Read them carefully, as they tell you what this is all about, explain how to install the kernel, and what to do if something goes wrong. @@ -63,7 +63,7 @@ Installing the kernel source directory where you have permissions (e.g. your home directory) and unpack it:: - xz -cd linux-4.X.tar.xz | tar xvf - + xz -cd linux-5.x.tar.xz | tar xvf - Replace "X" with the version number of the latest kernel. @@ -72,26 +72,26 @@ Installing the kernel source files. 
They should match the library, and not get messed up by whatever the kernel-du-jour happens to be. - - You can also upgrade between 4.x releases by patching. Patches are + - You can also upgrade between 5.x releases by patching. Patches are distributed in the xz format. To install by patching, get all the newer patch files, enter the top level directory of the kernel source - (linux-4.X) and execute:: + (linux-5.x) and execute:: - xz -cd ../patch-4.x.xz | patch -p1 + xz -cd ../patch-5.x.xz | patch -p1 - Replace "x" for all versions bigger than the version "X" of your current + Replace "x" for all versions bigger than the version "x" of your current source tree, **in_order**, and you should be ok. You may want to remove the backup files (some-file-name~ or some-file-name.orig), and make sure that there are no failed patches (some-file-name# or some-file-name.rej). If there are, either you or I have made a mistake. - Unlike patches for the 4.x kernels, patches for the 4.x.y kernels + Unlike patches for the 5.x kernels, patches for the 5.x.y kernels (also known as the -stable kernels) are not incremental but instead apply - directly to the base 4.x kernel. For example, if your base kernel is 4.0 - and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1 - and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and - want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is, - patch -R) **before** applying the 4.0.3 patch. You can read more on this in + directly to the base 5.x kernel. For example, if your base kernel is 5.0 + and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1 + and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and + want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is, + patch -R) **before** applying the 5.0.3 patch. You can read more on this in :ref:`Documentation/process/applying-patches.rst <applying_patches>`. Alternatively, the script patch-kernel can be used to automate this @@ -114,7 +114,7 @@ Installing the kernel source Software requirements --------------------- - Compiling and running the 4.x kernels requires up-to-date + Compiling and running the 5.x kernels requires up-to-date versions of various software packages. Consult :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers required and how to get updates for these packages. Beware that using @@ -132,12 +132,12 @@ Build directory for the kernel place for the output files (including .config). Example:: - kernel source code: /usr/src/linux-4.X + kernel source code: /usr/src/linux-5.x build directory: /home/name/build/kernel To configure and build the kernel, use:: - cd /usr/src/linux-4.X + cd /usr/src/linux-5.x make O=/home/name/build/kernel menuconfig make O=/home/name/build/kernel sudo make O=/home/name/build/kernel modules_install install diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b799bcf67d7b..858b6c0b9a15 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1696,12 +1696,11 @@ By default, super page will be supported if Intel IOMMU has the capability. With this option, super page will not be supported. - sm_off [Default Off] - By default, scalable mode will be supported if the + sm_on [Default Off] + By default, scalable mode will be disabled even if the hardware advertises that it has support for the scalable mode translation. 
With this option set, scalable mode - will not be used even on hardware which claims to support - it. + will be used on hardware which claims to support it. tboot_noforce [Default Off] Do not force the Intel IOMMU enabled under tboot. By default, tboot will force Intel IOMMU on, which diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile index 6e5cef0ed6fb..50daa0b3b032 100644 --- a/Documentation/devicetree/bindings/Makefile +++ b/Documentation/devicetree/bindings/Makefile @@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA) quiet_cmd_mk_schema = SCHEMA $@ cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^) -DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml') +DT_DOCS = $(shell \ + cd $(srctree)/$(src) && \ + find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \ + ) + DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) diff --git a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt index 36603419d6f8..0e72183f52bc 100644 --- a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt +++ b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt @@ -4,14 +4,10 @@ Required properties: - compatible : "olpc,ap-sp" - reg : base address and length of SoC's WTM registers - interrupts : SP-AP interrupt -- clocks : phandle + clock-specifier for the clock that drives the WTM -- clock-names: should be "sp" Example: ap-sp@d4290000 { compatible = "olpc,ap-sp"; reg = <0xd4290000 0x1000>; interrupts = <40>; - clocks = <&soc_clocks MMP2_CLK_SP>; - clock-names = "sp"; } diff --git a/Documentation/devicetree/bindings/sound/adi,adau1977.txt b/Documentation/devicetree/bindings/sound/adi,adau1977.txt index e79aeef73f28..9225472c80b4 100644 --- a/Documentation/devicetree/bindings/sound/adi,adau1977.txt +++ b/Documentation/devicetree/bindings/sound/adi,adau1977.txt @@ -17,12 +17,18 @@ Required properties: Documentation/devicetree/bindings/regulator/regulator.txt Optional properties: - - reset-gpio: the reset pin for the chip, for more details consult + - reset-gpios: the reset pin for the chip, for more details consult Documentation/devicetree/bindings/gpio/gpio.txt - DVDD-supply: supply voltage for the digital core, please consult Documentation/devicetree/bindings/regulator/regulator.txt +- adi,micbias: configures the voltage setting for the MICBIAS pin. + Select 0/1/2/3/4/5/6/7/8 to specify MICBIAS voltage + 5V/5.5V/6V/6.5V/7V/7.5V/8V/8.5V/9V + If not specified the default value will be "7" meaning 8.5 Volts. 
+ This property is only valid for the ADAU1977 + For required properties on SPI, please consult Documentation/devicetree/bindings/spi/spi-bus.txt @@ -40,7 +46,8 @@ Examples: AVDD-supply = <®ulator>; DVDD-supply = <®ulator_digital>; - reset_gpio = <&gpio 10 GPIO_ACTIVE_LOW>; + adi,micbias = <3>; + reset-gpios = <&gpio 10 GPIO_ACTIVE_LOW>; }; adau1977_i2c: adau1977@11 { @@ -50,5 +57,5 @@ Examples: AVDD-supply = <®ulator>; DVDD-supply = <®ulator_digital>; - reset_gpio = <&gpio 10 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio 10 GPIO_ACTIVE_LOW>; }; diff --git a/Documentation/devicetree/bindings/sound/cs35l36.txt b/Documentation/devicetree/bindings/sound/cs35l36.txt new file mode 100644 index 000000000000..912bd162b477 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/cs35l36.txt @@ -0,0 +1,168 @@ +CS35L36 Speaker Amplifier + +Required properties: + + - compatible : "cirrus,cs35l36" + + - reg : the I2C address of the device for I2C + + - VA-supply, VP-supply : power supplies for the device, + as covered in + Documentation/devicetree/bindings/regulator/regulator.txt. + + - cirrus,boost-ctl-millivolt : Boost Voltage Value. Configures the boost + converter's output voltage in mV. The range is from 2550mV to 12000mV with + increments of 50mV. + (Default) VP + + - cirrus,boost-peak-milliamp : Boost-converter peak current limit in mA. + Configures the peak current by monitoring the current through the boost FET. + Range starts at 1600mA and goes to a maximum of 4500mA with increments of + 50mA. + (Default) 4.50 Amps + + - cirrus,boost-ind-nanohenry : Inductor estimation LBST reference value. + Seeds the digital boost converter's inductor estimation block with the initial + inductance value to reference. + + 1000 = 1uH (Default) + 1200 = 1.2uH + +Optional properties: + - cirrus,multi-amp-mode : Boolean to determine if there are more than + one amplifier in the system. If more than one it is best to Hi-Z the ASP + port to prevent bus contention on the output signal + + - cirrus,boost-ctl-select : Boost conerter control source selection. + Selects the source of the BST_CTL target VBST voltage for the boost + converter to generate. + 0x00 - Control Port Value + 0x01 - Class H Tracking (Default) + 0x10 - MultiDevice Sync Value + + - cirrus,amp-pcm-inv : Boolean to determine Amplifier will invert incoming + PCM data + + - cirrus,imon-pol-inv : Boolean to determine Amplifier will invert the + polarity of outbound IMON feedback data + + - cirrus,vmon-pol-inv : Boolean to determine Amplifier will invert the + polarity of outbound VMON feedback data + + - cirrus,dcm-mode-enable : Boost converter automatic DCM Mode enable. + This enables the digital boost converter to operate in a low power + (Discontinuous Conduction) mode during low loading conditions. + + - cirrus,weak-fet-disable : Boolean : The strength of the output drivers is + reduced when operating in a Weak-FET Drive Mode and must not be used to drive + a large load. + + - cirrus,classh-wk-fet-delay : Weak-FET entry delay. Controls the delay + (in ms) before the Class H algorithm switches to the weak-FET voltage + (after the audio falls and remains below the value specified in WKFET_AMP_THLD). + + 0 = 0ms + 1 = 5ms + 2 = 10ms + 3 = 50ms + 4 = 100ms (Default) + 5 = 200ms + 6 = 500ms + 7 = 1000ms + + - cirrus,classh-weak-fet-thld-millivolt : Weak-FET amplifier drive threshold. + Configures the signal threshold at which the PWM output stage enters + weak-FET operation. The range is 50mV to 700mV in 50mV increments. 
+ + - cirrus,temp-warn-threshold : Amplifier overtemperature warning threshold. + Configures the threshold at which the overtemperature warning condition occurs. + When the threshold is met, the overtemperature warning attenuation is applied + and the TEMP_WARN_EINT interrupt status bit is set. + If TEMP_WARN_MASK = 0, INTb is asserted. + + 0 = 105C + 1 = 115C + 2 = 125C (Default) + 3 = 135C + + - cirrus,irq-drive-select : Selects the driver type of the selected interrupt + output. + + 0 = Open-drain + 1 = Push-pull (Default) + + - cirrus,irq-gpio-select : Selects the pin to serve as the programmable + interrupt output. + + 0 = PDM_DATA / SWIRE_SD / INT (Default) + 1 = GPIO + +Optional properties for the "cirrus,vpbr-config" Sub-node + + - cirrus,vpbr-en : VBST brownout prevention enable. Configures whether the + VBST brownout prevention algorithm is enabled or disabled. + + 0 = VBST brownout prevention disabled (default) + 1 = VBST brownout prevention enabled + + See Section 7.31.1 VPBR Config for configuration options & further details + + - cirrus,vpbr-thld : Initial VPBR threshold. Configures the VP brownout + threshold voltage + + - cirrus,cirrus,vpbr-atk-rate : Attenuation attack step rate. Configures the + amount delay between consecutive volume attenuation steps when a brownout + condition is present and the VP brownout condition is in an attacking state. + + - cirrus,vpbr-atk-vol : VP brownout prevention step size. Configures the VP + brownout prevention attacking attenuation step size when operating in either + digital volume or analog gain modes. + + - cirrus,vpbr-max-attn : Maximum attenuation that the VP brownout prevention + can apply to the audio signal. + + - cirrus,vpbr-wait : Configures the delay time between a brownout condition + no longer being present and the VP brownout prevention entering an attenuation + release state. + + - cirrus,vpbr-rel-rate : Attenuation release step rate. Configures the delay + between consecutive volume attenuation release steps when a brownout condition + is not longer present and the VP brownout is in an attenuation release state. + + - cirrus,vpbr-mute-en : During the attack state, if the vpbr-max-attn value + is reached, the error condition still remains, and this bit is set, the audio + is muted. 
+ +Example: + +cs35l36: cs35l36@40 { + compatible = "cirrus,cs35l36"; + reg = <0x40>; + VA-supply = <&dummy_vreg>; + VP-supply = <&dummy_vreg>; + reset-gpios = <&gpio0 54 0>; + interrupt-parent = <&gpio8>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + + cirrus,boost-ind-nanohenry = <1000>; + cirrus,boost-ctl-millivolt = <10000>; + cirrus,boost-peak-milliamp = <4500>; + cirrus,boost-ctl-select = <0x00>; + cirrus,weak-fet-delay = <0x04>; + cirrus,weak-fet-thld = <0x01>; + cirrus,temp-warn-threshold = <0x01>; + cirrus,multi-amp-mode; + cirrus,irq-drive-select = <0x01>; + cirrus,irq-gpio-select = <0x01>; + + cirrus,vpbr-config { + cirrus,vpbr-en = <0x00>; + cirrus,vpbr-thld = <0x05>; + cirrus,vpbr-atk-rate = <0x02>; + cirrus,vpbr-atk-vol = <0x01>; + cirrus,vpbr-max-attn = <0x09>; + cirrus,vpbr-wait = <0x01>; + cirrus,vpbr-rel-rate = <0x05>; + cirrus,vpbr-mute-en = <0x00>; + }; +}; diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt index fdcea3d12ee5..e7d17dda55db 100644 --- a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt +++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt @@ -30,6 +30,7 @@ Required properties - vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node. - vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node. - vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node. + Optional Properties: - qcom,mbhc-vthreshold-low: Array of 5 threshold voltages in mV for 5 buttons detection on headset when the mbhc is powered up @@ -92,9 +93,9 @@ spmi_bus { "cdc_ear_cnp_int", "cdc_hphr_cnp_int", "cdc_hphl_cnp_int"; - VDD-CDC-IO-supply = <&pm8916_l5>; - VDD-CDC-TX-RX-CX-supply = <&pm8916_l5>; - VDD-MICBIAS-supply = <&pm8916_l13>; + vdd-cdc-io-supply = <&pm8916_l5>; + vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>; + vdd-micbias-supply = <&pm8916_l13>; #sound-dai-cells = <1>; }; }; diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 25170ad7d25b..101f2b2c69ad 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -533,16 +533,12 @@ Bridge VLAN filtering function that the driver has to call for each VLAN the given port is a member of. A switchdev object is used to carry the VID and bridge flags. -- port_fdb_prepare: bridge layer function invoked when the bridge prepares the - installation of a Forwarding Database entry. If the operation is not - supported, this function should return -EOPNOTSUPP to inform the bridge code - to fallback to a software implementation. No hardware setup must be done in - this function. See port_fdb_add for this and details. - - port_fdb_add: bridge layer function invoked when the bridge wants to install a Forwarding Database entry, the switch hardware should be programmed with the specified address in the specified VLAN Id in the forwarding database - associated with this VLAN ID + associated with this VLAN ID. If the operation is not supported, this + function should return -EOPNOTSUPP to inform the bridge code to fallback to + a software implementation. Note: VLAN ID 0 corresponds to the port private database, which, in the context of DSA, would be the its port-based VLAN, used by the associated bridge device. 
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst index fe46d4867e2d..18c1415e7bfa 100644 --- a/Documentation/networking/msg_zerocopy.rst +++ b/Documentation/networking/msg_zerocopy.rst @@ -7,7 +7,7 @@ Intro ===== The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. -The feature is currently implemented for TCP sockets. +The feature is currently implemented for TCP and UDP sockets. Opportunity and Caveats diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt index 355c6d8ef8ad..b203d1334822 100644 --- a/Documentation/networking/operstates.txt +++ b/Documentation/networking/operstates.txt @@ -22,8 +22,9 @@ and changeable from userspace under certain rules. 2. Querying from userspace Both admin and operational state can be queried via the netlink -operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK -to be notified of updates. This is important for setting from userspace. +operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK +to be notified of updates while the interface is admin up. This is +important for setting from userspace. These values contain interface state: @@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to complete. Corresponding functions are netif_dormant_on() to set the flag, netif_dormant_off() to clear it and netif_dormant() to query. -On device allocation, networking core sets the flags equivalent to -netif_carrier_ok() and !netif_dormant(). +On device allocation, both flags __LINK_STATE_NOCARRIER and +__LINK_STATE_DORMANT are cleared, so the effective state is equivalent +to netif_carrier_ok() and !netif_dormant(). Whenever the driver CHANGES one of these flags, a workqueue event is @@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the driver. Afterwards, the userspace application can set IFLA_OPERSTATE to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set netif_carrier_off() or netif_dormant_on(). Changes made by userspace -are multicasted on the netlink group RTMGRP_LINK. +are multicasted on the netlink group RTNLGRP_LINK. So basically a 802.1X supplicant interacts with the kernel like this: --subscribe to RTMGRP_LINK +-subscribe to RTNLGRP_LINK -set IFLA_LINKMODE to 1 via RTM_SETLINK -query RTM_GETLINK once to get initial state -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 82236a17b5e6..97b7ca8b9b86 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -92,11 +92,11 @@ device. Switch ID ^^^^^^^^^ -The switchdev driver must implement the switchdev op switchdev_port_attr_get -for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same -physical ID for each port of a switch. The ID must be unique between switches -on the same system. The ID does not need to be unique between switches on -different systems. +The switchdev driver must implement the net_device operation +ndo_get_port_parent_id for each port netdev, returning the same physical ID for +each port of a switch. The ID must be unique between switches on the same +system. The ID does not need to be unique between switches on different +systems. The switch ID is used to locate ports on a switch and to know if aggregated ports belong to the same switch. 
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst index dc2ddc345044..fbb9297e6360 100644 --- a/Documentation/process/applying-patches.rst +++ b/Documentation/process/applying-patches.rst @@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to generate a patch representing the differences between two patches and then apply the result. -This will let you move from something like 4.7.2 to 4.7.3 in a single +This will let you move from something like 5.7.2 to 5.7.3 in a single step. The -z flag to interdiff will even let you feed it patches in gzip or bzip2 compressed form directly without the use of zcat or bzcat or manual decompression. -Here's how you'd go from 4.7.2 to 4.7.3 in a single step:: +Here's how you'd go from 5.7.2 to 5.7.3 in a single step:: - interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1 + interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1 Although interdiff may save you a step or two you are generally advised to do the additional steps since interdiff can get things wrong in some cases. @@ -245,62 +245,67 @@ The patches are available at http://kernel.org/ Most recent patches are linked from the front page, but they also have specific homes. -The 4.x.y (-stable) and 4.x patches live at +The 5.x.y (-stable) and 5.x patches live at - https://www.kernel.org/pub/linux/kernel/v4.x/ + https://www.kernel.org/pub/linux/kernel/v5.x/ -The -rc patches live at +The -rc patches are not stored on the webserver but are generated on +demand from git tags such as - https://www.kernel.org/pub/linux/kernel/v4.x/testing/ + https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0 +The stable -rc patches live at -The 4.x kernels + https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/ + + +The 5.x kernels =============== These are the base stable releases released by Linus. The highest numbered release is the most recent. If regressions or other serious flaws are found, then a -stable fix patch -will be released (see below) on top of this base. Once a new 4.x base +will be released (see below) on top of this base. Once a new 5.x base kernel is released, a patch is made available that is a delta between the -previous 4.x kernel and the new one. +previous 5.x kernel and the new one. -To apply a patch moving from 4.6 to 4.7, you'd do the following (note -that such patches do **NOT** apply on top of 4.x.y kernels but on top of the -base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to -first revert the 4.x.y patch). +To apply a patch moving from 5.6 to 5.7, you'd do the following (note +that such patches do **NOT** apply on top of 5.x.y kernels but on top of the +base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to +first revert the 5.x.y patch). Here are some examples:: - # moving from 4.6 to 4.7 + # moving from 5.6 to 5.7 - $ cd ~/linux-4.6 # change to kernel source dir - $ patch -p1 < ../patch-4.7 # apply the 4.7 patch + $ cd ~/linux-5.6 # change to kernel source dir + $ patch -p1 < ../patch-5.7 # apply the 5.7 patch $ cd .. 
- $ mv linux-4.6 linux-4.7 # rename source dir + $ mv linux-5.6 linux-5.7 # rename source dir - # moving from 4.6.1 to 4.7 + # moving from 5.6.1 to 5.7 - $ cd ~/linux-4.6.1 # change to kernel source dir - $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch - # source dir is now 4.6 - $ patch -p1 < ../patch-4.7 # apply new 4.7 patch + $ cd ~/linux-5.6.1 # change to kernel source dir + $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch + # source dir is now 5.6 + $ patch -p1 < ../patch-5.7 # apply new 5.7 patch $ cd .. - $ mv linux-4.6.1 linux-4.7 # rename source dir + $ mv linux-5.6.1 linux-5.7 # rename source dir -The 4.x.y kernels +The 5.x.y kernels ================= Kernels with 3-digit versions are -stable kernels. They contain small(ish) critical fixes for security problems or significant regressions discovered -in a given 4.x kernel. +in a given 5.x kernel. This is the recommended branch for users who want the most recent stable kernel and are not interested in helping test development/experimental versions. -If no 4.x.y kernel is available, then the highest numbered 4.x kernel is +If no 5.x.y kernel is available, then the highest numbered 5.x kernel is the current stable kernel. .. note:: @@ -308,23 +313,23 @@ the current stable kernel. The -stable team usually do make incremental patches available as well as patches against the latest mainline release, but I only cover the non-incremental ones below. The incremental ones can be found at - https://www.kernel.org/pub/linux/kernel/v4.x/incr/ + https://www.kernel.org/pub/linux/kernel/v5.x/incr/ -These patches are not incremental, meaning that for example the 4.7.3 -patch does not apply on top of the 4.7.2 kernel source, but rather on top -of the base 4.7 kernel source. +These patches are not incremental, meaning that for example the 5.7.3 +patch does not apply on top of the 5.7.2 kernel source, but rather on top +of the base 5.7 kernel source. -So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel -source you have to first back out the 4.7.2 patch (so you are left with a -base 4.7 kernel source) and then apply the new 4.7.3 patch. +So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel +source you have to first back out the 5.7.2 patch (so you are left with a +base 5.7 kernel source) and then apply the new 5.7.3 patch. Here's a small example:: - $ cd ~/linux-4.7.2 # change to the kernel source dir - $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch - $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch + $ cd ~/linux-5.7.2 # change to the kernel source dir + $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch + $ patch -p1 < ../patch-5.7.3 # apply the new 5.7.3 patch $ cd .. - $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir + $ mv linux-5.7.2 linux-5.7.3 # rename the kernel source dir The -rc kernels =============== @@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing development kernels but do not want to run some of the really experimental stuff (such people should see the sections about -next and -mm kernels below). -The -rc patches are not incremental, they apply to a base 4.x kernel, just -like the 4.x.y patches described above. The kernel version before the -rcN +The -rc patches are not incremental, they apply to a base 5.x kernel, just +like the 5.x.y patches described above. The kernel version before the -rcN suffix denotes the version of the kernel that this -rc kernel will eventually turn into. 
-So, 4.8-rc5 means that this is the fifth release candidate for the 4.8 -kernel and the patch should be applied on top of the 4.7 kernel source. +So, 5.8-rc5 means that this is the fifth release candidate for the 5.8 +kernel and the patch should be applied on top of the 5.7 kernel source. Here are 3 examples of how to apply these patches:: - # first an example of moving from 4.7 to 4.8-rc3 + # first an example of moving from 5.7 to 5.8-rc3 - $ cd ~/linux-4.7 # change to the 4.7 source dir - $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch + $ cd ~/linux-5.7 # change to the 5.7 source dir + $ patch -p1 < ../patch-5.8-rc3 # apply the 5.8-rc3 patch $ cd .. - $ mv linux-4.7 linux-4.8-rc3 # rename the source dir + $ mv linux-5.7 linux-5.8-rc3 # rename the source dir - # now let's move from 4.8-rc3 to 4.8-rc5 + # now let's move from 5.8-rc3 to 5.8-rc5 - $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir - $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch - $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch + $ cd ~/linux-5.8-rc3 # change to the 5.8-rc3 dir + $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch + $ patch -p1 < ../patch-5.8-rc5 # apply the new 5.8-rc5 patch $ cd .. - $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir + $ mv linux-5.8-rc3 linux-5.8-rc5 # rename the source dir - # finally let's try and move from 4.7.3 to 4.8-rc5 + # finally let's try and move from 5.7.3 to 5.8-rc5 - $ cd ~/linux-4.7.3 # change to the kernel source dir - $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch - $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch + $ cd ~/linux-5.7.3 # change to the kernel source dir + $ patch -p1 -R < ../patch-5.7.3 # revert the 5.7.3 patch + $ patch -p1 < ../patch-5.8-rc5 # apply new 5.8-rc5 patch $ cd .. - $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir + $ mv linux-5.7.3 linux-5.8-rc5 # rename the kernel source dir The -mm patches and the linux-next tree diff --git a/Documentation/sound/soc/dpcm.rst b/Documentation/sound/soc/dpcm.rst index f6845b2278ea..77f67ded53de 100644 --- a/Documentation/sound/soc/dpcm.rst +++ b/Documentation/sound/soc/dpcm.rst @@ -13,7 +13,7 @@ drivers that expose several ALSA PCMs and can route to multiple DAIs. The DPCM runtime routing is determined by the ALSA mixer settings in the same way as the analog signal is routed in an ASoC codec driver. DPCM uses a DAPM graph representing the DSP internal audio paths and uses the mixer settings to -determine the patch used by each ALSA PCM. +determine the path used by each ALSA PCM. DPCM re-uses all the existing component codec, platform and DAI drivers without any modifications. @@ -101,7 +101,7 @@ The audio driver processes this as follows :- 4. Machine driver or audio HAL enables the speaker path. -5. DPCM runs the PCM ops for startup(), hw_params(), prepapre() and +5. DPCM runs the PCM ops for startup(), hw_params(), prepare() and trigger(start) for DAI1 Speakers since the path is enabled. In this example, the machine driver or userspace audio HAL can alter the routing @@ -221,7 +221,7 @@ like a BT phone call :- This allows the host CPU to sleep while the DSP, MODEM DAI and the BT DAI are still in operation. -A BE DAI link can also set the codec to a dummy device if the code is a device +A BE DAI link can also set the codec to a dummy device if the codec is a device that is managed externally. Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the @@ -249,7 +249,7 @@ configuration. 
struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); - /* The DSP will covert the FE rate to 48k, stereo */ + /* The DSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; @@ -386,5 +386,3 @@ This means creating a new FE that is connected with a virtual path to both DAI links. The DAI links will be started when the FE PCM is started and stopped when the FE PCM is stopped. Note that the FE PCM cannot read or write data in this configuration. - - diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 819caf8ca05f..ebc679bcb2dc 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt @@ -56,26 +56,34 @@ of any kernel data structures. dentry-state: -From linux/fs/dentry.c: +From linux/include/linux/dcache.h: -------------------------------------------------------------- -struct { +struct dentry_stat_t dentry_stat { int nr_dentry; int nr_unused; int age_limit; /* age in seconds */ int want_pages; /* pages requested by system */ - int dummy[2]; -} dentry_stat = {0, 0, 45, 0,}; --------------------------------------------------------------- - -Dentries are dynamically allocated and deallocated, and -nr_dentry seems to be 0 all the time. Hence it's safe to -assume that only nr_unused, age_limit and want_pages are -used. Nr_unused seems to be exactly what its name says. + int nr_negative; /* # of unused negative dentries */ + int dummy; /* Reserved for future use */ +}; +-------------------------------------------------------------- + +Dentries are dynamically allocated and deallocated. + +nr_dentry shows the total number of dentries allocated (active ++ unused). nr_unused shows the number of dentries that are not +actively used, but are saved in the LRU list for future reuse. + Age_limit is the age in seconds after which dcache entries can be reclaimed when memory is short and want_pages is nonzero when shrink_dcache_pages() has been called and the dcache isn't pruned yet. +nr_negative shows the number of unused dentries that are also +negative dentries which do not map to any files. Instead, +they help speeding up rejection of non-existing files provided +by the users. + ============================================================== dquot-max & dquot-nr: diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst index 80f5ffc94a9e..b37166817842 100644 --- a/Documentation/translations/it_IT/admin-guide/README.rst +++ b/Documentation/translations/it_IT/admin-guide/README.rst @@ -4,7 +4,7 @@ .. _it_readme: -Rilascio del kernel Linux 4.x <http://kernel.org/> +Rilascio del kernel Linux 5.x <http://kernel.org/> =================================================== .. 
warning:: diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt index e8e8d14d3c4e..c1f95b59e14d 100644 --- a/Documentation/x86/resctrl_ui.txt +++ b/Documentation/x86/resctrl_ui.txt @@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com> Tony Luck <tony.luck@intel.com> Vikas Shivappa <vikas.shivappa@intel.com> -This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo +This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo flag bits: RDT (Resource Director Technology) Allocation - "rdt_a" CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" diff --git a/MAINTAINERS b/MAINTAINERS index 56fbeca185cb..24b9e3578d3a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -409,8 +409,7 @@ F: drivers/platform/x86/wmi.c F: include/uapi/linux/wmi.h AD1889 ALSA SOUND DRIVER -M: Thibaut Varene <T-Bone@parisc-linux.org> -W: http://wiki.parisc-linux.org/AD1889 +W: https://parisc.wiki.kernel.org/index.php/AD1889 L: linux-parisc@vger.kernel.org S: Maintained F: sound/pci/ad1889.* @@ -2848,8 +2847,11 @@ F: include/uapi/linux/if_bonding.h BPF (Safe dynamic programs and tools) M: Alexei Starovoitov <ast@kernel.org> M: Daniel Borkmann <daniel@iogearbox.net> +R: Martin KaFai Lau <kafai@fb.com> +R: Song Liu <songliubraving@fb.com> +R: Yonghong Song <yhs@fb.com> L: netdev@vger.kernel.org -L: linux-kernel@vger.kernel.org +L: bpf@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 @@ -2873,10 +2875,13 @@ F: samples/bpf/ F: tools/bpf/ F: tools/lib/bpf/ F: tools/testing/selftests/bpf/ +K: bpf +N: bpf BPF JIT for ARM M: Shubham Bansal <illusionist.neo@gmail.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/arm/net/ @@ -2885,18 +2890,21 @@ M: Daniel Borkmann <daniel@iogearbox.net> M: Alexei Starovoitov <ast@kernel.org> M: Zi Shen Lim <zlim.lnx@gmail.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: arch/arm64/net/ BPF JIT for MIPS (32-BIT AND 64-BIT) M: Paul Burton <paul.burton@mips.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/mips/net/ BPF JIT for NFP NICs M: Jakub Kicinski <jakub.kicinski@netronome.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: drivers/net/ethernet/netronome/nfp/bpf/ @@ -2904,6 +2912,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT) M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> M: Sandipan Das <sandipan@linux.ibm.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/powerpc/net/ @@ -2911,6 +2920,7 @@ BPF JIT for S390 M: Martin Schwidefsky <schwidefsky@de.ibm.com> M: Heiko Carstens <heiko.carstens@de.ibm.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/s390/net/ X: arch/s390/net/pnet.c @@ -2918,12 +2928,14 @@ X: arch/s390/net/pnet.c BPF JIT for SPARC (32-BIT AND 64-BIT) M: David S. 
Miller <davem@davemloft.net> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/sparc/net/ BPF JIT for X86 32-BIT M: Wang YanQing <udknight@gmail.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/x86/net/bpf_jit_comp32.c @@ -2931,6 +2943,7 @@ BPF JIT for X86 64-BIT M: Alexei Starovoitov <ast@kernel.org> M: Daniel Borkmann <daniel@iogearbox.net> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: arch/x86/net/ X: arch/x86/net/bpf_jit_comp32.c @@ -3385,9 +3398,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic* F: drivers/media/platform/marvell-ccic/ CAIF NETWORK LAYER -M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> L: netdev@vger.kernel.org -S: Supported +S: Orphan F: Documentation/networking/caif/ F: drivers/net/caif/ F: include/uapi/linux/caif/ @@ -5189,7 +5201,7 @@ DRM DRIVERS FOR XEN M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> T: git git://anongit.freedesktop.org/drm/drm-misc L: dri-devel@lists.freedesktop.org -L: xen-devel@lists.xen.org +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: drivers/gpu/drm/xen/ F: Documentation/gpu/xen-front.rst @@ -8490,6 +8502,7 @@ L7 BPF FRAMEWORK M: John Fastabend <john.fastabend@gmail.com> M: Daniel Borkmann <daniel@iogearbox.net> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: include/linux/skmsg.h F: net/core/skmsg.c @@ -11315,10 +11328,12 @@ F: include/dt-bindings/ OPENCORES I2C BUS DRIVER M: Peter Korsgaard <peter@korsgaard.com> +M: Andrew Lunn <andrew@lunn.ch> L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/busses/i2c-ocores F: drivers/i2c/busses/i2c-ocores.c +F: include/linux/platform_data/i2c-ocores.h OPENRISC ARCHITECTURE M: Jonas Bonn <jonas@southpole.se> @@ -11489,7 +11504,7 @@ F: Documentation/blockdev/paride.txt F: drivers/block/paride/ PARISC ARCHITECTURE -M: "James E.J. Bottomley" <jejb@parisc-linux.org> +M: "James E.J. 
Bottomley" <James.Bottomley@HansenPartnership.com> M: Helge Deller <deller@gmx.de> L: linux-parisc@vger.kernel.org W: http://www.parisc-linux.org/ @@ -12876,6 +12891,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt F: drivers/net/dsa/realtek-smi* F: drivers/net/dsa/rtl83* +REDPINE WIRELESS DRIVER +M: Amitkumar Karwar <amitkarwar@gmail.com> +M: Siva Rebbagondla <siva8118@gmail.com> +L: linux-wireless@vger.kernel.org +S: Maintained +F: drivers/net/wireless/rsi/ + REGISTER MAP ABSTRACTION M: Mark Brown <broonie@kernel.org> L: linux-kernel@vger.kernel.org @@ -13704,6 +13726,15 @@ L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/sfc/ +SFF/SFP/SFP+ MODULE SUPPORT +M: Russell King <linux@armlinux.org.uk> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/phy/phylink.c +F: drivers/net/phy/sfp* +F: include/linux/phylink.h +F: include/linux/sfp.h + SGI GRU DRIVER M: Dimitri Sivanich <sivanich@sgi.com> S: Maintained @@ -16649,6 +16680,15 @@ S: Maintained F: drivers/platform/x86/ F: drivers/platform/olpc/ +X86 PLATFORM DRIVERS - ARCH +R: Darren Hart <dvhart@infradead.org> +R: Andy Shevchenko <andy@infradead.org> +L: platform-driver-x86@vger.kernel.org +L: x86@kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core +S: Maintained +F: arch/x86/platform + X86 VDSO M: Andy Lutomirski <luto@kernel.org> L: linux-kernel@vger.kernel.org @@ -16681,10 +16721,30 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/tuners/tuner-xc2028.* +XDP (eXpress Data Path) +M: Alexei Starovoitov <ast@kernel.org> +M: Daniel Borkmann <daniel@iogearbox.net> +M: David S. Miller <davem@davemloft.net> +M: Jakub Kicinski <jakub.kicinski@netronome.com> +M: Jesper Dangaard Brouer <hawk@kernel.org> +M: John Fastabend <john.fastabend@gmail.com> +L: netdev@vger.kernel.org +L: xdp-newbies@vger.kernel.org +L: bpf@vger.kernel.org +S: Supported +F: net/core/xdp.c +F: include/net/xdp.h +F: kernel/bpf/devmap.c +F: kernel/bpf/cpumap.c +F: include/trace/events/xdp.h +K: xdp +N: xdp + XDP SOCKETS (AF_XDP) M: Björn Töpel <bjorn.topel@intel.com> M: Magnus Karlsson <magnus.karlsson@intel.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: kernel/bpf/xskmap.c F: net/xdp/ @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc8 NAME = Shy Crocodile # *DOCUMENTATION* diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h index 4d17cacd1462..432402c8e47f 100644 --- a/arch/alpha/include/asm/irq.h +++ b/arch/alpha/include/asm/irq.h @@ -56,15 +56,15 @@ #elif defined(CONFIG_ALPHA_DP264) || \ defined(CONFIG_ALPHA_LYNX) || \ - defined(CONFIG_ALPHA_SHARK) || \ - defined(CONFIG_ALPHA_EIGER) + defined(CONFIG_ALPHA_SHARK) # define NR_IRQS 64 #elif defined(CONFIG_ALPHA_TITAN) #define NR_IRQS 80 #elif defined(CONFIG_ALPHA_RAWHIDE) || \ - defined(CONFIG_ALPHA_TAKARA) + defined(CONFIG_ALPHA_TAKARA) || \ + defined(CONFIG_ALPHA_EIGER) # define NR_IRQS 128 #elif defined(CONFIG_ALPHA_WILDFIRE) diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index d73dc473fbb9..188fc9256baf 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm) /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ - (r) <= 18 ? (r)+8 : (r)-10]) + (r) <= 18 ? 
(r)+10 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 376366a7db81..d750b302d5ab 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -191,7 +191,6 @@ config NR_CPUS config ARC_SMP_HALT_ON_RESET bool "Enable Halt-on-reset boot mode" - default y if ARC_UBOOT_SUPPORT help In SMP configuration cores can be configured as Halt-on-reset or they could all start at same time. For Halt-on-reset, non @@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS (also referred to as r58:r59). These can also be used by gcc as GPR so kernel needs to save/restore per process +config ARC_IRQ_NO_AUTOSAVE + bool "Disable hardware autosave regfile on interrupts" + default n + help + On HS cores, taken interrupt auto saves the regfile on stack. + This is programmable and can be optionally disabled in which case + software INTERRUPT_PROLOGUE/EPILGUE do the needed work + endif # ISA_ARCV2 endmenu # "ARC CPU Configuration" @@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA endif -config ARC_UBOOT_SUPPORT - bool "Support uboot arg Handling" - help - ARC Linux by default checks for uboot provided args as pointers to - external cmdline or DTB. This however breaks in absence of uboot, - when booting from Metaware debugger directly, as the registers are - not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus - registers look like uboot args to kernel which then chokes. - So only enable the uboot arg checking/processing if users are sure - of uboot being in play. - config ARC_BUILTIN_DTB_NAME string "Built in DTB" help diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 6e84060e7c90..621f59407d76 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5 # CONFIG_ARC_HAS_LLSC is not set CONFIG_ARC_KVADDR_SIZE=402 CONFIG_ARC_EMUL_UNALIGNED=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_PREEMPT=y CONFIG_NET=y CONFIG_UNIX=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 1e59a2e9c602..e447ace6fa1c 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_ARC_PLAT_AXS10X=y CONFIG_AXS103=y CONFIG_ISA_ARCV2=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" CONFIG_PREEMPT=y CONFIG_NET=y diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index b5c3f6c54b03..c82cdb10aaf4 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -15,8 +15,6 @@ CONFIG_AXS103=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y # CONFIG_ARC_TIMERS_64BIT is not set -# CONFIG_ARC_SMP_HALT_ON_RESET is not set -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" CONFIG_PREEMPT=y CONFIG_NET=y diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index f1b86cef0905..a27eafdc8260 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -151,6 +151,14 @@ struct bcr_isa_arcv2 { #endif }; +struct bcr_uarch_build_arcv2 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:8, prod:8, maj:8, min:8; +#else + unsigned int min:8, maj:8, prod:8, pad:8; +#endif +}; + struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index f393b663413e..2ad77fb43639 100644 --- 
a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -52,6 +52,17 @@ #define cache_line_size() SMP_CACHE_BYTES #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES +/* + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit + * alignment for any atomic64_t embedded in buffer. + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed + * value of 4 (and not 8) in ARC ABI. + */ +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC) +#define ARCH_SLAB_MINALIGN 8 +#endif + extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void read_decode_cache_bcr(void); diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 309f4e6721b3..225e7df2d8ed 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -17,6 +17,33 @@ ; ; Now manually save: r12, sp, fp, gp, r25 +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + st.as r9, [sp, -10] ; save r9 in it's final stack slot + sub sp, sp, 12 ; skip JLI, LDI, EI + + PUSH lp_count + PUSHAX lp_start + PUSHAX lp_end + PUSH blink + + PUSH r11 + PUSH r10 + + sub sp, sp, 4 ; skip r9 + + PUSH r8 + PUSH r7 + PUSH r6 + PUSH r5 + PUSH r4 + PUSH r3 + PUSH r2 + PUSH r1 + PUSH r0 +.endif +#endif + #ifdef CONFIG_ARC_HAS_ACCL_REGS PUSH r59 PUSH r58 @@ -86,6 +113,33 @@ POP r59 #endif +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + POP r0 + POP r1 + POP r2 + POP r3 + POP r4 + POP r5 + POP r6 + POP r7 + POP r8 + POP r9 + POP r10 + POP r11 + + POP blink + POPAX lp_end + POPAX lp_start + + POP r9 + mov lp_count, r9 + + add sp, sp, 12 ; skip JLI, LDI, EI + ld.as r9, [sp, -10] ; reload r9 which got clobbered +.endif +#endif + .endm /*------------------------------------------------------------------------*/ diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index c9173c02081c..eabc3efa6c6d 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) " .previous \n" : "+r"(d_char), "+r"(res) : "i"(0) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } @@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) : "g"(-EFAULT), "r"(count) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cc558a25b8fa..562089d62d9d 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -209,7 +209,9 @@ restore_regs: ;####### Return from Intr ####### debug_marker_l1: - bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + btst r0, STATUS_DE_BIT ; Z flag set if bit clear + bnz .Lintr_ret_to_delay_slot ; branch if 
STATUS_DE_BIT set .Lisr_ret_fast_path: ; Handle special case #1: (Entry via Exception, Return via IRQ) diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 8b90d25a15cc..30e090625916 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -17,6 +17,7 @@ #include <asm/entry.h> #include <asm/arcregs.h> #include <asm/cache.h> +#include <asm/irqflags.h> .macro CPU_EARLY_SETUP @@ -47,6 +48,15 @@ sr r5, [ARC_REG_DC_CTRL] 1: + +#ifdef CONFIG_ISA_ARCV2 + ; Unaligned access is disabled at reset, so re-enable early as + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access + ; by default + lr r5, [status32] + bset r5, r5, STATUS_AD_BIT + kflag r5 +#endif .endm .section .init.text, "ax",@progbits @@ -90,15 +100,13 @@ ENTRY(stext) st.ab 0, [r5, 4] 1: -#ifdef CONFIG_ARC_UBOOT_SUPPORT ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 - ; r1 = magic number (board identity, unused as of now + ; r1 = magic number (always zero as of now) ; r2 = pointer to uboot provided cmdline or external DTB in mem - ; These are handled later in setup_arch() + ; These are handled later in handle_uboot_args() st r0, [@uboot_tag] st r2, [@uboot_arg] -#endif ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 067ea362fb3e..cf18b3e5a934 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -49,11 +49,13 @@ void arc_init_IRQ(void) *(unsigned int *)&ictrl = 0; +#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ ictrl.save_blink = 1; ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ +#endif WRITE_AUX(AUX_IRQ_CTRL, ictrl); diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index feb90093e6b1..7b2340996cf8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.ret_stk = 4 << bpu.rse; if (cpu->core.family >= 0x54) { - unsigned int exec_ctrl; - READ_BCR(AUX_EXEC_CTRL, exec_ctrl); - cpu->extn.dual_enb = !(exec_ctrl & 1); + struct bcr_uarch_build_arcv2 uarch; - /* dual issue always present for this core */ - cpu->extn.dual = 1; + /* + * The first 0x54 core (uarch maj:min 0:1 or 0:2) was + * dual issue only (HS4x). But next uarch rev (1:0) + * allows it be configured for single issue (HS3x) + * Ensure we fiddle with dual issue only on HS4x + */ + READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch); + + if (uarch.prod == 4) { + unsigned int exec_ctrl; + + /* dual issue hardware always present */ + cpu->extn.dual = 1; + + READ_BCR(AUX_EXEC_CTRL, exec_ctrl); + + /* dual issue hardware enabled ? 
*/ + cpu->extn.dual_enb = !(exec_ctrl & 1); + + } } } READ_BCR(ARC_REG_AP_BCR, ap); if (ap.ver) { cpu->extn.ap_num = 2 << ap.num; - cpu->extn.ap_full = !!ap.min; + cpu->extn.ap_full = !ap.min; } READ_BCR(ARC_REG_SMART_BCR, bcr); @@ -462,43 +478,78 @@ void setup_processor(void) arc_chk_core_config(); } -static inline int is_kernel(unsigned long addr) +static inline bool uboot_arg_invalid(unsigned long addr) { - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) - return 1; - return 0; + /* + * Check that it is a untranslated address (although MMU is not enabled + * yet, it being a high address ensures this is not by fluke) + */ + if (addr < PAGE_OFFSET) + return true; + + /* Check that address doesn't clobber resident kernel image */ + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end; } -void __init setup_arch(char **cmdline_p) +#define IGNORE_ARGS "Ignore U-boot args: " + +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */ +#define UBOOT_TAG_NONE 0 +#define UBOOT_TAG_CMDLINE 1 +#define UBOOT_TAG_DTB 2 + +void __init handle_uboot_args(void) { -#ifdef CONFIG_ARC_UBOOT_SUPPORT - /* make sure that uboot passed pointer to cmdline/dtb is valid */ - if (uboot_tag && is_kernel((unsigned long)uboot_arg)) - panic("Invalid uboot arg\n"); - - /* See if u-boot passed an external Device Tree blob */ - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ - if (!machine_desc) -#endif - { - /* No, so try the embedded one */ + bool use_embedded_dtb = true; + bool append_cmdline = false; + + /* check that we know this tag */ + if (uboot_tag != UBOOT_TAG_NONE && + uboot_tag != UBOOT_TAG_CMDLINE && + uboot_tag != UBOOT_TAG_DTB) { + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag); + goto ignore_uboot_args; + } + + if (uboot_tag != UBOOT_TAG_NONE && + uboot_arg_invalid((unsigned long)uboot_arg)) { + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); + goto ignore_uboot_args; + } + + /* see if U-boot passed an external Device Tree blob */ + if (uboot_tag == UBOOT_TAG_DTB) { + machine_desc = setup_machine_fdt((void *)uboot_arg); + + /* external Device Tree blob is invalid - use embedded one */ + use_embedded_dtb = !machine_desc; + } + + if (uboot_tag == UBOOT_TAG_CMDLINE) + append_cmdline = true; + +ignore_uboot_args: + + if (use_embedded_dtb) { machine_desc = setup_machine_fdt(__dtb_start); if (!machine_desc) panic("Embedded DT invalid\n"); + } - /* - * If we are here, it is established that @uboot_arg didn't - * point to DT blob. Instead if u-boot says it is cmdline, - * append to embedded DT cmdline. - * setup_machine_fdt() would have populated @boot_command_line - */ - if (uboot_tag == 1) { - /* Ensure a whitespace between the 2 cmdlines */ - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, uboot_arg, - COMMAND_LINE_SIZE); - } + /* + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this + * append processing can only happen after. 
+ */ + if (append_cmdline) { + /* Ensure a whitespace between the 2 cmdlines */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE); } +} + +void __init setup_arch(char **cmdline_p) +{ + handle_uboot_args(); /* Save unparsed command line copy for /proc/cmdline */ *cmdline_p = boot_command_line; diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index d61044dd8b58..ea14b0bf3116 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -25,15 +25,11 @@ #endif #ifdef CONFIG_ARC_HAS_LL64 -# define PREFETCH_READ(RX) prefetch [RX, 56] -# define PREFETCH_WRITE(RX) prefetchw [RX, 64] # define LOADX(DST,RX) ldd.ab DST, [RX, 8] # define STOREX(SRC,RX) std.ab SRC, [RX, 8] # define ZOLSHFT 5 # define ZOLAND 0x1F #else -# define PREFETCH_READ(RX) prefetch [RX, 28] -# define PREFETCH_WRITE(RX) prefetchw [RX, 32] # define LOADX(DST,RX) ld.ab DST, [RX, 4] # define STOREX(SRC,RX) st.ab SRC, [RX, 4] # define ZOLSHFT 4 @@ -41,8 +37,6 @@ #endif ENTRY_CFI(memcpy) - prefetch [r1] ; Prefetch the read location - prefetchw [r0] ; Prefetch the write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -72,8 +66,6 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy32_64bytes ;; LOOP START LOADX (r6, r1) - PREFETCH_READ (r1) - PREFETCH_WRITE (r3) LOADX (r8, r1) LOADX (r10, r1) LOADX (r4, r1) @@ -117,9 +109,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_1 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 24) or r7, r7, r5 @@ -162,9 +152,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_2 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 16) or r7, r7, r5 @@ -204,9 +192,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_3 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index f25c085b9874..23e00216e5a5 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" depends on ISA_ARCV2 select ARC_HAS_ACCL_REGS + select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK select RESET_HSDK select HAVE_PCI diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 664e918e2624..26524b75970a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1400,6 +1400,7 @@ config NR_CPUS config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP + select GENERIC_IRQ_MIGRATION help Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. 
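The HOTPLUG_CPU change just above selects GENERIC_IRQ_MIGRATION, which is what lets the arm port retire its private migrate_irqs() walker further down in this series (see the arch/arm/kernel/irq.c and arch/arm/kernel/smp.c hunks) and lean on the generic helper instead. A rough, illustrative C sketch of the resulting CPU-offline flow — not the literal patched function, whose full body stays in smp.c:

	/*
	 * Illustrative sketch only (kernel context, not a standalone program):
	 * with GENERIC_IRQ_MIGRATION selected, the architecture no longer walks
	 * irq_desc itself; it marks the CPU offline and lets the generic helper
	 * in kernel/irq/cpuhotplug.c move the affinities.
	 */
	#include <linux/cpumask.h>
	#include <linux/irq.h>
	#include <linux/smp.h>

	int __cpu_disable(void)
	{
		unsigned int cpu = smp_processor_id();

		set_cpu_online(cpu, false);	/* stop new interrupts/work targeting this CPU */
		irq_migrate_all_off_this_cpu();	/* replaces the arch-private migrate_irqs() */

		/* ... cache/TLB flushing etc. as in the real arch/arm/kernel/smp.c ... */
		return 0;
	}
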
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index b67f5fee1469..dce5be5df97b 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -729,7 +729,7 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; }; &tscadc { diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 172c0224e7f6..b128998097ce 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -651,13 +651,13 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <1>; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <2>; }; diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts index d0fd68873689..5b250060f6dd 100644 --- a/arch/arm/boot/dts/am335x-shc.dts +++ b/arch/arm/boot/dts/am335x-shc.dts @@ -215,7 +215,7 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; bus-width = <0x4>; - cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; cd-inverted; max-frequency = <26000000>; vmmc-supply = <&vmmcsd_fixed>; diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index f3ac7483afed..5d04dc68cf57 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -144,30 +144,32 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "U-Boot"; - reg = <0 0x800000>; - }; - partition@800000 { - label = "Linux"; - reg = <0x800000 0x800000>; - }; - partition@1000000 { - label = "Filesystem"; - reg = <0x1000000 0x3f000000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "U-Boot"; + reg = <0 0x800000>; + }; + partition@800000 { + label = "Linux"; + reg = <0x800000 0x800000>; + }; + partition@1000000 { + label = "Filesystem"; + reg = <0x1000000 0x3f000000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 1139e9469a83..b4cca507cf13 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -160,12 +160,15 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; + + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index bbbb38888bb8..87dcb502f72d 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts @@ -81,49 +81,52 @@ }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "u-boot"; - reg = <0x00000000 0x000e0000>; - read-only; - }; - - partition@e0000 { - label = "u-boot-env"; - reg = <0x000e0000 
0x00020000>; - read-only; - }; - - partition@100000 { - label = "u-boot-env2"; - reg = <0x00100000 0x00020000>; - read-only; - }; - - partition@120000 { - label = "zImage"; - reg = <0x00120000 0x00400000>; - }; - - partition@520000 { - label = "initrd"; - reg = <0x00520000 0x00400000>; - }; - partition@e00000 { - label = "boot"; - reg = <0x00e00000 0x3f200000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "u-boot"; + reg = <0x00000000 0x000e0000>; + read-only; + }; + + partition@e0000 { + label = "u-boot-env"; + reg = <0x000e0000 0x00020000>; + read-only; + }; + + partition@100000 { + label = "u-boot-env2"; + reg = <0x00100000 0x00020000>; + read-only; + }; + + partition@120000 { + label = "zImage"; + reg = <0x00120000 0x00400000>; + }; + + partition@520000 { + label = "initrd"; + reg = <0x00520000 0x00400000>; + }; + + partition@e00000 { + label = "boot"; + reg = <0x00e00000 0x3f200000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index 47aa53ba6b92..559659b399d0 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -476,7 +476,7 @@ clocksource: timer@20000 { compatible = "ti,da830-timer"; reg = <0x20000 0x1000>; - interrupts = <12>, <13>; + interrupts = <21>, <22>; interrupt-names = "tint12", "tint34"; clocks = <&pll0_auxclk>; }; diff --git a/arch/arm/boot/dts/imx6q-pistachio.dts b/arch/arm/boot/dts/imx6q-pistachio.dts index 5edf858c8b86..a31b17eaf51c 100644 --- a/arch/arm/boot/dts/imx6q-pistachio.dts +++ b/arch/arm/boot/dts/imx6q-pistachio.dts @@ -103,7 +103,7 @@ power { label = "Power Button"; gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; - gpio-key,wakeup; + wakeup-source; linux,code = <KEY_POWER>; }; }; diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index d8163705363e..4a31a415f88e 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts @@ -309,7 +309,7 @@ pinctrl-2 = <&pinctrl_usdhc3_200mhz>; cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; keep-power-in-suspend; - enable-sdio-wakeup; + wakeup-source; vmmc-supply = <®_sd3_vmmc>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 272ff6133ec1..d1375d3650fd 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -467,7 +467,7 @@ }; gpt: gpt@2098000 { - compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; + compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; reg = <0x02098000 0x4000>; interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6SX_CLK_GPT_BUS>, diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index e4645f612712..2ab74860d962 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -274,7 +274,7 @@ compatible = "amlogic,meson6-dwmac", "snps,dwmac"; reg = <0xc9410000 0x10000 0xc1108108 0x4>; - interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>; + interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "macirq"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts index 0872f6e3abf5..d50fc2f60fa3 100644 --- a/arch/arm/boot/dts/meson8b-ec100.dts +++ b/arch/arm/boot/dts/meson8b-ec100.dts @@ -205,8 +205,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = 
<&vcc_3v3>; }; diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts index 58669abda259..0f0a46ddf3ff 100644 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts @@ -221,7 +221,6 @@ /* Realtek RTL8211F (0x001cc916) */ eth_phy: ethernet-phy@0 { reg = <0>; - eee-broken-1000t; interrupt-parent = <&gpio_intc>; /* GPIOH_3 */ interrupts = <17 IRQ_TYPE_LEVEL_LOW>; @@ -273,8 +272,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&tflash_vdd>; vqmmc-supply = <&tf_io>; diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts index f5853610b20b..6ac02beb5fa7 100644 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts @@ -206,8 +206,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index ddc7a7bb33c0..f57acf8f66b9 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -105,7 +105,7 @@ interrupts-extended = < &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 - &cpcap 48 1 + &cpcap 48 0 >; interrupt-names = "id_ground", "id_float", "se0conn", "vbusvld", diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index e53d32691308..93b420934e8e 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -714,11 +714,7 @@ vdda-supply = <&vdac>; - #address-cells = <1>; - #size-cells = <0>; - port { - reg = <0>; venc_out: endpoint { remote-endpoint = <&opa_in>; ti,channels = <1>; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 182a53991c90..826920e6b878 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -814,7 +814,7 @@ /* For debugging, it is often good idea to remove this GPIO. It means you can remove back cover (to reboot by removing battery) and still use the MMC card. 
*/ - cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ + cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ }; /* most boards use vaux3, only some old versions use vmmc2 instead */ diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 0d9b85317529..e142e6c70a59 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -370,6 +370,19 @@ compatible = "ti,omap2-onenand"; reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ + /* + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported + * bootloader set values when booted with v4.19 using both N950 + * and N9 devices (OneNAND Manufacturer: Samsung): + * + * gpmc cs0 before gpmc_cs_program_settings: + * cs0 GPMC_CS_CONFIG1: 0xfd001202 + * cs0 GPMC_CS_CONFIG2: 0x00181800 + * cs0 GPMC_CS_CONFIG3: 0x00030300 + * cs0 GPMC_CS_CONFIG4: 0x18001804 + * cs0 GPMC_CS_CONFIG5: 0x03171d1d + * cs0 GPMC_CS_CONFIG6: 0x97080000 + */ gpmc,sync-read; gpmc,sync-write; gpmc,burst-length = <16>; @@ -379,26 +392,27 @@ gpmc,device-width = <2>; gpmc,mux-add-data = <2>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <87>; - gpmc,cs-wr-off-ns = <87>; + gpmc,cs-rd-off-ns = <122>; + gpmc,cs-wr-off-ns = <122>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <10>; - gpmc,adv-wr-off-ns = <10>; - gpmc,oe-on-ns = <15>; - gpmc,oe-off-ns = <87>; + gpmc,adv-rd-off-ns = <15>; + gpmc,adv-wr-off-ns = <15>; + gpmc,oe-on-ns = <20>; + gpmc,oe-off-ns = <122>; gpmc,we-on-ns = <0>; - gpmc,we-off-ns = <87>; - gpmc,rd-cycle-ns = <112>; - gpmc,wr-cycle-ns = <112>; - gpmc,access-ns = <81>; + gpmc,we-off-ns = <122>; + gpmc,rd-cycle-ns = <148>; + gpmc,wr-cycle-ns = <148>; + gpmc,access-ns = <117>; gpmc,page-burst-access-ns = <15>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,wait-monitoring-ns = <0>; - gpmc,clk-activation-ns = <5>; - gpmc,wr-data-mux-bus-ns = <30>; - gpmc,wr-access-ns = <81>; - gpmc,sync-clk-ps = <15000>; + gpmc,clk-activation-ns = <10>; + gpmc,wr-data-mux-bus-ns = <40>; + gpmc,wr-access-ns = <117>; + + gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */ /* * MTD partition table corresponding to Nokia's MeeGo 1.2 diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 04758a2a87f0..67d77eee9433 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -644,6 +644,17 @@ }; }; +/* Configure pwm clock source for timers 8 & 9 */ +&timer8 { + assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + +&timer9 { + assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + /* * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for * uart1 wakeirq. 
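Several of the board files above (the meson8b boards, omap3-n900, and more meson boards later in this diff) make the same card-detect change: the GPIO is now described directly as GPIO_ACTIVE_LOW instead of GPIO_ACTIVE_HIGH combined with a separate cd-inverted override. A minimal sketch of the polarity arithmetic the two descriptions reduce to — illustrative only, the real handling lives in the MMC core's cd-gpios parsing, and the helper name below is made up:

	#include <stdbool.h>

	/*
	 * Hypothetical helper, illustrative only: logical card-detect value as a
	 * function of the raw pin level, the ACTIVE_LOW flag from cd-gpios and
	 * the legacy cd-inverted property. When both properties are honoured,
	 * ACTIVE_HIGH + cd-inverted and plain ACTIVE_LOW describe the same
	 * thing: card present while the pin reads low.
	 */
	static bool card_present(bool pin_high, bool active_low, bool cd_inverted)
	{
		bool logical = active_low ? !pin_high : pin_high;	/* gpiolib folds in the flag */

		return cd_inverted ? !logical : logical;		/* cd-inverted flips once more */
	}
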
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index bc853ebeda22..61a06f6add3c 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -317,7 +317,8 @@ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { pinctrl-single,pins = < - OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) >; }; @@ -385,7 +386,8 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; reg = <0x48>; interrupt-controller; #interrupt-cells = <2>; @@ -651,7 +653,8 @@ pinctrl-names = "default"; pinctrl-0 = <&twl6040_pins>; - interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>; /* audpwron gpio defined in the board specific dts */ diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 5e21fb430a65..e78d3718f145 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -181,6 +181,13 @@ OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ >; }; + + palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { + pinctrl-single,pins = < + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) + >; + }; }; &omap5_pmx_core { @@ -414,8 +421,11 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ reg = <0x48>; + pinctrl-0 = <&palmas_sys_nirq_pins>; + pinctrl-names = "default"; + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <2>; ti,system-power-controller; diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi index 9c7e309d9c2c..0960348002ad 100644 --- a/arch/arm/boot/dts/omap5-l4.dtsi +++ b/arch/arm/boot/dts/omap5-l4.dtsi @@ -1046,8 +1046,6 @@ <SYSC_IDLE_SMART>, <SYSC_IDLE_SMART_WKUP>; ti,syss-mask = <1>; - ti,no-reset-on-init; - ti,no-idle-on-init; /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; clock-names = "fck"; diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index 3cc33f7ff7fe..3adc158a40bb 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi @@ -1681,15 +1681,12 @@ du: display@feb00000 { compatible = "renesas,du-r8a7743"; - reg = <0 0xfeb00000 0 0x40000>, - <0 0xfeb90000 0 0x1c>; - reg-names = "du", "lvds.0"; + reg = <0 0xfeb00000 0 0x40000>; interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 724>, - <&cpg CPG_MOD 723>, - <&cpg CPG_MOD 726>; - clock-names = "du.0", "du.1", "lvds.0"; + <&cpg CPG_MOD 723>; + clock-names = "du.0", "du.1"; status = "disabled"; ports { @@ -1704,6 +1701,33 @@ port@1 { reg = <1>; du_out_lvds0: endpoint { + remote-endpoint = <&lvds0_in>; + }; + }; + }; + }; + + lvds0: lvds@feb90000 { + compatible = "renesas,r8a7743-lvds"; + reg = <0 0xfeb90000 0 0x1c>; + clocks = <&cpg CPG_MOD 726>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 726>; + status = 
"disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + lvds0_in: endpoint { + remote-endpoint = <&du_out_lvds0>; + }; + }; + port@1 { + reg = <1>; + lvds0_out: endpoint { }; }; }; diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 4acb501dd3f8..3ed49898f4b2 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi @@ -719,7 +719,6 @@ pm_qos = <&qos_lcdc0>, <&qos_lcdc1>, <&qos_cif0>, - <&qos_cif1>, <&qos_ipp>, <&qos_rga>; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 353d90f99b40..13304b8c5139 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -216,6 +216,7 @@ #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <24000000>; + clock-output-names = "osc24M"; }; osc32k: clk-32k { diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index 5d23667dc2d2..25540b7694d5 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts @@ -53,7 +53,7 @@ aliases { serial0 = &uart0; - /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */ + ethernet0 = &emac; ethernet1 = &sdiowifi; }; diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi index d5f11d6d987e..bc85b6a166c7 100644 --- a/arch/arm/boot/dts/tegra124-nyan.dtsi +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi @@ -13,10 +13,25 @@ stdout-path = "serial0:115200n8"; }; - memory@80000000 { + /* + * Note that recent version of the device tree compiler (starting with + * version 1.4.2) warn about this node containing a reg property, but + * missing a unit-address. However, the bootloader on these Chromebook + * devices relies on the full name of this node to be exactly /memory. + * Adding the unit-address causes the bootloader to create a /memory + * node and write the memory bank configuration to that node, which in + * turn leads the kernel to believe that the device has 2 GiB of + * memory instead of the amount detected by the bootloader. + * + * The name of this node is effectively ABI and must not be changed. 
+ */ + memory { + device_type = "memory"; reg = <0x0 0x80000000 0x0 0x80000000>; }; + /delete-node/ memory@80000000; + host1x@50000000 { hdmi@54280000 { status = "okay"; diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts index 689c8930dce3..b08d561d6748 100644 --- a/arch/arm/boot/dts/vf610-bk4.dts +++ b/arch/arm/boot/dts/vf610-bk4.dts @@ -110,11 +110,11 @@ bus-num = <3>; status = "okay"; spi-slave; + #address-cells = <0>; - slave@0 { + slave { compatible = "lwn,bk4"; spi-max-frequency = <30000000>; - reg = <0>; }; }; diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b6..46d41140df27 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -25,7 +25,6 @@ #ifndef __ASSEMBLY__ struct irqaction; struct pt_regs; -extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); void handle_IRQ(unsigned int, struct pt_regs *); diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ca56537b61bc..50e89869178a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -48,6 +48,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -147,6 +148,13 @@ struct kvm_cpu_context { typedef struct kvm_cpu_context kvm_cpu_context_t; +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; +}; + struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -186,6 +194,8 @@ struct kvm_vcpu_arch { /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; + struct vcpu_reset_state reset_state; + /* Detect first run of a vcpu */ bool has_run_once; }; diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index c4b1d4fb1797..de2089501b8b 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) #define S2_PMD_MASK PMD_MASK #define S2_PMD_SIZE PMD_SIZE +static inline bool kvm_stage2_has_pmd(struct kvm *kvm) +{ + return true; +} + #endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229..844861368cd5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -31,7 +31,6 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/seq_file.h> -#include <linux/ratelimit.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/kallsyms.h> @@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) return nr_irqs; } #endif - -#ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_desc *desc) -{ - struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = irq_data_get_affinity_mask(d); - struct irq_chip *c; - bool ret = false; - - /* - * If this is a per-CPU interrupt, or the affinity does not - * include this CPU, then we have nothing to do. 
- */ - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) - return false; - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; - ret = true; - } - - c = irq_data_get_irq_chip(d); - if (!c->irq_set_affinity) - pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(irq_data_get_affinity_mask(d), affinity); - - return ret; -} - -/* - * The current CPU has been marked offline. Migrate IRQs off this CPU. - * If the affinity settings do not allow other CPUs, force them onto any - * available CPU. - * - * Note: we must iterate over all IRQs, whether they have an attached - * action structure or not, as we need to get chained interrupts too. - */ -void migrate_irqs(void) -{ - unsigned int i; - struct irq_desc *desc; - unsigned long flags; - - local_irq_save(flags); - - for_each_irq_desc(i, desc) { - bool affinity_broken; - - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); - - if (affinity_broken) - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", - i, smp_processor_id()); - } - - local_irq_restore(flags); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3bf82232b1be..1d6f5ea522f4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -254,7 +254,7 @@ int __cpu_disable(void) /* * OK - migrate IRQs away from this CPU */ - migrate_irqs(); + irq_migrate_all_off_this_cpu(); /* * Flush user cache and TLB mappings, and then remove this CPU diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 222c1635bc7a..e8bd288fd5be 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) reset_coproc_regs(vcpu, table, num); for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu_cp15(vcpu, num) == 0x42424242) - panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); + WARN(vcpu_cp15(vcpu, num) == 0x42424242, + "Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index 5ed0c3ee33d6..e53327912adc 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -26,6 +26,7 @@ #include <asm/cputype.h> #include <asm/kvm_arm.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_emulate.h> #include <kvm/arm_arch_timer.h> @@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. 
+ */ + if (READ_ONCE(vcpu->arch.reset_state.reset)) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (target_pc & 1) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } + /* Reset arch_timer context */ return kvm_timer_vcpu_reset(vcpu); } diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c index 318394ed5c7a..95a11d5b3587 100644 --- a/arch/arm/mach-cns3xxx/pcie.c +++ b/arch/arm/mach-cns3xxx/pcie.c @@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus, } else /* remote PCI bus */ base = cnspci->cfg1_regs + ((busno & 0xf) << 20); - return base + (where & 0xffc) + (devfn << 12); + return base + where + (devfn << 12); } static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, @@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, u32 mask = (0x1ull << (size * 8)) - 1; int shift = (where % 4) * 8; - ret = pci_generic_config_read32(bus, devfn, where, size, val); + ret = pci_generic_config_read(bus, devfn, where, size, val); if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && (where & 0xffc) == PCI_CLASS_REVISION) diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 3b73813c6b04..23e8c93515d4 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -75,8 +75,7 @@ void __init n2100_map_io(void) /* * N2100 PCI. */ -static int __init -n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index a8b291f00109..dae514c8276a 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && (cx->mpu_logic_state == PWRDM_POWER_OFF); + /* Enter broadcast mode for periodic timers */ + tick_broadcast_enable(); + + /* Enter broadcast mode for one-shot timers */ tick_broadcast_enter(); /* @@ -218,15 +222,6 @@ fail: return index; } -/* - * For each cpu, setup the broadcast timer because local timers - * stops for the states above C1. 
- */ -static void omap_setup_broadcast_timer(void *arg) -{ - tick_broadcast_enable(); -} - static struct cpuidle_driver omap4_idle_driver = { .name = "omap4_idle", .owner = THIS_MODULE, @@ -319,8 +314,5 @@ int __init omap4_idle_init(void) if (!cpu_clkdm[0] || !cpu_clkdm[1]) return -ENODEV; - /* Configure the broadcast timer on each cpu */ - on_each_cpu(omap_setup_broadcast_timer, NULL, 1); - return cpuidle_register(idle_driver, cpu_online_mask); } diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index f86b72d1d59e..1444b4b4bd9f 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) u32 enable_mask, enable_shift; u32 pipd_mask, pipd_shift; u32 reg; + int ret; if (dsi_id == 0) { enable_mask = OMAP4_DSI1_LANEENABLE_MASK; @@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) return -ENODEV; } - regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®); + ret = regmap_read(omap4_dsi_mux_syscon, + OMAP4_DSIPHY_SYSCON_OFFSET, + ®); + if (ret) + return ret; reg &= ~enable_mask; reg &= ~pipd_mask; diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index fc5fb776a710..17558be4bf0a 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -50,6 +50,9 @@ #define OMAP4_NR_BANKS 4 #define OMAP4_NR_IRQS 128 +#define SYS_NIRQ1_EXT_SYS_IRQ_1 7 +#define SYS_NIRQ2_EXT_SYS_IRQ_2 119 + static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_RAW_SPINLOCK(wakeupgen_lock); @@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d) irq_chip_unmask_parent(d); } +/* + * The sys_nirq pins bypass peripheral modules and are wired directly + * to MPUSS wakeupgen. They get automatically inverted for GIC. 
+ */ +static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type) +{ + bool inverted = false; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + type &= ~IRQ_TYPE_LEVEL_MASK; + type |= IRQ_TYPE_LEVEL_HIGH; + inverted = true; + break; + case IRQ_TYPE_EDGE_FALLING: + type &= ~IRQ_TYPE_EDGE_BOTH; + type |= IRQ_TYPE_EDGE_RISING; + inverted = true; + break; + default: + break; + } + + if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 && + d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2) + pr_warn("wakeupgen: irq%li polarity inverted in dts\n", + d->hwirq); + + return irq_chip_set_type_parent(d, type); +} + #ifdef CONFIG_HOTPLUG_CPU static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); @@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = { .irq_mask = wakeupgen_mask, .irq_unmask = wakeupgen_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_set_type = irq_chip_set_type_parent, + .irq_set_type = wakeupgen_irq_set_type, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c index 028e50c6383f..a32c3b631484 100644 --- a/arch/arm/mach-tango/pm.c +++ b/arch/arm/mach-tango/pm.c @@ -3,6 +3,7 @@ #include <linux/suspend.h> #include <asm/suspend.h> #include "smc.h" +#include "pm.h" static int tango_pm_powerdown(unsigned long arg) { @@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = { .valid = suspend_valid_only_mem, }; -static int __init tango_pm_init(void) +void __init tango_pm_init(void) { suspend_set_ops(&tango_pm_ops); - return 0; } - -late_initcall(tango_pm_init); diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h new file mode 100644 index 000000000000..35ea705a0ee2 --- /dev/null +++ b/arch/arm/mach-tango/pm.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifdef CONFIG_SUSPEND +void __init tango_pm_init(void); +#else +#define tango_pm_init NULL +#endif diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c index 677dd7b5efd9..824f90737b04 100644 --- a/arch/arm/mach-tango/setup.c +++ b/arch/arm/mach-tango/setup.c @@ -2,6 +2,7 @@ #include <asm/mach/arch.h> #include <asm/hardware/cache-l2x0.h> #include "smc.h" +#include "pm.h" static void tango_l2c_write(unsigned long val, unsigned int reg) { @@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT") .dt_compat = tango_dt_compat, .l2c_aux_mask = ~0, .l2c_write_sec = tango_l2c_write, + .init_late = tango_pm_init, MACHINE_END diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f1e2922e447c..1e3e08a1c456 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev) return; arm_teardown_iommu_dma_ops(dev); + /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ + set_dma_ops(dev, NULL); } diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ed36dcab80f1..f51919974183 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c @@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) if (ssp == NULL) return -ENODEV; - iounmap(ssp->mmio_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); @@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) list_del(&ssp->node); mutex_unlock(&ssp_lock); - kfree(ssp); return 0; } diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c 
index 2c118a6ab358..0dc23fc227ed 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or } /* Copy arch-dep-instance from template. */ - memcpy(code, (unsigned char *)optprobe_template_entry, + memcpy(code, (unsigned long *)&optprobe_template_entry, TMPL_END_IDX * sizeof(kprobe_opcode_t)); /* Adjust buffer according to instruction. */ diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index cb44aa290e73..e1d44b903dfc 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -7,7 +7,6 @@ #include <linux/of_address.h> #include <linux/slab.h> #include <linux/types.h> -#include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/swiotlb.h> diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index b0c64f75792c..8974b5a1d3b1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts @@ -188,6 +188,7 @@ reg = <0x3a3>; interrupt-parent = <&r_intc>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 837a03dee875..2abb335145a6 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -390,7 +390,7 @@ }; video-codec@1c0e000 { - compatible = "allwinner,sun50i-h5-video-engine"; + compatible = "allwinner,sun50i-a64-video-engine"; reg = <0x01c0e000 0x1000>; clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, <&ccu CLK_DRAM_VE>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index e14e0ce7e89f..016641a41694 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi @@ -187,8 +187,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 8cd50b75171d..ade2ee09ae96 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts @@ -305,8 +305,7 @@ max-frequency = <200000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddio_ao3v3>; vqmmc-supply = <&vddio_tf>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index 4cf7f6e80c6a..25105ac96d55 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts @@ -238,8 +238,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 2e1cd5e3a246..1cc9dc68ef00 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -258,8 +258,7 @@ 
max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&tflash_vdd>; vqmmc-supply = <&tf_io>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index ce862266b9aa..0be0f2a5d2fe 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi @@ -196,8 +196,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 93a4acf2c46c..ad4d50bd9d77 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -154,8 +154,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index ec09bb5792b7..2d2db783c44c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi @@ -211,8 +211,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vcc_3v3>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index f1c410e2da2b..796baea7a0bf 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts @@ -131,8 +131,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index db293440e4ca..255cede7b447 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -238,8 +238,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; vqmmc-supply = <&vcc_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 6739697be1de..9cbdb85fb591 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts @@ -183,8 +183,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index a1b31013ab6e..bc811a2faf42 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -137,8 
+137,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 3c3a667a8df8..3f086ed7de05 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -356,8 +356,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index f7a1cffab4a8..8acfd40090d2 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts @@ -147,8 +147,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 7212dc4531e4..7fa20a8ede17 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts @@ -170,8 +170,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts index 64acccc4bfcb..f74b13aa5aa5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts @@ -227,34 +227,34 @@ pinctrl_usdhc1_100mhz: usdhc1-100grp { fsl,pins = < - MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 - MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 - MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 - MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 - MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 - MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 - MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 - MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 - MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 - MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 - MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 >; }; pinctrl_usdhc1_200mhz: usdhc1-200grp { fsl,pins = < - MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 - MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 - MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 - MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 - MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 - MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 - MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 - MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 - MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 - MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 - MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 + 
MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 8e9d6d5ed7b2..b6d31499fb43 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -360,6 +360,8 @@ <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, <&clk IMX8MQ_CLK_USDHC1_ROOT>; clock-names = "ipg", "ahb", "per"; + assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>; + assigned-clock-rates = <400000000>; fsl,tuning-start-tap = <20>; fsl,tuning-step = <2>; bus-width = <4>; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index 5b4a9609e31f..2468762283a5 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -351,7 +351,7 @@ reg = <0>; pinctrl-names = "default"; pinctrl-0 = <&cp0_copper_eth_phy_reset>; - reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>; + reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>; reset-assert-us = <10000>; }; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 99b7495455a6..838e32cc14c9 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -404,7 +404,7 @@ }; intc: interrupt-controller@9bc0000 { - compatible = "arm,gic-v3"; + compatible = "qcom,msm8996-gic-v3", "arm,gic-v3"; #interrupt-cells = <3>; interrupt-controller; #redistributor-regions = <1>; diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi index 20745a8528c5..719ed9d9067d 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi @@ -1011,6 +1011,9 @@ <&cpg CPG_CORE R8A774A1_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi index afedbf5728ec..0648d12778ed 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi @@ -1262,6 +1262,9 @@ <&cpg CPG_CORE R8A7796_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi index 6dc9b1fef830..4b3730f640ef 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi @@ -1068,6 +1068,9 @@ <&cpg CPG_CORE R8A77965_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = 
<&sysc R8A77965_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index bd937d68ca3b..040b36ef0dd2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -40,6 +40,7 @@ pinctrl-0 = <&usb30_host_drv>; regulator-name = "vcc_host_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; @@ -51,6 +52,7 @@ pinctrl-0 = <&usb20_host_drv>; regulator-name = "vcc_host1_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index 1ee0dc0d9f10..d1cf404b8708 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -22,7 +22,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 81e73103fa78..15e254a77391 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts @@ -43,7 +43,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index 0b8f1edbd746..b48a63c3efc3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts @@ -91,7 +91,7 @@ pinctrl-0 = <&lcd_panel_reset>; power-supply = <&vcc3v3_s0>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..da3fc7324d68 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -48,6 +48,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -208,6 +209,13 @@ struct kvm_cpu_context { typedef struct kvm_cpu_context kvm_cpu_context_t; +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; +}; + struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -297,6 +305,9 @@ struct kvm_vcpu_arch { /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; + /* Additional reset state */ + struct vcpu_reset_state reset_state; + /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. 
*/ bool sysregs_loaded_on_cpu; diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e1ec947e7c0c..0c656850eeea 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x) #define virt_addr_valid(kaddr) \ (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) +/* + * Given that the GIC architecture permits ITS implementations that can only be + * configured with a LPI table address once, GICv3 systems with many CPUs may + * end up reserving a lot of different regions after a kexec for their LPI + * tables (one per CPU), as we are forced to reuse the same memory after kexec + * (and thus reserve it persistently with EFI beforehand) + */ +#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS) +# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) +#endif + #include <asm-generic/memory_model.h> #endif diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h index 2ba6c6b9541f..71abfc7612b2 100644 --- a/arch/arm64/include/asm/neon-intrinsics.h +++ b/arch/arm64/include/asm/neon-intrinsics.h @@ -36,4 +36,8 @@ #include <arm_neon.h> #endif +#ifdef CONFIG_CC_IS_CLANG +#pragma clang diagnostic ignored "-Wincompatible-pointer-types" +#endif + #endif /* __ASM_NEON_INTRINSICS_H */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 15d79a8e5e5e..eecf7927dab0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -539,8 +539,7 @@ set_hcr: /* GICv3 system register access */ mrs x0, id_aa64pfr0_el1 ubfx x0, x0, #24, #4 - cmp x0, #1 - b.ne 3f + cbz x0, 3f mrs_s x0, SYS_ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 29cdc99688f3..9859e1178e6b 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -299,8 +299,10 @@ int swsusp_arch_suspend(void) dcache_clean_range(__idmap_text_start, __idmap_text_end); /* Clean kvm setup code to PoC? */ - if (el2_reset_needed()) + if (el2_reset_needed()) { dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); + dcache_clean_range(__hyp_text_start, __hyp_text_end); + } /* make the crash dump kernel image protected again */ crash_post_resume(); diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index e1261fbaa374..17f325ba831e 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -28,6 +28,8 @@ #include <asm/virt.h> .text + .pushsection .hyp.text, "ax" + .align 11 ENTRY(__hyp_stub_vectors) diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index ba6b41790fcd..b09b6f75f759 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * we end up running with module randomization disabled. */ module_alloc_base = (u64)_etext - MODULES_VSIZE; + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); /* * Try to map the FDT early. If this fails, we simply bail, diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index f2c211a6229b..58871333737a 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -120,10 +120,12 @@ static int create_dtb(struct kimage *image, { void *buf; size_t buf_size; + size_t cmdline_len; int ret; + cmdline_len = cmdline ? 
strlen(cmdline) : 0; buf_size = fdt_totalsize(initial_boot_params) - + strlen(cmdline) + DTB_EXTRA_SPACE; + + cmdline_len + DTB_EXTRA_SPACE; for (;;) { buf = vmalloc(buf_size); diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 2a5b338b2542..f17afb99890c 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr) addr < (unsigned long)__entry_text_end) || (addr >= (unsigned long)__idmap_text_start && addr < (unsigned long)__idmap_text_end) || + (addr >= (unsigned long)__hyp_text_start && + addr < (unsigned long)__hyp_text_end) || !!search_exception_tables(addr)) return true; if (!is_kernel_in_hyp_mode()) { - if ((addr >= (unsigned long)__hyp_text_start && - addr < (unsigned long)__hyp_text_end) || - (addr >= (unsigned long)__hyp_idmap_text_start && + if ((addr >= (unsigned long)__hyp_idmap_text_start && addr < (unsigned long)__hyp_idmap_text_end)) return true; } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9dce33b0e260..ddaea0fd2fa4 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs) } /* - * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a - * We also take into account DIT (bit 24), which is not yet documented, and - * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be - * allocated an EL0 meaning in future. + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is + * not described in ARM DDI 0487D.a. + * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may + * be allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. * We also reserve IL for the kernel; SS is handled dynamically. */ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ - GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) static int valid_compat_regs(struct user_pt_regs *regs) { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..009849328289 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p) arm64_memblock_init(); paging_init(); - efi_apply_persistent_mem_reservations(); acpi_table_upgrade(); @@ -340,6 +339,9 @@ void __init setup_arch(char **cmdline_p) smp_init_cpus(); smp_build_mpidr_hash(); + /* Init percpu seeds for random tags after cpus are set up. 
*/ + kasan_init_tags(); + #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Make sure init_thread_info.ttbr0 always generates translation diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..421ebf6f7086 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -23,6 +23,7 @@ #include <kvm/arm_psci.h> #include <asm/cpufeature.h> +#include <asm/kprobes.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_host.h> @@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) write_sysreg(kvm_get_hyp_vector(), vbar_el1); } +NOKPROBE_SYMBOL(activate_traps_vhe); static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) { @@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void) write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); write_sysreg(vectors, vbar_el1); } +NOKPROBE_SYMBOL(deactivate_traps_vhe); static void __hyp_text __deactivate_traps_nvhe(void) { @@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) return exit_code; } +NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); /* Switch to the guest for legacy non-VHE systems */ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) @@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, read_sysreg_el2(esr), read_sysreg_el2(far), read_sysreg(hpfar_el2), par, vcpu); } +NOKPROBE_SYMBOL(__hyp_call_panic_vhe); void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) { diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 68d6f7c3b237..b426e2cf973c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -18,6 +18,7 @@ #include <linux/compiler.h> #include <linux/kvm_host.h> +#include <asm/kprobes.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_hyp.h> @@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_host_state_vhe); void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); __sysreg_save_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe); static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) { @@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe); void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); __sysreg_restore_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe); void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index b72a3dd56204..f16a5f8ff2b4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -32,6 +32,7 @@ #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_emulate.h> #include <asm/kvm_mmu.h> /* Maximum phys_shift supported for any VM on this host */ @@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) * This function finds the right table above and sets the registers on * the virtual CPU struct to their architecturally defined reset * values. + * + * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT + * ioctl or as part of handling a request issued by another VCPU in the PSCI + * handling code. 
In the first case, the VCPU will not be loaded, and in the + * second case the VCPU will be loaded. Because this function operates purely + * on the memory-backed valus of system registers, we want to do a full put if + * we were loaded (handling a request) and load the values back at the end of + * the function. Otherwise we leave the state alone. In both cases, we + * disable preemption around the vcpu reset as we would otherwise race with + * preempt notifiers which also call put/load. */ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) { const struct kvm_regs *cpu_reset; + int ret = -EINVAL; + bool loaded; + + preempt_disable(); + loaded = (vcpu->cpu != -1); + if (loaded) + kvm_arch_vcpu_put(vcpu); switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { if (!cpu_has_32bit_el1()) - return -EINVAL; + goto out; cpu_reset = &default_regs_reset32; } else { cpu_reset = &default_regs_reset; @@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset system registers */ kvm_reset_sys_regs(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. + */ + if (vcpu->arch.reset_state.reset) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } + /* Reset PMU */ kvm_pmu_vcpu_reset(vcpu); @@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; /* Reset timer */ - return kvm_timer_vcpu_reset(vcpu); + ret = kvm_timer_vcpu_reset(vcpu); +out: + if (loaded) + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + preempt_enable(); + return ret; } void kvm_set_ipa_limit(void) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3e37228ae4e..c936aa40c3f4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, return read_zero(vcpu, p); } -static bool trap_undef(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) +/* + * ARMv8.1 mandates at least a trivial LORegion implementation, where all the + * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 + * system, these registers should UNDEF. LORID_EL1 being a RO register, we + * treat it separately. 
+ */ +static bool trap_loregion(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) { - kvm_inject_undefined(vcpu); - return false; + u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1, + (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); + + if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { + kvm_inject_undefined(vcpu); + return false; + } + + if (p->is_write && sr == SYS_LORID_EL1) + return write_to_read_only(vcpu, p, r); + + return trap_raz_wi(vcpu, p, r); } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, @@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) if (val & ptrauth_mask) kvm_debug("ptrauth unsupported for guests, suppressing\n"); val &= ~ptrauth_mask; - } else if (id == SYS_ID_AA64MMFR1_EL1) { - if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) - kvm_debug("LORegions unsupported for guests, suppressing\n"); - - val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); } return val; @@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, - { SYS_DESC(SYS_LORSA_EL1), trap_undef }, - { SYS_DESC(SYS_LOREA_EL1), trap_undef }, - { SYS_DESC(SYS_LORN_EL1), trap_undef }, - { SYS_DESC(SYS_LORC_EL1), trap_undef }, - { SYS_DESC(SYS_LORID_EL1), trap_undef }, + { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, + { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, + { SYS_DESC(SYS_LORN_EL1), trap_loregion }, + { SYS_DESC(SYS_LORC_EL1), trap_loregion }, + { SYS_DESC(SYS_LORID_EL1), trap_loregion }, { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, @@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) table = get_target_table(vcpu->arch.target, true, &num); reset_sys_reg_descs(vcpu, table, num); - for (num = 1; num < NR_SYS_REGS; num++) - if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) - panic("Didn't reset __vcpu_sys_reg(%zi)", num); + for (num = 1; num < NR_SYS_REGS; num++) { + if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, + "Didn't reset __vcpu_sys_reg(%zi)\n", num)) + break; + } } diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index fcb1f2a6d7c6..99bb8facb5cb 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, } -static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) +static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, + unsigned long end) { - pte_t *ptep = pte_offset_kernel(pmdp, 0UL); - unsigned long addr; - unsigned i; + unsigned long addr = start; + pte_t *ptep = pte_offset_kernel(pmdp, start); - for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { - addr = start + i * PAGE_SIZE; + do { note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); - } + } while (ptep++, addr += PAGE_SIZE, addr != end); } -static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) +static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, + unsigned long end) { - pmd_t *pmdp = pmd_offset(pudp, 0UL); - unsigned long addr; - unsigned i; + unsigned long next, addr = start; + pmd_t *pmdp = pmd_offset(pudp, start); - for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { + do { pmd_t pmd = READ_ONCE(*pmdp); + next = pmd_addr_end(addr, end); - addr = start + i * PMD_SIZE; if (pmd_none(pmd) || pmd_sect(pmd)) { 
note_page(st, addr, 3, pmd_val(pmd)); } else { BUG_ON(pmd_bad(pmd)); - walk_pte(st, pmdp, addr); + walk_pte(st, pmdp, addr, next); } - } + } while (pmdp++, addr = next, addr != end); } -static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) +static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, + unsigned long end) { - pud_t *pudp = pud_offset(pgdp, 0UL); - unsigned long addr; - unsigned i; + unsigned long next, addr = start; + pud_t *pudp = pud_offset(pgdp, start); - for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { + do { pud_t pud = READ_ONCE(*pudp); + next = pud_addr_end(addr, end); - addr = start + i * PUD_SIZE; if (pud_none(pud) || pud_sect(pud)) { note_page(st, addr, 2, pud_val(pud)); } else { BUG_ON(pud_bad(pud)); - walk_pmd(st, pudp, addr); + walk_pmd(st, pudp, addr, next); } - } + } while (pudp++, addr = next, addr != end); } static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) { - pgd_t *pgdp = pgd_offset(mm, 0UL); - unsigned i; - unsigned long addr; + unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0; + unsigned long next, addr = start; + pgd_t *pgdp = pgd_offset(mm, start); - for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { + do { pgd_t pgd = READ_ONCE(*pgdp); + next = pgd_addr_end(addr, end); - addr = start + i * PGDIR_SIZE; if (pgd_none(pgd)) { note_page(st, addr, 1, pgd_val(pgd)); } else { BUG_ON(pgd_bad(pgd)); - walk_pud(st, pgdp, addr); + walk_pud(st, pgdp, addr, next); } - } + } while (pgdp++, addr = next, addr != end); } void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 30695a868107..5c9073bace83 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len) __clean_dcache_area_pou(kaddr, len); __flush_icache_all(); } else { - flush_icache_range(addr, addr + len); + /* + * Don't issue kick_all_cpus_sync() after I-cache invalidation + * for user mappings. + */ + __flush_icache_range(addr, addr + len); } } diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 4b55b15707a3..f37a86d2a69d 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -252,8 +252,6 @@ void __init kasan_init(void) memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); - kasan_init_tags(); - /* At this point kasan is fully initialized. 
Enable error messages */ init_task.kasan_depth = 0; pr_info("KernelAddressSanitizer initialized\n"); diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 33a2c94fed0d..63b4a1705182 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -30,6 +30,7 @@ generic-y += pgalloc.h generic-y += preempt.h generic-y += segment.h generic-y += serial.h +generic-y += shmparam.h generic-y += tlbflush.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index edfcbb25fd9f..dcea277c09ae 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -45,8 +45,8 @@ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) #define pte_clear(mm, addr, ptep) set_pte((ptep), \ - (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) -#define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) + (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0))) +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ @@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte) #define pgd_index(address) ((address) >> PGDIR_SHIFT) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); + /* * Macro to make mark a page protection value as "uncacheable". 
Note * that "protection" is really a misnomer here as the protection value diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 8f454810514f..21e0bd5293dd 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -49,7 +49,7 @@ struct thread_struct { }; #define INIT_THREAD { \ - .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ + .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ .sr = DEFAULT_PSR_VALUE, \ } @@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) #define task_pt_regs(p) \ - ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) + ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) #define cpu_relax() barrier() diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c index 659253e9989c..d67f9777cfd9 100644 --- a/arch/csky/kernel/dumpstack.c +++ b/arch/csky/kernel/dumpstack.c @@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack) if (task) stack = (unsigned long *)thread_saved_fp(task); else +#ifdef CONFIG_STACKTRACE + asm volatile("mov %0, r8\n":"=r"(stack)::"memory"); +#else stack = (unsigned long *)&stack; +#endif } show_trace(stack); diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 57f1afe19a52..f2f12fff36f7 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -8,6 +8,7 @@ #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/uaccess.h> @@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target, static const struct user_regset csky_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, - .n = ELF_NGREG, + .n = sizeof(struct pt_regs) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = &gpr_get, diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index ddc4dd79f282..b07a534b3062 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) { unsigned long mask = 1 << cpu; - secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; + secondary_stack = + (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; secondary_hint = mfcr("cr31"); secondary_ccr = mfcr("cr18"); diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c index cb7c03e5cd21..8473b6bdf512 100644 --- a/arch/csky/mm/ioremap.c +++ b/arch/csky/mm/ioremap.c @@ -46,3 +46,17 @@ void iounmap(void __iomem *addr) vunmap((void *)((unsigned long)addr & PAGE_MASK)); } EXPORT_SYMBOL(iounmap); + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) { + vma_prot.pgprot |= _PAGE_SO; + return pgprot_noncached(vma_prot); + } else if (file->f_flags & O_SYNC) { + return pgprot_noncached(vma_prot); + } + + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index cd400d353d18..961c1dc064e1 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -40,6 +40,7 @@ generic-y += preempt.h generic-y += scatterlist.h generic-y += sections.h generic-y += serial.h +generic-y += shmparam.h generic-y += sizes.h generic-y += spinlock.h generic-y += timex.h diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 
100644 --- a/arch/h8300/include/uapi/asm/Kbuild +++ b/arch/h8300/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 47c4da3d64a4..b25fd42aa0f4 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -30,6 +30,7 @@ generic-y += rwsem.h generic-y += sections.h generic-y += segment.h generic-y += serial.h +generic-y += shmparam.h generic-y += sizes.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild index 61d955c1747a..c1b06dcf6cf8 100644 --- a/arch/hexagon/include/uapi/asm/Kbuild +++ b/arch/hexagon/include/uapi/asm/Kbuild @@ -1,4 +1,3 @@ include include/uapi/asm-generic/Kbuild.asm -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 38049357d6d3..40712e49381b 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -155,18 +155,22 @@ out: static int __init nfhd_init(void) { u32 blocks, bsize; + int ret; int i; nfhd_id = nf_get_id("XHDI"); if (!nfhd_id) return -ENODEV; - major_num = register_blkdev(major_num, "nfhd"); - if (major_num <= 0) { + ret = register_blkdev(major_num, "nfhd"); + if (ret < 0) { pr_warn("nfhd: unable to get major number\n"); - return major_num; + return ret; } + if (!major_num) + major_num = ret; + for (i = NFHD_DEV_OFFSET; i < 24; i++) { if (nfhd_get_capacity(i, 0, &blocks, &bsize)) continue; diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 9f1dd26903e3..95f8f631c4df 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h generic-y += percpu.h generic-y += preempt.h generic-y += sections.h +generic-y += shmparam.h generic-y += spinlock.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index b8b3525271fa..960bf1e4be53 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -2,4 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generic-y += kvm_para.h -generic-y += shmparam.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 9c7d1d25bf3d..791cc8d54d0a 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -26,6 +26,7 @@ generic-y += parport.h generic-y += percpu.h generic-y += preempt.h generic-y += serial.h +generic-y += shmparam.h generic-y += syscalls.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index 28823e3db825..97823ec46e97 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -2,5 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 0d14f51d0002..a84c24d894aa 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT please say 'N' here. If you want a high-performance kernel to run on new Loongson 3 machines only, please say 'Y' here. 
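For illustration, here is a minimal C sketch (not part of any hunk above; the "example" driver name and helper are hypothetical) of the register_blkdev() return-value convention that the m68k nfblock change relies on: a request for a dynamic major (major == 0) reports the allocated major as a positive return value, whereas a request for a fixed major reports success as 0, so the result cannot simply be assigned back to the major number.

/*
 * Sketch under the assumption that register_blkdev() keeps its usual
 * convention: negative on error, the allocated major when 0 was
 * requested, and 0 on success when a fixed major was requested.
 */
static int example_blkdev_register(int major)
{
        int ret = register_blkdev(major, "example");

        if (ret < 0)
                return ret;     /* registration failed outright */
        if (!major)
                major = ret;    /* dynamic request: ret is the new major */
        /* fixed request: ret is 0 and the caller-chosen major is kept */
        return major;
}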
+config CPU_LOONGSON3_WORKAROUNDS + bool "Old Loongson 3 LLSC Workarounds" + default y if SMP + depends on CPU_LOONGSON3 + help + Loongson 3 processors have the llsc issues which require workarounds. + Without workarounds the system may hang unexpectedly. + + Newer Loongson 3 will fix these issues and no workarounds are needed. + The workarounds have no significant side effect on them but may + decrease the performance of the system so this option should be + disabled unless the kernel is intended to be run on old systems. + + If unsure, please say Y. + config CPU_LOONGSON2E bool "Loongson 2E" depends on SYS_HAS_CPU_LOONGSON2E diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 50cff3cbcc6d..4f7b1fa31cf5 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -76,7 +76,7 @@ status = "okay"; pinctrl-names = "default"; - pinctrl-0 = <&pins_uart2>; + pinctrl-0 = <&pins_uart3>; }; &uart4 { @@ -196,9 +196,9 @@ bias-disable; }; - pins_uart2: uart2 { - function = "uart2"; - groups = "uart2-data", "uart2-hwflow"; + pins_uart3: uart3 { + function = "uart3"; + groups = "uart3-data", "uart3-hwflow"; bias-disable; }; diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 6fb16fd24035..2beb78a62b7d 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi @@ -161,7 +161,7 @@ #dma-cells = <2>; interrupt-parent = <&intc>; - interrupts = <29>; + interrupts = <20>; clocks = <&cgu JZ4740_CLK_DMA>; diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts index 2152b7ba65fb..cc8dbea0911f 100644 --- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts +++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts @@ -90,11 +90,11 @@ interrupts = <0>; }; - axi_i2c: i2c@10A00000 { + axi_i2c: i2c@10a00000 { compatible = "xlnx,xps-iic-2.00.a"; interrupt-parent = <&axi_intc>; interrupts = <4>; - reg = < 0x10A00000 0x10000 >; + reg = < 0x10a00000 0x10000 >; clocks = <&ext>; xlnx,clk-freq = <0x5f5e100>; xlnx,family = "Artix7"; @@ -106,9 +106,9 @@ #address-cells = <1>; #size-cells = <0>; - ad7420@4B { + ad7420@4b { compatible = "adi,adt7420"; - reg = <0x4B>; + reg = <0x4b>; }; } ; }; diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 43fcd35e2957..94096299fc56 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ 
" .set "MIPS_ISA_LEVEL" \n" \ @@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index a5eb1bb199a7..b7f6ac5e513c 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -222,6 +222,42 @@ #define __smp_mb__before_atomic() __smp_mb__before_llsc() #define __smp_mb__after_atomic() smp_llsc_mb() +/* + * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, + * store or pref) in between an ll & sc can cause the sc instruction to + * erroneously succeed, breaking atomicity. Whilst it's unusual to write code + * containing such sequences, this bug bites harder than we might otherwise + * expect due to reordering & speculation: + * + * 1) A memory access appearing prior to the ll in program order may actually + * be executed after the ll - this is the reordering case. + * + * In order to avoid this we need to place a memory barrier (ie. a sync + * instruction) prior to every ll instruction, in between it & any earlier + * memory access instructions. Many of these cases are already covered by + * smp_mb__before_llsc() but for the remaining cases, typically ones in + * which multiple CPUs may operate on a memory location but ordering is not + * usually guaranteed, we use loongson_llsc_mb() below. + * + * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. + * + * 2) If a conditional branch exists between an ll & sc with a target outside + * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg() + * or similar, then misprediction of the branch may allow speculative + * execution of memory accesses from outside of the ll-sc loop. + * + * In order to avoid this we need a memory barrier (ie. a sync instruction) + * at each affected branch target, for which we also use loongson_llsc_mb() + * defined below. + * + * This case affects all current Loongson 3 CPUs. 
+ */ +#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ +#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") +#else +#define loongson_llsc_mb() do { } while (0) +#endif + #include <asm-generic/barrier.h> #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index c4675957b21b..830c93a010c3 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { + loongson_llsc_mb(); do { __asm__ __volatile__( " " __LL "%0, %1 # set_bit \n" @@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" @@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) : "ir" (~(1UL << bit))); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { + loongson_llsc_mb(); do { __asm__ __volatile__( " " __LL "%0, %1 # clear_bit \n" @@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" @@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); unsigned long temp; + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index c14d798f3888..b83b0397462d 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -50,6 +50,7 @@ "i" (-EFAULT) \ : "memory"); \ } else if (cpu_has_llsc) { \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ @@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { + loongson_llsc_mb(); __asm__ __volatile__( "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" @@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); + loongson_llsc_mb(); } else return -ENOSYS; diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 57933fc8fd98..910851c62db3 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [global] "r" (page_global)); } else if (kernel_uses_llsc) { + loongson_llsc_mb(); __asm__ __volatile__ ( " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" @@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) " .set pop \n" : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [global] "r" (page_global)); + loongson_llsc_mb(); } #else /* !CONFIG_SMP */ if (pte_none(*buddy)) diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 8f5bd04f320a..7f3f136572de 100644 --- 
a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -457,5 +457,5 @@ void mips_cm_error_report(void) } /* reprime cause register */ - write_gcr_error_cause(0); + write_gcr_error_cause(cm_error); } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 6829a064aac8..339870ed92f7 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) static int get_frame_info(struct mips_frame_info *info) { bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); - union mips_instruction insn, *ip, *ip_end; + union mips_instruction insn, *ip; const unsigned int max_insns = 128; unsigned int last_insn_size = 0; unsigned int i; @@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info) if (!ip) goto err; - ip_end = (void *)ip + info->func_size; - - for (i = 0; i < max_insns && ip < ip_end; i++) { + for (i = 0; i < max_insns; i++) { ip = (void *)ip + last_insn_size; + if (is_mmips && mm_insn_16bit(ip->halfword[0])) { insn.word = ip->halfword[0] << 16; last_insn_size = 2; diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index 0fce4608aa88..c1a4d4dc4665 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform @@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS endif cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap + +# +# Some versions of binutils, not currently mainline as of 2019/02/04, support +# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction +# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a +# description). +# +# We disable this in order to prevent the assembler meddling with the +# instruction that labels refer to, ie. if we label an ll instruction: +# +# 1: ll v0, 0(a0) +# +# ...then with the assembler fix applied the label may actually point at a sync +# instruction inserted by the assembler, and if we were using the label in an +# exception table the table would no longer contain the address of the ll +# instruction. +# +# Avoid this by explicitly disabling that assembler behaviour. If upstream +# binutils does not merge support for the flag then we can revisit & remove +# this later - for now it ensures vendor toolchains don't cause problems. +# +cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) + # # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a # as MIPS64 R2; older versions as just R1. This leaves the possibility open diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c index a60715e11306..b26892ce871c 100644 --- a/arch/mips/loongson64/common/reset.c +++ b/arch/mips/loongson64/common/reset.c @@ -59,7 +59,12 @@ static void loongson_poweroff(void) { #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE mach_prepare_shutdown(); - unreachable(); + + /* + * It needs a wait loop here, but mips/kernel/reset.c already calls + * a generic delay loop, machine_hang(), so simply return. + */ + return; #else void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 37b1cb246332..65b6e85447b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, * to mimic that here by taking a load/istream page * fault. 
*/ + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(p, 0); UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); uasm_i_jr(p, ptr); @@ -1646,6 +1648,8 @@ static void iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(p, 0); # ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) uasm_i_lld(p, pte, 0, ptr); @@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void) #endif uasm_l_nopage_tlbl(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_0 & 1) { @@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void) #endif uasm_l_nopage_tlbs(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { @@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void) #endif uasm_l_nopage_tlbm(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..76e9bf88d3b9 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -79,8 +79,6 @@ enum reg_val_type { REG_64BIT_32BIT, /* 32-bit compatible, need truncation for 64-bit ops. */ REG_32BIT, - /* 32-bit zero extended. */ - REG_32BIT_ZERO_EX, /* 32-bit no sign/zero extension needed. */ REG_32BIT_POS }; @@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) const struct bpf_prog *prog = ctx->skf; int stack_adjust = ctx->stack_size; int store_offset = stack_adjust - 8; + enum reg_val_type td; int r0 = MIPS_R_V0; - if (dest_reg == MIPS_R_RA && - get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) + if (dest_reg == MIPS_R_RA) { /* Don't let zero extended value escape. 
*/ - emit_instr(ctx, sll, r0, r0, 0); + td = get_reg_val_type(ctx, prog->len, BPF_REG_0); + if (td == REG_64BIT) + emit_instr(ctx, sll, r0, r0, 0); + } if (ctx->flags & EBPF_SAVE_RA) { emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); @@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) + if (td == REG_64BIT) /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); if (insn->imm == 1) { @@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (src < 0 || dst < 0) return -EINVAL; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } did_move = false; ts = get_reg_val_type(ctx, this_idx, insn->src_reg); - if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { + if (ts == REG_64BIT) { int tmp_reg = MIPS_R_AT; if (bpf_op == BPF_MOV) { @@ -1254,8 +1255,7 @@ jeq_common: if (insn->imm == 64 && td == REG_32BIT) emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - if (insn->imm != 64 && - (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { + if (insn->imm != 64 && td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 5017d5843c5a..fc29b85cfa92 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void) if (octeon_has_feature(OCTEON_FEATURE_PCIE)) return 0; + if (!octeon_is_pci_host()) { + pr_notice("Not in host mode, PCI Controller not initialized\n"); + return 0; + } + /* Point pcibios_map_irq() to the PCI version of it */ octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; @@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void) else octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; - if (!octeon_is_pci_host()) { - pr_notice("Not in host mode, PCI Controller not initialized\n"); - return 0; - } - /* PCI I/O and PCI MEM values */ set_io_port_base(OCTEON_PCI_IOSPACE_BASE); ioport_resource.start = 0; diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index f6fd340e39c2..0ede4deb8181 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -8,6 +8,7 @@ ccflags-vdso := \ $(filter -E%,$(KBUILD_CFLAGS)) \ $(filter -mmicromips,$(KBUILD_CFLAGS)) \ $(filter -march=%,$(KBUILD_CFLAGS)) \ + $(filter -m%-float,$(KBUILD_CFLAGS)) \ -D__VDSO__ ifdef CONFIG_CC_IS_CLANG @@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 +$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) @@ -169,7 +170,7 @@ 
$(obj)/%-n32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 +$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index eb87cd8327c8..1f04844b6b82 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -34,6 +34,7 @@ generic-y += qrwlock_types.h generic-y += qrwlock.h generic-y += sections.h generic-y += segment.h +generic-y += shmparam.h generic-y += string.h generic-y += switch_to.h generic-y += topology.h diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2582df1c529b..0964c236e3e5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + if (test_thread_flag(TIF_SYSCALL_TRACE)) { + int rc = tracehook_report_syscall_entry(regs); + /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip - * the system call and the system call restart handling. + * As tracesys_next does not set %r28 to -ENOSYS + * when %r20 is set to -1, initialize it here. */ - regs->gr[20] = -1UL; - goto out; + regs->gr[28] = -ENOSYS; + + if (rc) { + /* + * A nonzero return code from + * tracehook_report_syscall_entry() tells us + * to prevent the syscall execution. Skip + * the syscall call and the syscall restart handling. + * + * Note that the tracer may also just change + * regs->gr[20] to an invalid syscall number, + * that is handled by tracesys_next. + */ + regs->gr[20] = -1UL; + return -1; + } } /* Do the secure computing check after ptrace. 
*/ @@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) regs->gr[24] & 0xffffffff, regs->gr[23] & 0xffffffff); -out: /* * Sign extend the syscall number to 64bit since it may have been * modified by a compat ptrace call diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2e6ada28da64..d8c8d7c9df15 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud) static inline int pud_present(pud_t pud) { - return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); + return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); } extern struct page *pud_page(pud_t pud); @@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd) static inline int pgd_present(pgd_t pgd) { - return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); + return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); } static inline pte_t pgd_pte(pgd_t pgd) @@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, #define pmd_move_must_withdraw pmd_move_must_withdraw struct spinlock; -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, - struct spinlock *old_pmd_ptl, - struct vm_area_struct *vma) -{ - if (radix_enabled()) - return false; - /* - * Archs like ppc64 use pgtable to store per pmd - * specific information. So when we switch the pmd, - * we should also withdraw and deposit the pgtable - */ - return true; -} - - +extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma); +/* + * Hash translation mode use the deposited table to store hash pte + * slot information. + */ #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit static inline bool arch_needs_pgtable_deposit(void) { diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index f3c31f5e1026..ecd31569a120 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -400,3 +400,25 @@ void arch_report_meminfo(struct seq_file *m) atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); } #endif /* CONFIG_PROC_FS */ + +/* + * For hash translation mode, we use the deposited table to store hash slot + * information and they are stored at PTRS_PER_PMD offset from related pmd + * location. Hence a pmd move requires deposit and withdraw. + * + * For radix translation with split pmd ptl, we store the deposited table in the + * pmd page. Hence if we have different pmd page we need to withdraw during pmd + * move. + * + * With hash we use deposited table always irrespective of anon or not. + * With radix we use deposited table only for anonymous mapping. 
+ */ +int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma) +{ + if (radix_enabled()) + return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); + + return true; +} diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7db3119f8a5b..145373f0e5dc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pnv_pci_ioda2_setup_dma_pe(phb, pe); #ifdef CONFIG_IOMMU_API + iommu_register_group(&pe->table_group, + pe->phb->hose->global_number, pe->pe_number); pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); #endif } diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 45fb70b4bfa7..ef9448a907c6 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, return 0; pe = &phb->ioda.pe_array[pdn->pe_number]; + if (!pe->table_group.group) + return 0; iommu_add_device(&pe->table_group, dev); return 0; case BUS_NOTIFY_DEL_DEVICE: diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 7d6457ab5d34..bba281b1fe1b 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; uint64_t rc, token; + uint64_t saved = 0; /* * When the hypervisor cannot map all the requested memory in a single @@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p) rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, p->blocks, BIND_ANY_ADDR, token); token = ret[0]; + if (!saved) + saved = ret[1]; cond_resched(); } while (rc == H_BUSY); @@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) return -ENXIO; } - p->bound_addr = ret[1]; + p->bound_addr = saved; dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index feeeaa60697c..515fc3cc9687 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -103,7 +103,7 @@ choice prompt "Base ISA" default ARCH_RV64I help - This selects the base ISA that this kernel will traget and must match + This selects the base ISA that this kernel will target and must match the target platform. 
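As a rough illustration of how the now out-of-line pmd_move_must_withdraw() above is consumed, the following simplified sketch approximates the relevant step of move_huge_pmd() in mm/huge_memory.c; locking, TLB flushing and the surrounding variables (mm, vma, old_pmd, new_pmd, old_ptl, new_ptl) are assumed from that context rather than quoted from it.

/*
 * Simplified sketch: when the predicate says a deposited page table
 * travels with the pmd, withdraw it from under the old pmd and
 * deposit it under the new one while both page-table locks are held.
 */
if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
        pgtable_t pgtable;

        pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
        pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
}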
config ARCH_RV32I diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index f399659d3b8d..2fd3461e50ab 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y CONFIG_SMP=y -CONFIG_PCI=y -CONFIG_PCIE_XILINX=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_NET=y @@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NETLINK_DIAG=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCIE_XILINX=y CONFIG_DEVTMPFS=y CONFIG_BLK_DEV_LOOP=y CONFIG_VIRTIO_BLK=y @@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y CONFIG_VIRTIO_MMIO=y CONFIG_SIFIVE_PLIC=y -CONFIG_RAS=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_AUTOFS4_FS=y @@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_DEV_VIRTIO=y CONFIG_PRINTK_TIME=y # CONFIG_RCU_TRACE is not set diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 06cfbb3aacbb..2a546a52f02a 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -80,7 +80,7 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) -#ifdef CONFIG_64BITS +#ifdef CONFIG_64BIT #define PTE_FMT "%016lx" #else #define PTE_FMT "%08lx" diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 2fa2942be221..470755cb7558 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -35,6 +35,12 @@ #define _PAGE_SPECIAL _PAGE_SOFT #define _PAGE_TABLE _PAGE_PRESENT +/* + * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to + * distinguish them from swapped out pages + */ +#define _PAGE_PROT_NONE _PAGE_READ + #define _PAGE_PFN_SHIFT 10 /* Set of bits to preserve across pte_modify() */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 16301966d65b..a8179a8c1491 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -44,7 +44,7 @@ /* Page protection bits */ #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) -#define PAGE_NONE __pgprot(0) +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE) #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) @@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; static inline int pmd_present(pmd_t pmd) { - return (pmd_val(pmd) & _PAGE_PRESENT); + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pmd_none(pmd_t pmd) @@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) static inline int pte_present(pte_t pte) { - return (pte_val(pte) & _PAGE_PRESENT); + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pte_none(pte_t pte) @@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, * * Format of swap PTE: * bit 0: _PAGE_PRESENT (zero) - * bit 1: reserved for future use (zero) + * bit 1: _PAGE_PROT_NONE (zero) * bits 2 to 6: swap type * bits 7 to XLEN-1: swap offset */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 0531f49af5c3..ce70bceb8872 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ 
-22,7 +22,7 @@ * This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX STACK_TOP diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 6a92a2fe198e..dac98348c6a3 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -39,6 +39,7 @@ void asm_offsets(void) OFFSET(TASK_STACK, task_struct, stack); OFFSET(TASK_TI, task_struct, thread_info); OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); + OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 355166f57205..fd9b57c8b4ce 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -144,6 +144,10 @@ _save_context: REG_L x2, PT_SP(sp) .endm +#if !IS_ENABLED(CONFIG_PREEMPT) +.set resume_kernel, restore_all +#endif + ENTRY(handle_exception) SAVE_ALL @@ -228,7 +232,7 @@ ret_from_exception: REG_L s0, PT_SSTATUS(sp) csrc sstatus, SR_SIE andi s0, s0, SR_SPP - bnez s0, restore_all + bnez s0, resume_kernel resume_userspace: /* Interrupts must be disabled here so flags are checked atomically */ @@ -250,6 +254,18 @@ restore_all: RESTORE_ALL sret +#if IS_ENABLED(CONFIG_PREEMPT) +resume_kernel: + REG_L s0, TASK_TI_PREEMPT_COUNT(tp) + bnez s0, restore_all +need_resched: + REG_L s0, TASK_TI_FLAGS(tp) + andi s0, s0, _TIF_NEED_RESCHED + beqz s0, restore_all + call preempt_schedule_irq + j need_resched +#endif + work_pending: /* Enter slow path for supplementary processing */ la ra, ret_from_exception diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 6e079e94b638..77564310235f 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -181,7 +181,7 @@ static void __init setup_bootmem(void) BUG_ON(mem_size == 0); set_max_mapnr(PFN_DOWN(mem_size)); - max_low_pfn = memblock_end_of_DRAM(); + max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); #ifdef CONFIG_BLK_DEV_INITRD setup_initrd(); diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index fc185ecabb0a..18cda0e8cf94 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -57,15 +57,12 @@ void __init setup_smp(void) while ((dn = of_find_node_by_type(dn, "cpu"))) { hart = riscv_of_processor_hartid(dn); - if (hart < 0) { - of_node_put(dn); + if (hart < 0) continue; - } if (hart == cpuid_to_hartid_map(0)) { BUG_ON(found_boot_cpu); found_boot_cpu = 1; - of_node_put(dn); continue; } @@ -73,7 +70,6 @@ void __init setup_smp(void) set_cpu_possible(cpuid, true); set_cpu_present(cpuid, true); cpuid++; - of_node_put(dn); } BUG_ON(!found_boot_cpu); diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 1e1395d63dab..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -18,8 +18,6 @@ #include <asm/cache.h> #include <asm/thread_info.h> -#define MAX_BYTES_PER_LONG 0x10 - OUTPUT_ARCH(riscv) ENTRY(_start) @@ -76,6 +74,8 @@ SECTIONS *(.sbss*) } + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) + EXCEPTION_TABLE(0x10) NOTES @@ -83,10 +83,6 @@ SECTIONS *(.rel.dyn*) } - BSS_SECTION(MAX_BYTES_PER_LONG, - MAX_BYTES_PER_LONG, - MAX_BYTES_PER_LONG) - _end = .; STABS_DEBUG 
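The resume_kernel path added to arch/riscv/kernel/entry.S above only preempts a kernel context when the preempt count is zero and TIF_NEED_RESCHED is set, looping until the flag clears. A rough C rendering of that control flow (an approximation of the assembly, not code taken from the patch) looks like this:

/*
 * C-level sketch of the CONFIG_PREEMPT return-to-kernel path:
 * skip preemption while preempt_count() is non-zero, otherwise
 * keep calling preempt_schedule_irq() until NEED_RESCHED clears.
 */
static void resume_kernel_sketch(void)
{
        if (preempt_count())            /* TASK_TI_PREEMPT_COUNT != 0 */
                return;                 /* fall through to restore_all */

        while (test_thread_flag(TIF_NEED_RESCHED))
                preempt_schedule_irq(); /* reschedule with interrupts off */
}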
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 1d9bfaff60bc..658ebf645f42 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -28,7 +28,8 @@ static void __init zone_sizes_init(void) unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, + (unsigned long) PFN_PHYS(max_low_pfn))); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index 537f97fde37f..b6796e616812 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S @@ -30,10 +30,10 @@ .section .text ENTRY(swsusp_arch_suspend) lg %r1,__LC_NODAT_STACK - aghi %r1,-STACK_FRAME_OVERHEAD stmg %r6,%r15,__SF_GPRS(%r1) + aghi %r1,-STACK_FRAME_OVERHEAD stg %r15,__SF_BACKCHAIN(%r1) - lgr %r1,%r15 + lgr %r15,%r1 /* Store FPU registers */ brasl %r14,save_fpu_regs diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a153257bf7d9..d62fa148558b 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->crycbd = 0; apie_h = vcpu->arch.sie_block->eca & ECA_APIE; - if (!apie_h && !key_msk) + if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0)) return 0; if (!crycb_addr) diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a966d7bfac57..4266a4de3160 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq) if (ai == -1UL) break; inc_irq_stat(IRQIO_MSI); + airq_iv_lock(aibv, ai); generic_handle_irq(airq_iv_get_data(aibv, ai)); + airq_iv_unlock(aibv, ai); } } } @@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) zdev->aisb = aisb; /* Create adapter interrupt vector */ - zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); + zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); if (!zdev->aibv) return -ENOMEM; diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile index 01d0f7fb14cc..2563d1e532e2 100644 --- a/arch/sh/boot/dts/Makefile +++ b/arch/sh/boot/dts/Makefile @@ -1,3 +1,3 @@ ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") -obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o +obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o endif diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1372553dc0a9..1d1544b6ca74 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -28,6 +28,7 @@ generic-y += preempt.h generic-y += sections.h generic-y += segment.h generic-y += serial.h +generic-y += shmparam.h generic-y += sizes.h generic-y += syscalls.h generic-y += topology.h diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 26387c7bf305..68261430fe6e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -446,12 +446,12 @@ config RETPOLINE branches. Requires a compiler with -mindirect-branch=thunk-extern support for full protection. The kernel may run slower. 
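The s390 zPCI hunks above create the adapter interrupt vector with AIRQ_IV_BITLOCK and bracket the per-bit data lookup with a bit lock so the interrupt handler cannot race with MSI setup or teardown on the same vector bit. A condensed sketch of that pattern, using only the airq_iv helpers that appear in the hunks (headers and the scan loop that produces the bit index are assumed):

/*
 * Condensed sketch: the vector must be created with AIRQ_IV_BITLOCK,
 * e.g. airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK), for
 * the per-bit lock below to exist.
 */
static void handle_adapter_bit(struct airq_iv *aibv, unsigned long ai)
{
        /* keep the per-bit data stable while the IRQ is dispatched */
        airq_iv_lock(aibv, ai);
        generic_handle_irq(airq_iv_get_data(aibv, ai));
        airq_iv_unlock(aibv, ai);
}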
-config X86_RESCTRL - bool "Resource Control support" +config X86_CPU_RESCTRL + bool "x86 CPU resource control support" depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) select KERNFS help - Enable Resource Control support. + Enable x86 CPU resource control support. Provide support for the allocation and monitoring of system resources usage by the CPU. diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 64037895b085..f62e347862cc 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -600,6 +600,16 @@ ENTRY(trampoline_32bit_src) leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax movl %eax, %cr3 3: + /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ + pushl %ecx + pushl %edx + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_LME, %eax + wrmsr + popl %edx + popl %ecx + /* Enable PAE and LA57 (if required) paging modes */ movl $X86_CR4_PAE, %eax cmpl $0, %edx diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h index 91f75638f6e6..6ff7e81b5628 100644 --- a/arch/x86/boot/compressed/pgtable.h +++ b/arch/x86/boot/compressed/pgtable.h @@ -6,7 +6,7 @@ #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE -#define TRAMPOLINE_32BIT_CODE_SIZE 0x60 +#define TRAMPOLINE_32BIT_CODE_SIZE 0x70 #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 374a19712e20..b684f0294f35 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2278,6 +2278,19 @@ void perf_check_microcode(void) x86_pmu.check_microcode(); } +static int x86_pmu_check_period(struct perf_event *event, u64 value) +{ + if (x86_pmu.check_period && x86_pmu.check_period(event, value)) + return -EINVAL; + + if (value && x86_pmu.limit_period) { + if (x86_pmu.limit_period(event, value) > value) + return -EINVAL; + } + + return 0; +} + static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, @@ -2302,6 +2315,7 @@ static struct pmu pmu = { .event_idx = x86_pmu_event_idx, .sched_task = x86_pmu_sched_task, .task_ctx_size = sizeof(struct x86_perf_task_context), + .check_period = x86_pmu_check_period, }; void arch_perf_update_userpage(struct perf_event *event, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 40e12cfc87f6..730978dff63f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3559,6 +3559,14 @@ static void free_excl_cntrs(int cpu) static void intel_pmu_cpu_dying(int cpu) { + fini_debug_store_on_cpu(cpu); + + if (x86_pmu.counter_freezing) + disable_counter_freeze(); +} + +static void intel_pmu_cpu_dead(int cpu) +{ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); struct intel_shared_regs *pc; @@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu) } free_excl_cntrs(cpu); - - fini_debug_store_on_cpu(cpu); - - if (x86_pmu.counter_freezing) - disable_counter_freeze(); } static void intel_pmu_sched_task(struct perf_event_context *ctx, @@ -3584,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, intel_pmu_lbr_sched_task(ctx, sched_in); } +static int intel_pmu_check_period(struct perf_event *event, u64 value) +{ + return intel_pmu_has_bts_period(event, value) ? 
-EINVAL : 0; +} + PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); @@ -3663,6 +3671,9 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, + + .check_period = intel_pmu_check_period, }; static struct attribute *intel_pmu_attrs[]; @@ -3703,8 +3714,12 @@ static __initconst const struct x86_pmu intel_pmu = { .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, + .guest_get_msrs = intel_guest_get_msrs, .sched_task = intel_pmu_sched_task, + + .check_period = intel_pmu_check_period, }; static __init void intel_clovertown_quirk(void) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c07bee31abe8..b10e04387f38 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = { .id_table = snbep_uncore_pci_ids, }; +#define NODE_ID_MASK 0x7 + /* * build pci bus to socket mapping */ @@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); if (err) break; - nodeid = config; + nodeid = config & NODE_ID_MASK; /* get the Node ID mapping */ err = pci_read_config_dword(ubox_dev, idmap_loc, &config); if (err) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78d7b7031bfc..d46fd6754d92 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -646,6 +646,11 @@ struct x86_pmu { * Intel host/guest support (KVM) */ struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); + + /* + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. + */ + int (*check_period) (struct perf_event *event, u64 period); }; struct x86_perf_task_context { @@ -857,7 +862,7 @@ static inline int amd_pmu_init(void) #ifdef CONFIG_CPU_SUP_INTEL -static inline bool intel_pmu_has_bts(struct perf_event *event) +static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) { struct hw_perf_event *hwc = &event->hw; unsigned int hw_event, bts_event; @@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); - return hw_event == bts_event && hwc->sample_period == 1; + return hw_event == bts_event && period == 1; +} + +static inline bool intel_pmu_has_bts(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + return intel_pmu_has_bts_period(event, hwc->sample_period); } int intel_pmu_save_and_restart(struct perf_event *event); diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index f65b78d32f5e..7dbbe9ffda17 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -51,7 +51,7 @@ static unsigned long get_dr(int n) /* * fill in the user structure for a core dump.. 
*/ -static void dump_thread32(struct pt_regs *regs, struct user32 *dump) +static void fill_dump(struct pt_regs *regs, struct user32 *dump) { u32 fs, gs; memset(dump, 0, sizeof(*dump)); @@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm) fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; + + fill_dump(cprm->regs, &dump); + strncpy(dump.u_comm, current->comm, sizeof(current->comm)); dump.u_ar0 = offsetof(struct user32, regs); dump.signal = cprm->siginfo->si_signo; - dump_thread32(cprm->regs, &dump); /* * If the size of the dump file exceeds the rlimit, then see diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 0dd6b0f4000e..9f15384c504a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -6,7 +6,7 @@ * "Big Core" Processors (Branded as Core, Xeon, etc...) * * The "_X" parts are generally the EP and EX Xeons, or the - * "Extreme" ones, like Broadwell-E. + * "Extreme" ones, like Broadwell-E, or Atom microserver. * * While adding a new CPUID for a new microarchitecture, add a new * group to keep logically sorted out in chronological order. Within @@ -52,6 +52,8 @@ #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 +#define INTEL_FAM6_ICELAKE_MOBILE 0x7E + /* "Small Core" Processors (Atom) */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ @@ -71,6 +73,7 @@ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ +#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */ /* Xeon Phi */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4660ce90de7f..180373360e34 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -299,6 +299,7 @@ union kvm_mmu_extended_role { unsigned int cr4_smap:1; unsigned int cr4_smep:1; unsigned int cr4_la57:1; + unsigned int maxphyaddr:6; }; }; @@ -397,6 +398,7 @@ struct kvm_mmu { void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte); hpa_t root_hpa; + gpa_t root_cr3; union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 8f657286d599..0ce558a8150d 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -7,7 +7,11 @@ #endif #ifdef CONFIG_KASAN +#ifdef CONFIG_KASAN_EXTRA +#define KASAN_STACK_ORDER 2 +#else #define KASAN_STACK_ORDER 1 +#endif #else #define KASAN_STACK_ORDER 0 #endif diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 40616e805292..2779ace16d23 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { - native_set_pmd(pmdp, pmd); + set_pmd(pmdp, pmd); } static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h index 40ebddde6ac2..f6b7fe2833cc 100644 --- a/arch/x86/include/asm/resctrl_sched.h +++ b/arch/x86/include/asm/resctrl_sched.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_RESCTRL_SCHED_H #define _ASM_X86_RESCTRL_SCHED_H -#ifdef CONFIG_X86_RESCTRL +#ifdef CONFIG_X86_CPU_RESCTRL #include <linux/sched.h> #include <linux/jump_label.h> @@ 
-88,6 +88,6 @@ static inline void resctrl_sched_in(void) static inline void resctrl_sched_in(void) {} -#endif /* CONFIG_X86_RESCTRL */ +#endif /* CONFIG_X86_CPU_RESCTRL */ #endif /* _ASM_X86_RESCTRL_SCHED_H */ diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc6186..3f697a9e3f59 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h @@ -48,7 +48,8 @@ enum { BIOS_STATUS_SUCCESS = 0, BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, BIOS_STATUS_EINVAL = -EINVAL, - BIOS_STATUS_UNAVAIL = -EBUSY + BIOS_STATUS_UNAVAIL = -EBUSY, + BIOS_STATUS_ABORT = -EINTR, }; /* Address map parameters */ @@ -167,4 +168,9 @@ extern long system_serial_number; extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ +/* + * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details + */ +extern struct semaphore __efi_uv_runtime_lock; + #endif /* _ASM_X86_UV_BIOS_H */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index b6fa0869f7aa..cfd24f9f7614 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o obj-$(CONFIG_X86_MCE) += mce/ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_MICROCODE) += microcode/ -obj-$(CONFIG_X86_RESCTRL) += resctrl/ +obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 1de0f4170178..01874d54f4fd 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -71,7 +71,7 @@ void __init check_bugs(void) * identify_boot_cpu() initialized SMT support information, let the * core code know. */ - cpu_smt_check_topology_early(); + cpu_smt_check_topology(); if (!IS_ENABLED(CONFIG_SMP)) { pr_info("CPU: "); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 672c7225cb1b..6ce290c506d9 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, quirk_no_way_out(i, m, regs); if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + m->bank = i; mce_read_aux(m, i); *msg = tmp; return 1; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 51adde0a0f1a..e1f3ba19ba54 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) if (!p) { return ret; } else { - if (boot_cpu_data.microcode == p->patch_id) + if (boot_cpu_data.microcode >= p->patch_id) return ret; ret = UCODE_NEW; diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 1cabe6fd8e11..4a06c37b9cf1 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o -obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o +obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 0d5efa34f359..53917a3ebf94 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr, 
struct efi_info *current_ei = &boot_params.efi_info; struct efi_info *ei = ¶ms->efi_info; + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + if (!current_ei->efi_memmap_size) return 0; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index bbffa6c54697..c07958b59f50 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; + unsigned f_la57 = 0; /* cpuid 1.edx */ const u32 kvm_cpuid_1_edx_x86_features = @@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, // TSC_ADJUST is emulated entry->ebx |= F(TSC_ADJUST); entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; + f_la57 = entry->ecx & F(LA57); cpuid_mask(&entry->ecx, CPUID_7_ECX); + /* Set LA57 based on hardware capability. */ + entry->ecx |= f_la57; entry->ecx |= f_umip; /* PKU is not yet implemented for shadow paging. */ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index da9c42349b1f..f2d1d230d5b8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, &invalid_list); mmu->root_hpa = INVALID_PAGE; } + mmu->root_cr3 = 0; } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); @@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); } else BUG(); + vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); return 0; } @@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; u64 pdptr, pm_mask; - gfn_t root_gfn; + gfn_t root_gfn, root_cr3; int i; - root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; + root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); + root_gfn = root_cr3 >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; @@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu->root_hpa = root; - return 0; + goto set_root_cr3; } /* @@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); } +set_root_cr3: + vcpu->arch.mmu->root_cr3 = root_cr3; + return 0; } @@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, struct kvm_mmu_root_info root; struct kvm_mmu *mmu = vcpu->arch.mmu; - root.cr3 = mmu->get_cr3(vcpu); + root.cr3 = mmu->root_cr3; root.hpa = mmu->root_hpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { @@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, } mmu->root_hpa = root.hpa; + mmu->root_cr3 = root.cr3; return i < KVM_MMU_NUM_PREV_ROOTS; } @@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) ext.cr4_pse = !!is_pse(vcpu); ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); + ext.maxphyaddr = cpuid_maxphyaddr(vcpu); ext.valid = 1; @@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.root_mmu.root_cr3 = 0; 
vcpu->arch.root_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.guest_mmu.root_cr3 = 0; vcpu->arch.guest_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 8ff20523661b..d737a51a53ca 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu) if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) return; + hrtimer_cancel(&vmx->nested.preemption_timer); vmx->nested.vmxon = false; vmx->nested.smm.vmxon = false; free_vpid(vmx->nested.vpid02); @@ -2472,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) return -EINVAL; + if (!nested_cpu_has_preemption_timer(vmcs12) && + nested_cpu_has_save_preemption_timer(vmcs12)) + return -EINVAL; + if (nested_cpu_has_ept(vmcs12) && !valid_ept_address(vcpu, vmcs12->ept_pointer)) return -EINVAL; @@ -5556,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, * secondary cpu-based controls. Do not include those that * depend on CPUID bits, they are added later by vmx_cpuid_update. */ - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, - msrs->secondary_ctls_low, - msrs->secondary_ctls_high); + if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + msrs->secondary_ctls_low, + msrs->secondary_ctls_high); + msrs->secondary_ctls_low = 0; msrs->secondary_ctls_high &= SECONDARY_EXEC_DESC | diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4341175339f3..30a6bcd735ec 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -26,6 +26,7 @@ #include <linux/mod_devicetable.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/smt.h> #include <linux/slab.h> #include <linux/tboot.h> #include <linux/trace_events.h> @@ -862,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, if (!entry_only) j = find_msr(&m->host, msr); - if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { + if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || + (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; @@ -1192,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) return; - /* - * First handle the simple case where no cmpxchg is necessary; just - * allow posting non-urgent interrupts. - * - * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change - * PI.NDST: pi_post_block will do it for us and the wakeup_handler - * expects the VCPU to be on the blocked_vcpu_list that matches - * PI.NDST. - */ - if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || - vcpu->cpu == cpu) { - pi_clear_sn(pi_desc); - return; - } - /* The full case. */ do { old.control = new.control = pi_desc->control; @@ -1221,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) new.sn = 0; } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control); + + /* + * Clear SN before reading the bitmap. 
The VT-d firmware + * writes the bitmap and reads SN atomically (5.2.3 in the + * spec), so it doesn't really have a memory barrier that + * pairs with this, but we cannot do that and we need one. + */ + smp_mb__after_atomic(); + + if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS)) + pi_set_on(pi_desc); } /* @@ -6823,7 +6821,7 @@ static int vmx_vm_init(struct kvm *kvm) * Warn upon starting the first VM in a potentially * insecure environment. */ - if (cpu_smt_control == CPU_SMT_ENABLED) + if (sched_smt_active()) pr_warn_once(L1TF_MSG_SMT); if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) pr_warn_once(L1TF_MSG_L1D); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 99328954c2fc..0ac0a64c7790 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } -static inline void pi_clear_sn(struct pi_desc *pi_desc) +static inline void pi_set_sn(struct pi_desc *pi_desc) { - return clear_bit(POSTED_INTR_SN, + return set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } -static inline void pi_set_sn(struct pi_desc *pi_desc) +static inline void pi_set_on(struct pi_desc *pi_desc) { - return set_bit(POSTED_INTR_SN, - (unsigned long *)&pi_desc->control); + set_bit(POSTED_INTR_ON, + (unsigned long *)&pi_desc->control); } static inline void pi_clear_on(struct pi_desc *pi_desc) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d27206f6c01..941f932373d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + /* + * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED + * is returned, but our callers are not ready for that and they blindly + * call kvm_inject_page_fault. Ensure that they at least do not leak + * uninitialized kernel stack memory into cr2 and error code. + */ + memset(exception, 0, sizeof(*exception)); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } @@ -7794,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * 1) We should set ->mode before checking ->requests. Please see * the comment in kvm_vcpu_exiting_guest_mode(). * - * 2) For APICv, we should set ->mode before checking PIR.ON. This + * 2) For APICv, we should set ->mode before checking PID.ON. This * pairs with the memory barrier implicit in pi_test_and_set_on * (see vmx_deliver_posted_interrupt). 
* diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c index 66894675f3c8..df50451d94ef 100644 --- a/arch/x86/lib/iomem.c +++ b/arch/x86/lib/iomem.c @@ -2,8 +2,11 @@ #include <linux/module.h> #include <linux/io.h> +#define movs(type,to,from) \ + asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory") + /* Originally from i386/string.h */ -static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) +static __always_inline void rep_movs(void *to, const void *from, size_t n) { unsigned long d0, d1, d2; asm volatile("rep ; movsl\n\t" @@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) { - __iomem_memcpy(to, (const void *)from, n); + if (unlikely(!n)) + return; + + /* Align any unaligned source IO */ + if (unlikely(1 & (unsigned long)from)) { + movs("b", to, from); + n--; + } + if (n > 1 && unlikely(2 & (unsigned long)from)) { + movs("w", to, from); + n-=2; + } + rep_movs(to, (const void *)from, n); } EXPORT_SYMBOL(memcpy_fromio); void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) { - __iomem_memcpy((void *)to, (const void *) from, n); + if (unlikely(!n)) + return; + + /* Align any unaligned destination IO */ + if (unlikely(1 & (unsigned long)to)) { + movs("b", to, from); + n--; + } + if (n > 1 && unlikely(2 & (unsigned long)to)) { + movs("w", to, from); + n-=2; + } + rep_movs((void *)to, (const void *) from, n); } EXPORT_SYMBOL(memcpy_toio); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2ff25ad33233..9d5c75f02295 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index) return; } - addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); + addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24); #ifdef CONFIG_X86_64 addr |= ((u64)desc.base3 << 32); #endif diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4f8972311a77..14e6119838a6 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn) #endif +/* + * See set_mce_nospec(). + * + * Machine check recovery code needs to change cache mode of poisoned pages to + * UC to avoid speculative access logging another error. But passing the + * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a + * speculative access. So we cheat and flip the top bit of the address. This + * works fine for the code that updates the page tables. But at the end of the + * process we need to flush the TLB and cache and the non-canonical address + * causes a #GP fault when used by the INVLPG and CLFLUSH instructions. + * + * But in the common case we already have a canonical address. This code + * will fix the top bit if needed and is a no-op otherwise. 
+ */ +static inline unsigned long fix_addr(unsigned long addr) +{ +#ifdef CONFIG_X86_64 + return (long)(addr << 1) >> 1; +#else + return addr; +#endif +} + static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) { if (cpa->flags & CPA_PAGES_ARRAY) { @@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data) unsigned int i; for (i = 0; i < cpa->numpages; i++) - __flush_tlb_one_kernel(__cpa_addr(cpa, i)); + __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i))); } static void cpa_flush(struct cpa_data *data, int cache) @@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache) * Only flush present addresses: */ if (pte && (pte_val(*pte) & _PAGE_PRESENT)) - clflush_cache_range_opt((void *)addr, PAGE_SIZE); + clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE); } mb(); } @@ -1627,29 +1650,6 @@ out: return ret; } -/* - * Machine check recovery code needs to change cache mode of poisoned - * pages to UC to avoid speculative access logging another error. But - * passing the address of the 1:1 mapping to set_memory_uc() is a fine - * way to encourage a speculative access. So we cheat and flip the top - * bit of the address. This works fine for the code that updates the - * page tables. But at the end of the process we need to flush the cache - * and the non-canonical address causes a #GP fault when used by the - * CLFLUSH instruction. - * - * But in the common case we already have a canonical address. This code - * will fix the top bit if needed and is a no-op otherwise. - */ -static inline unsigned long make_addr_canonical_again(unsigned long addr) -{ -#ifdef CONFIG_X86_64 - return (long)(addr << 1) >> 1; -#else - return addr; -#endif -} - - static int change_page_attr_set_clr(unsigned long *addr, int numpages, pgprot_t mask_set, pgprot_t mask_clr, int force_split, int in_flag, diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c582..eb33432f2f24 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -29,7 +29,8 @@ struct uv_systab *uv_systab; -s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, + u64 a4, u64 a5) { struct uv_systab *tab = uv_systab; s64 ret; @@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) return ret; } + +s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +{ + s64 ret; + + if (down_interruptible(&__efi_uv_runtime_lock)) + return BIOS_STATUS_ABORT; + + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); + up(&__efi_uv_runtime_lock); + + return ret; +} EXPORT_SYMBOL_GPL(uv_bios_call); s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, @@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, unsigned long bios_flags; s64 ret; + if (down_interruptible(&__efi_uv_runtime_lock)) + return BIOS_STATUS_ABORT; + local_irq_save(bios_flags); - ret = uv_bios_call(which, a1, a2, a3, a4, a5); + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); local_irq_restore(bios_flags); + up(&__efi_uv_runtime_lock); + return ret; } diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 20a0756f27ef..ce91682770cb 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -164,7 +164,7 @@ config XTENSA_FAKE_NMI If unsure, say N. 
config XTENSA_UNALIGNED_USER - bool "Unaligned memory access in use space" + bool "Unaligned memory access in user space" help The Xtensa architecture currently does not handle unaligned memory accesses in hardware but through an exception handler. @@ -451,7 +451,7 @@ config USE_OF help Include support for flattened device tree machine descriptions. -config BUILTIN_DTB +config BUILTIN_DTB_SOURCE string "DTB to build into the kernel image" depends on OF diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile index f8052ba5aea8..0b8d00cdae7c 100644 --- a/arch/xtensa/boot/dts/Makefile +++ b/arch/xtensa/boot/dts/Makefile @@ -7,9 +7,9 @@ # # -BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o -ifneq ($(CONFIG_BUILTIN_DTB),"") -obj-$(CONFIG_OF) += $(BUILTIN_DTB) +BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o +ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") +obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE) endif # for CONFIG_OF_ALL_DTBS test diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig index 2bf964df37ba..f378e56f9ce6 100644 --- a/arch/xtensa/configs/audio_kc705_defconfig +++ b/arch/xtensa/configs/audio_kc705_defconfig @@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="kc705" +CONFIG_BUILTIN_DTB_SOURCE="kc705" # CONFIG_COMPACTION is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_PM=y diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig index 3221b7053fa3..62f32a902568 100644 --- a/arch/xtensa/configs/cadence_csp_defconfig +++ b/arch/xtensa/configs/cadence_csp_defconfig @@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y # CONFIG_PCI is not set CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="csp" +CONFIG_BUILTIN_DTB_SOURCE="csp" # CONFIG_COMPACTION is not set CONFIG_XTFPGA_LCD=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig index 985fa8546e4e..8bebe07f1060 100644 --- a/arch/xtensa/configs/generic_kc705_defconfig +++ b/arch/xtensa/configs/generic_kc705_defconfig @@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="kc705" +CONFIG_BUILTIN_DTB_SOURCE="kc705" # CONFIG_COMPACTION is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_NET=y diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig index f3fc4f970ca8..933ab2adf434 100644 --- a/arch/xtensa/configs/nommu_kc705_defconfig +++ b/arch/xtensa/configs/nommu_kc705_defconfig @@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="kc705_nommu" +CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu" CONFIG_BINFMT_FLAT=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig index 11fed6c06a7c..e29c5b179a5b 100644 --- a/arch/xtensa/configs/smp_lx200_defconfig +++ 
b/arch/xtensa/configs/smp_lx200_defconfig @@ -33,11 +33,12 @@ CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set # CONFIG_PCI is not set +CONFIG_VECTORS_OFFSET=0x00002000 CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="lx200mx" +CONFIG_BUILTIN_DTB_SOURCE="lx200mx" # CONFIG_COMPACTION is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_NET=y diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index da08e75100ab..7f009719304e 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -276,12 +276,13 @@ should_never_return: movi a2, cpu_start_ccount 1: + memw l32i a3, a2, 0 beqi a3, 0, 1b movi a3, 0 s32i a3, a2, 0 - memw 1: + memw l32i a3, a2, 0 beqi a3, 0, 1b wsr a3, ccount @@ -317,11 +318,13 @@ ENTRY(cpu_restart) rsr a0, prid neg a2, a0 movi a3, cpu_start_id + memw s32i a2, a3, 0 #if XCHAL_DCACHE_IS_WRITEBACK dhwbi a3, 0 #endif 1: + memw l32i a2, a3, 0 dhi a3, 0 bne a2, a0, 1b diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index 932d64689bac..be1f280c322c 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c @@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned i; - for (i = 0; i < max_cpus; ++i) + for_each_possible_cpu(i) set_cpu_present(i, true); } @@ -96,6 +96,11 @@ void __init smp_init_cpus(void) pr_info("%s: Core Count = %d\n", __func__, ncpus); pr_info("%s: Core Id = %d\n", __func__, core_id); + if (ncpus > NR_CPUS) { + ncpus = NR_CPUS; + pr_info("%s: limiting core count by %d\n", __func__, ncpus); + } + for (i = 0; i < ncpus; ++i) set_cpu_possible(i, true); } @@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) int i; #ifdef CONFIG_HOTPLUG_CPU - cpu_start_id = cpu; - system_flush_invalidate_dcache_range( - (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); + WRITE_ONCE(cpu_start_id, cpu); + /* Pairs with the third memw in the cpu_restart */ + mb(); + system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id, + sizeof(cpu_start_id)); #endif smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); @@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) ccount = get_ccount(); while (!ccount); - cpu_start_ccount = ccount; + WRITE_ONCE(cpu_start_ccount, ccount); - while (time_before(jiffies, timeout)) { + do { + /* + * Pairs with the first two memws in the + * .Lboot_secondary. 
+ */ mb(); - if (!cpu_start_ccount) - break; - } + ccount = READ_ONCE(cpu_start_ccount); + } while (ccount && time_before(jiffies, timeout)); - if (cpu_start_ccount) { + if (ccount) { smp_call_function_single(0, mx_cpu_stop, - (void *)cpu, 1); - cpu_start_ccount = 0; + (void *)cpu, 1); + WRITE_ONCE(cpu_start_ccount, 0); return -EIO; } } @@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", __func__, cpu, idle, start_info.stack); + init_completion(&cpu_running); ret = boot_secondary(cpu, idle); if (ret == 0) { wait_for_completion_timeout(&cpu_running, @@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu) unsigned long timeout = jiffies + msecs_to_jiffies(1000); while (time_before(jiffies, timeout)) { system_invalidate_dcache_range((unsigned long)&cpu_start_id, - sizeof(cpu_start_id)); - if (cpu_start_id == -cpu) { + sizeof(cpu_start_id)); + /* Pairs with the second memw in the cpu_restart */ + mb(); + if (READ_ONCE(cpu_start_id) == -cpu) { platform_cpu_kill(cpu); return; } diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index fd524a54d2ab..378186b5eb40 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt) container_of(evt, struct ccount_timer, evt); if (timer->irq_enabled) { - disable_irq(evt->irq); + disable_irq_nosync(evt->irq); timer->irq_enabled = 0; } return 0; diff --git a/block/blk-core.c b/block/blk-core.c index 3c5f61ceeb67..6b78ec56a4f2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t) kblockd_schedule_work(&q->timeout_work); } +static void blk_timeout_work(struct work_struct *work) +{ +} + /** * blk_alloc_queue_node - allocate a request queue * @gfp_mask: memory allocation flags @@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); - INIT_WORK(&q->timeout_work, NULL); + INIT_WORK(&q->timeout_work, blk_timeout_work); INIT_LIST_HEAD(&q->icq_list); #ifdef CONFIG_BLK_CGROUP INIT_LIST_HEAD(&q->blkg_list); diff --git a/block/blk-flush.c b/block/blk-flush.c index a3fc7191c694..6e0f2d97fc6d 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); spin_unlock_irqrestore(&fq->mq_flush_lock, flags); - blk_mq_run_hw_queue(hctx, true); + blk_mq_sched_restart(hctx); } /** diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index fc714ef402a6..2620baa1f699 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -72,6 +72,7 @@ #include <linux/sched/loadavg.h> #include <linux/sched/signal.h> #include <trace/events/block.h> +#include <linux/blk-mq.h> #include "blk-rq-qos.h" #include "blk-stat.h" @@ -591,6 +592,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) u64 now = ktime_to_ns(ktime_get()); bool issue_as_root = bio_issue_as_root_blkg(bio); bool enabled = false; + int inflight = 0; blkg = bio->bi_blkg; if (!blkg || !bio_flagged(bio, BIO_TRACKED)) @@ -601,6 +603,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) return; enabled = blk_iolatency_enabled(iolat->blkiolat); + if (!enabled) + return; + while (blkg && 
blkg->parent) { iolat = blkg_to_lat(blkg); if (!iolat) { @@ -609,8 +614,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) } rqw = &iolat->rq_wait; - atomic_dec(&rqw->inflight); - if (!enabled || iolat->min_lat_nsec == 0) + inflight = atomic_dec_return(&rqw->inflight); + WARN_ON_ONCE(inflight < 0); + if (iolat->min_lat_nsec == 0) goto next; iolatency_record_time(iolat, &bio->bi_issue, now, issue_as_root); @@ -754,10 +760,13 @@ int blk_iolatency_init(struct request_queue *q) return 0; } -static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) +/* + * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise + * return 0. + */ +static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) { struct iolatency_grp *iolat = blkg_to_lat(blkg); - struct blk_iolatency *blkiolat = iolat->blkiolat; u64 oldval = iolat->min_lat_nsec; iolat->min_lat_nsec = val; @@ -766,9 +775,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) BLKIOLATENCY_MAX_WIN_SIZE); if (!oldval && val) - atomic_inc(&blkiolat->enabled); + return 1; if (oldval && !val) - atomic_dec(&blkiolat->enabled); + return -1; + return 0; } static void iolatency_clear_scaling(struct blkcg_gq *blkg) @@ -800,6 +810,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, u64 lat_val = 0; u64 oldval; int ret; + int enable = 0; ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); if (ret) @@ -834,7 +845,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, blkg = ctx.blkg; oldval = iolat->min_lat_nsec; - iolatency_set_min_lat_nsec(blkg, lat_val); + enable = iolatency_set_min_lat_nsec(blkg, lat_val); + if (enable) { + WARN_ON_ONCE(!blk_get_queue(blkg->q)); + blkg_get(blkg); + } + if (oldval != iolat->min_lat_nsec) { iolatency_clear_scaling(blkg); } @@ -842,6 +858,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, ret = 0; out: blkg_conf_finish(&ctx); + if (ret == 0 && enable) { + struct iolatency_grp *tmp = blkg_to_lat(blkg); + struct blk_iolatency *blkiolat = tmp->blkiolat; + + blk_mq_freeze_queue(blkg->q); + + if (enable == 1) + atomic_inc(&blkiolat->enabled); + else if (enable == -1) + atomic_dec(&blkiolat->enabled); + else + WARN_ON_ONCE(1); + + blk_mq_unfreeze_queue(blkg->q); + + blkg_put(blkg); + blk_put_queue(blkg->q); + } return ret ?: nbytes; } @@ -977,8 +1011,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd) { struct iolatency_grp *iolat = pd_to_lat(pd); struct blkcg_gq *blkg = lat_to_blkg(iolat); + struct blk_iolatency *blkiolat = iolat->blkiolat; + int ret; - iolatency_set_min_lat_nsec(blkg, 0); + ret = iolatency_set_min_lat_nsec(blkg, 0); + if (ret == 1) + atomic_inc(&blkiolat->enabled); + if (ret == -1) + atomic_dec(&blkiolat->enabled); iolatency_clear_scaling(blkg); } diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index f8120832ca7b..7921573aebbc 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -839,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { static bool debugfs_create_files(struct dentry *parent, void *data, const struct blk_mq_debugfs_attr *attr) { + if (IS_ERR_OR_NULL(parent)) + return false; + d_inode(parent)->i_private = data; for (; attr->name; attr++) { diff --git a/block/blk-mq.c b/block/blk-mq.c index 8f5b533764ca..9437a5eb07cf 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct 
*work) spin_unlock_irq(&q->requeue_lock); list_for_each_entry_safe(rq, next, &rq_list, queuelist) { - if (!(rq->rq_flags & RQF_SOFTBARRIER)) + if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) continue; rq->rq_flags &= ~RQF_SOFTBARRIER; list_del_init(&rq->queuelist); - blk_mq_sched_insert_request(rq, true, false, false); + /* + * If RQF_DONTPREP, rq has contained some driver specific + * data, so insert it to hctx dispatch list to avoid any + * merge. + */ + if (rq->rq_flags & RQF_DONTPREP) + blk_mq_request_bypass_insert(rq, false); + else + blk_mq_sched_insert_request(rq, true, false, false); } while (!list_empty(&rq_list)) { diff --git a/block/blk-mq.h b/block/blk-mq.h index d943d46b0785..d0b3dd54ef8d 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -36,7 +36,6 @@ struct blk_mq_ctx { struct kobject kobj; } ____cacheline_aligned_in_smp; -void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 17eb09d222ff..ec78a04eb136 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) int af_alg_release(struct socket *sock) { - if (sock->sk) + if (sock->sk) { sock_put(sock->sk); + sock->sk = NULL; + } return 0; } EXPORT_SYMBOL_GPL(af_alg_release); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c093ce01bcd..147f6c7ea59c 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1029,6 +1029,9 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; + /* Initialize debug output. Linux does not use ACPICA defaults */ + acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; + #ifdef CONFIG_X86 /* * If the machine falls into the DMI check table, diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cdfc87629efb..4d2b2ad1ee0e 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name) static int __init binder_init(void) { int ret; - char *device_name, *device_names, *device_tmp; + char *device_name, *device_tmp; struct binder_device *device; struct hlist_node *tmp; + char *device_names = NULL; ret = binder_alloc_shrinker_init(); if (ret) @@ -5898,23 +5899,29 @@ static int __init binder_init(void) &transaction_log_fops); } - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. - */ - device_names = kstrdup(binder_devices_param, GFP_KERNEL); - if (!device_names) { - ret = -ENOMEM; - goto err_alloc_device_names_failed; - } + if (strcmp(binder_devices_param, "") != 0) { + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. 
+ */ + device_names = kstrdup(binder_devices_param, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; + } - device_tmp = device_names; - while ((device_name = strsep(&device_tmp, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; + device_tmp = device_names; + while ((device_name = strsep(&device_tmp, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } } + ret = init_binderfs(); + if (ret) + goto err_init_binder_device_failed; + return ret; err_init_binder_device_failed: diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 7fb97f503ef2..045b3e42d98b 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode) } #endif +#ifdef CONFIG_ANDROID_BINDERFS +extern int __init init_binderfs(void); +#else +static inline int __init init_binderfs(void) +{ + return 0; +} +#endif + #endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 6a2185eb66c5..e773f45d19d9 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb) struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; +#if defined(CONFIG_IPC_NS) + bool use_reserve = (info->ipc_ns == &init_ipc_ns); +#else + bool use_reserve = true; +#endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) @@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_alloc_max(&binderfs_minors, + use_reserve ? 
BINDERFS_MAX_MINOR : + BINDERFS_MAX_MINOR_CAPPED, + GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { ret = minor; @@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = { .fs_flags = FS_USERNS_MOUNT, }; -static int __init init_binderfs(void) +int __init init_binderfs(void) { int ret; @@ -560,5 +568,3 @@ static int __init init_binderfs(void) return ret; } - -device_initcall(init_binderfs); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b8c3f9e6af89..adf28788cab5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, }, /* devices that don't properly handle queued TRIM commands */ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a43276c76fc6..21393ec3b9a4 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client) struct ht16k33_priv *priv = i2c_get_clientdata(client); struct ht16k33_fbdev *fbdev = &priv->fbdev; - cancel_delayed_work(&fbdev->work); + cancel_delayed_work_sync(&fbdev->work); unregister_framebuffer(fbdev->info); framebuffer_release(fbdev->info); free_page((unsigned long) fbdev->buffer); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf78fa6d470d..a7359535caf5 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].size_prop; - if (of_property_read_u32(np, propname, &this_leaf->size)) - this_leaf->size = 0; + of_property_read_u32(np, propname, &this_leaf->size); } /* not cache_line_size() because that's a macro in include/linux/cache.h */ @@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].nr_sets_prop; - if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) - this_leaf->number_of_sets = 0; + of_property_read_u32(np, propname, &this_leaf->number_of_sets); } static void cache_associativity(struct cacheinfo *this_leaf) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 457be03b744d..ccd296dbb95c 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { - hrtimer_cancel(&dev->power.suspend_timer); + hrtimer_try_to_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } @@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; u64 last_busy, expires = 0; - u64 now = ktime_to_ns(ktime_get()); + u64 now = ktime_get_mono_fast_ns(); if (!dev->power.use_autosuspend) goto out; @@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) * If 'expires' is after the current time, we've been called * too early. 
*/ - if (expires > 0 && expires < ktime_to_ns(ktime_get())) { + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); @@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; - ktime_t expires; + u64 expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); @@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) /* Other scheduled or pending requests need to be canceled. */ pm_runtime_cancel_pending(dev); - expires = ktime_add(ktime_get(), ms_to_ktime(delay)); - dev->power.timer_expires = ktime_to_ns(expires); + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; + dev->power.timer_expires = expires; dev->power.timer_autosuspends = 0; hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 6f2856c6d0f2..55481b40df9a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { if (lock_fdc(drive)) - return -EINTR; + return 0; poll_drive(false, 0); process_fd_request(); } diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f94d33525771..d299ec79e4c3 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), + 0), /* Some timers on omap4 and later */ SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), + 0), SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, - SYSC_QUIRK_LEGACY_IDLE), + 0), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), /* Uarts on omap4 and later */ diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 2fe225a697df..3487e03d4bc6 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, - nck(at91sam9x5_systemck), - nck(at91sam9x35_periphck), 0); + nck(at91sam9x5_systemck), 31, 0); if (!at91sam9x5_pmc) return; @@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 2; i++) { char name[6]; diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index d69ad96fe988..cd0ef7274fdb 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 3; i++) { char name[6]; @@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + 
parent_names[4] = "masterck"; parent_names[5] = "audiopll_pmcck"; for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index e358be7f6c8d..b645a9d59cdb 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 3; i++) { char name[6]; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 6ccdbedb02f3..d2477a5058ac 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core, if (!parent) return -EINVAL; - for (i = 0; i < core->num_parents; i++) - if (clk_core_get_parent_by_index(core, i) == parent) + for (i = 0; i < core->num_parents; i++) { + if (core->parents[i] == parent) + return i; + + if (core->parents[i]) + continue; + + /* Fallback to comparing globally unique names */ + if (!strcmp(parent->name, core->parent_names[i])) { + core->parents[i] = parent; return i; + } + } return -EINVAL; } diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c index 0026c3969b1e..76b9eb15604e 100644 --- a/drivers/clk/imx/clk-frac-pll.c +++ b/drivers/clk/imx/clk-frac-pll.c @@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_frac_pll *pll = to_clk_frac_pll(hw); u32 val, divfi, divff; - u64 temp64 = parent_rate; + u64 temp64; int ret; parent_rate *= 8; rate *= 2; divfi = rate / parent_rate; - temp64 *= rate - divfi; + temp64 = parent_rate * divfi; + temp64 = rate - temp64; temp64 *= PLL_FRAC_DENOM; do_div(temp64, parent_rate); divff = temp64; diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 61fefc046ec5..d083b860f083 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -53,7 +53,6 @@ #define APMU_DISP1 0x110 #define APMU_CCIC0 0x50 #define APMU_CCIC1 0xf4 -#define APMU_SP 0x68 #define MPMU_UART_PLL 0x14 struct mmp2_clk_unit { @@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = { .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), }; -static DEFINE_SPINLOCK(sp_lock); - static struct mmp_param_mux_clk apmu_mux_clks[] = { {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, @@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, - {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock}, }; static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index c782e62dd98b..58fa5c247af1 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -115,8 +115,8 @@ static const char * const 
gcc_parent_names_6[] = { "core_bi_pll_test_se", }; -static const char * const gcc_parent_names_7[] = { - "bi_tcxo", +static const char * const gcc_parent_names_7_ao[] = { + "bi_tcxo_ao", "gpll0", "gpll0_out_even", "core_bi_pll_test_se", @@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = { "core_bi_pll_test_se", }; +static const char * const gcc_parent_names_8_ao[] = { + "bi_tcxo_ao", + "gpll0", + "core_bi_pll_test_se", +}; + static const struct parent_map gcc_parent_map_10[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, @@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_ahb_clk_src", - .parent_names = gcc_parent_names_7, + .parent_names = gcc_parent_names_7_ao, .num_parents = 4, .ops = &clk_rcg2_ops, }, @@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = { .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_rbcpr_clk_src", - .parent_names = gcc_parent_names_8, + .parent_names = gcc_parent_names_8_ao, .num_parents = 3, .ops = &clk_rcg2_ops, }, diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 3b97f60540ad..609970c0b666 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1", static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", 0x060, BIT(10), 0); static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", - 0x060, BIT(12), 0); + 0x060, BIT(11), 0); static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", - 0x060, BIT(13), 0); + 0x060, BIT(12), 0); static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", 0x060, BIT(13), 0); static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 621b1cd996db..ac12f261f8ca 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, [RST_BUS_VE] = { 0x2c4, BIT(0) }, - [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(4) }, [RST_BUS_CSI] = { 0x2c4, BIT(8) }, [RST_BUS_DE] = { 0x2c4, BIT(12) }, [RST_BUS_DBG] = { 0x2c4, BIT(31) }, diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 8d77090ad94a..0241450f3eb3 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, num_dividers = i; tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); - if (!tmp) + if (!tmp) { + *table = ERR_PTR(-ENOMEM); return -ENOMEM; + } valid_div = 0; *width = 0; @@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) { struct clk_omap_divider *div; struct clk_omap_reg *reg; + int ret; if (!setup) return NULL; @@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) div->flags |= CLK_DIVIDER_POWER_OF_TWO; div->table = _get_div_table_from_setup(setup, &div->width); + if (IS_ERR(div->table)) { + ret = PTR_ERR(div->table); + kfree(div); + return ERR_PTR(ret); + } + div->shift = setup->bit_shift; div->latch = -EINVAL; diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 595124074821..c364027638e1 100644 --- 
a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) if (IS_ERR(parent)) return -ENODEV; + /* Bail out if both clocks point to fck */ + if (clk_is_match(parent, timer->fclk)) + return 0; + ret = clk_set_parent(timer->fclk, parent); if (ret < 0) pr_err("%s: failed to set parent\n", __func__); @@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) timer->pdev = pdev; pm_runtime_enable(dev); - pm_runtime_irq_safe(dev); if (!timer->reserved) { ret = pm_runtime_get_sync(dev); diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 242c3370544e..9ed46d188cb5 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - kfree(priv); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); + kfree(priv); return 0; } diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index b17d153e724f..23a1b27579a5 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, local_irq_enable(); if (!current_set_polling_and_test()) { unsigned int loop_count = 0; - u64 limit = TICK_USEC; + u64 limit = TICK_NSEC; int i; for (i = 1; i < drv->state_count; i++) { diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index fe070d75c842..4c97478d44bd 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq) struct nitrox_device *ndev = cmdq->ndev; struct nitrox_softreq *sr; int req_completed = 0, err = 0, budget; + completion_t callback; + void *cb_arg; /* check all pending requests */ budget = atomic_read(&cmdq->pending_count); @@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq) smp_mb__after_atomic(); /* remove from response list */ response_list_del(sr, cmdq); - /* ORH error code */ err = READ_ONCE(*sr->resp.orh) & 0xff; - - if (sr->callback) - sr->callback(sr->cb_arg, err); + callback = sr->callback; + cb_arg = sr->cb_arg; softreq_destroy(sr); + if (callback) + callback(cb_arg, err); req_completed++; } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 8ada308d72ee..b0125ad65825 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev) rc = cc_ivgen_init(new_drvdata); if (rc) { dev_err(dev, "cc_ivgen_init failed\n"); - goto post_power_mgr_err; + goto post_buf_mgr_err; } /* Allocate crypto algs */ @@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev) goto post_hash_err; } + /* All set, we can allow autosuspend */ + cc_pm_go(new_drvdata); + /* If we got here and FIPS mode is enabled * it means all FIPS test passed, so let TEE * know we're good. 
@@ -417,8 +420,6 @@ post_cipher_err: cc_cipher_free(new_drvdata); post_ivgen_err: cc_ivgen_fini(new_drvdata); -post_power_mgr_err: - cc_pm_fini(new_drvdata); post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89f..6ff7e75ad90e 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev) int cc_pm_init(struct cc_drvdata *drvdata) { - int rc = 0; struct device *dev = drvdata_to_dev(drvdata); /* must be before the enabling to avoid resdundent suspending */ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); /* activate the PM module */ - rc = pm_runtime_set_active(dev); - if (rc) - return rc; - /* enable the PM module*/ - pm_runtime_enable(dev); + return pm_runtime_set_active(dev); +} - return rc; +/* enable the PM module*/ +void cc_pm_go(struct cc_drvdata *drvdata) +{ + pm_runtime_enable(drvdata_to_dev(drvdata)); } void cc_pm_fini(struct cc_drvdata *drvdata) diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 020a5403c58b..f62624357020 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h @@ -16,6 +16,7 @@ extern const struct dev_pm_ops ccree_pm; int cc_pm_init(struct cc_drvdata *drvdata); +void cc_pm_go(struct cc_drvdata *drvdata); void cc_pm_fini(struct cc_drvdata *drvdata); int cc_pm_suspend(struct device *dev); int cc_pm_resume(struct device *dev); @@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) return 0; } +static void cc_pm_go(struct cc_drvdata *drvdata) {} + static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} static inline int cc_pm_suspend(struct device *dev) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 4e557684f792..fe69dccfa0c0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -203,6 +203,7 @@ struct at_xdmac_chan { u32 save_cim; u32 save_cnda; u32 save_cndc; + u32 irq_status; unsigned long status; struct tasklet_struct tasklet; struct dma_slave_config sconfig; @@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data) struct at_xdmac_desc *desc; u32 error_mask; - dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", - __func__, atchan->status); + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", + __func__, atchan->irq_status); error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS @@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data) if (at_xdmac_chan_is_cyclic(atchan)) { at_xdmac_handle_cyclic(atchan); - } else if ((atchan->status & AT_XDMAC_CIS_LIS) - || (atchan->status & error_mask)) { + } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) + || (atchan->irq_status & error_mask)) { struct dma_async_tx_descriptor *txd; - if (atchan->status & AT_XDMAC_CIS_RBEIS) + if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) dev_err(chan2dev(&atchan->chan), "read bus error!!!"); - if (atchan->status & AT_XDMAC_CIS_WBEIS) + if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) dev_err(chan2dev(&atchan->chan), "write bus error!!!"); - if (atchan->status & AT_XDMAC_CIS_ROIS) + if (atchan->irq_status & AT_XDMAC_CIS_ROIS) dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); spin_lock(&atchan->lock); @@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) atchan = &atxdmac->chan[i]; chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); - 
atchan->status = chan_status & chan_imr; + atchan->irq_status = chan_status & chan_imr; dev_vdbg(atxdmac->dma.dev, "%s: chan%d: imr=0x%x, status=0x%x\n", __func__, i, chan_imr, chan_status); @@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) at_xdmac_chan_read(atchan, AT_XDMAC_CDA), at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); - if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) + if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); tasklet_schedule(&atchan->tasklet); diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 1a44c8086d77..ae10f5614f95 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg( } } -static int bcm2835_dma_abort(void __iomem *chan_base) +static int bcm2835_dma_abort(struct bcm2835_chan *c) { - unsigned long cs; + void __iomem *chan_base = c->chan_base; long int timeout = 10000; - cs = readl(chan_base + BCM2835_DMA_CS); - if (!(cs & BCM2835_DMA_ACTIVE)) + /* + * A zero control block address means the channel is idle. + * (The ACTIVE flag in the CS register is not a reliable indicator.) + */ + if (!readl(chan_base + BCM2835_DMA_ADDR)) return 0; /* Write 0 to the active bit - Pause the DMA */ writel(0, chan_base + BCM2835_DMA_CS); /* Wait for any current AXI transfer to complete */ - while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { + while ((readl(chan_base + BCM2835_DMA_CS) & + BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) cpu_relax(); - cs = readl(chan_base + BCM2835_DMA_CS); - } - /* We'll un-pause when we set of our next DMA */ + /* Peripheral might be stuck and fail to signal AXI write responses */ if (!timeout) - return -ETIMEDOUT; - - if (!(cs & BCM2835_DMA_ACTIVE)) - return 0; - - /* Terminate the control block chain */ - writel(0, chan_base + BCM2835_DMA_NEXTCB); - - /* Abort the whole DMA */ - writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, - chan_base + BCM2835_DMA_CS); + dev_err(c->vc.chan.device->dev, + "failed to complete outstanding writes\n"); + writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); return 0; } @@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) spin_lock_irqsave(&c->vc.lock, flags); - /* Acknowledge interrupt */ - writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); + /* + * Clear the INT flag to receive further interrupts. Keep the channel + * active in case the descriptor is cyclic or in case the client has + * already terminated the descriptor and issued a new one. (May happen + * if this IRQ handler is threaded.) If the channel is finished, it + * will remain idle despite the ACTIVE flag being set. 
+ */ + writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, + c->chan_base + BCM2835_DMA_CS); d = c->desc; @@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) if (d->cyclic) { /* call the cyclic callback */ vchan_cyclic_callback(&d->vd); - - /* Keep the DMA engine running */ - writel(BCM2835_DMA_ACTIVE, - c->chan_base + BCM2835_DMA_CS); - } else { + } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { vchan_cookie_complete(&c->desc->vd); bcm2835_dma_start_desc(c); } @@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); unsigned long flags; - int timeout = 10000; LIST_HEAD(head); spin_lock_irqsave(&c->vc.lock, flags); @@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) list_del_init(&c->node); spin_unlock(&d->lock); - /* - * Stop DMA activity: we assume the callback will not be called - * after bcm_dma_abort() returns (even if it does, it will see - * c->desc is NULL and exit.) - */ + /* stop DMA activity */ if (c->desc) { vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; - bcm2835_dma_abort(c->chan_base); - - /* Wait for stopping */ - while (--timeout) { - if (!(readl(c->chan_base + BCM2835_DMA_CS) & - BCM2835_DMA_ACTIVE)) - break; - - cpu_relax(); - } - - if (!timeout) - dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); + bcm2835_dma_abort(c); } vchan_get_all_descriptors(&c->vc, &head); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 2eea4ef72915..6511928b4cdf 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -711,11 +711,9 @@ static int dmatest_func(void *data) srcs[i] = um->addr[i] + src_off; ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - dmaengine_unmap_put(um); result("src mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->to_cnt++; } @@ -730,11 +728,9 @@ static int dmatest_func(void *data) DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - dmaengine_unmap_put(um); result("dst mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->bidi_cnt++; } @@ -762,12 +758,10 @@ static int dmatest_func(void *data) } if (!tx) { - dmaengine_unmap_put(um); result("prep error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } done->done = false; @@ -776,12 +770,10 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - dmaengine_unmap_put(um); result("submit error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } dma_async_issue_pending(chan); @@ -790,22 +782,20 @@ static int dmatest_func(void *data) status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - dmaengine_unmap_put(um); - if (!done->done) { result("test timed out", total_tests, src_off, dst_off, len, 0); - failed_tests++; - continue; + goto error_unmap_continue; } else if (status != DMA_COMPLETE) { result(status == DMA_ERROR ? 
"completion error status" : "completion busy status", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } + dmaengine_unmap_put(um); + if (params->noverify) { verbose_result("test passed", total_tests, src_off, dst_off, len, 0); @@ -846,6 +836,12 @@ static int dmatest_func(void *data) verbose_result("test passed", total_tests, src_off, dst_off, len, 0); } + + continue; + +error_unmap_continue: + dmaengine_unmap_put(um); + failed_tests++; } ktime = ktime_sub(ktime_get(), ktime); ktime = ktime_sub(ktime, comparetime); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index c2fff3f6c9ca..4a09af3cd546 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data) { struct imxdma_channel *imxdmac = (void *)data; struct imxdma_engine *imxdma = imxdmac->imxdma; - struct imxdma_desc *desc; + struct imxdma_desc *desc, *next_desc; unsigned long flags; spin_lock_irqsave(&imxdma->lock, flags); @@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data) list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); if (!list_empty(&imxdmac->ld_queue)) { - desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, - node); + next_desc = list_first_entry(&imxdmac->ld_queue, + struct imxdma_desc, node); list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); - if (imxdma_xfer_desc(desc) < 0) + if (imxdma_xfer_desc(next_desc) < 0) dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", __func__, imxdmac->channel); } diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 472c88ae1c0f..92f843eaf1e0 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver) } EXPORT_SYMBOL_GPL(scmi_driver_unregister); +static void scmi_device_release(struct device *dev) +{ + kfree(to_scmi_dev(dev)); +} + struct scmi_device * scmi_device_create(struct device_node *np, struct device *parent, int protocol) { @@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) scmi_dev->dev.parent = parent; scmi_dev->dev.of_node = np; scmi_dev->dev.bus = &scmi_bus_type; + scmi_dev->dev.release = scmi_device_release; dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); retval = device_register(&scmi_dev->dev); @@ -156,9 +162,8 @@ free_mem: void scmi_device_destroy(struct scmi_device *scmi_dev) { scmi_handle_put(scmi_dev->handle); - device_unregister(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, scmi_dev->id); - kfree(scmi_dev); + device_unregister(&scmi_dev->dev); } void scmi_set_handle(struct scmi_device *scmi_dev) diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 23ea1ed409d1..352bd2473162 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -37,8 +37,9 @@ extern u64 efi_system_table; static struct ptdump_info efi_ptdump_info = { .mm = &efi_mm, .markers = (struct addr_marker[]){ - { 0, "UEFI runtime start" }, - { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } + { 0, "UEFI runtime start" }, + { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }, + { -1, NULL } }, .base_addr = 0, }; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4c46ff6f2242..55b77c576c42 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, 
early_memunmap(tbl, sizeof(*tbl)); } - return 0; -} -int __init efi_apply_persistent_mem_reservations(void) -{ if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { unsigned long prsv = efi.mem_reserve; diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index eee42d5e25ee..c037c6c5d0b7 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; efi_status_t status; - if (IS_ENABLED(CONFIG_ARM)) - return; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), (void **)&rsv); if (status != EFI_SUCCESS) { diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 8903b9ccfc2b..e2abfdb5cee6 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) static DEFINE_SEMAPHORE(efi_runtime_lock); /* + * Expose the EFI runtime lock to the UV platform + */ +#ifdef CONFIG_X86_UV +extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); +#endif + +/* * Calls the appropriate efi_runtime_service() with the appropriate * arguments. * diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index a1a09e04fab8..13851b3d1c56 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -508,14 +508,11 @@ static int __init s10_init(void) return -ENODEV; np = of_find_matching_node(fw_np, s10_of_match); - if (!np) { - of_node_put(fw_np); + if (!np) return -ENODEV; - } of_node_put(np); ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); - of_node_put(fw_np); if (ret) return ret; diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 6b11f1314248..7f9e0304b510 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c @@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc, static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, unsigned int nr, int value) { - if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) + if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) { + altr_a10sr_gpio_set(gc, nr, value); return 0; + } return -EINVAL; } diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index e0d6a0a7bc69..e41223c05f6e 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset) static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) { - return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + struct sprd_eic *sprd_eic = gpiochip_get_data(chip); + + switch (sprd_eic->type) { + case SPRD_EIC_DEBOUNCE: + return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + case SPRD_EIC_ASYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA); + case SPRD_EIC_SYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA); + default: + return -ENOTSUPP; + } } static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) @@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: + sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0); 
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); irq_set_handler_locked(data, handle_edge_irq); break; diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index 00e954f22bc9..74401e0adb29 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c @@ -30,6 +30,7 @@ #define GPIO_REG_EDGE 0xA0 struct mtk_gc { + struct irq_chip irq_chip; struct gpio_chip chip; spinlock_t lock; int bank; @@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip mediatek_gpio_irq_chip = { - .irq_unmask = mediatek_gpio_irq_unmask, - .irq_mask = mediatek_gpio_irq_mask, - .irq_mask_ack = mediatek_gpio_irq_mask, - .irq_set_type = mediatek_gpio_irq_type, -}; - static int mediatek_gpio_xlate(struct gpio_chip *chip, const struct of_phandle_args *spec, u32 *flags) @@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } + rg->irq_chip.name = dev_name(dev); + rg->irq_chip.parent_device = dev; + rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; + rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; + rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; + rg->irq_chip.irq_set_type = mediatek_gpio_irq_type; + if (mtk->gpio_irq) { /* * Manually request the irq here instead of passing @@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } - ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, + ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip, 0, handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add gpiochip_irqchip\n"); return ret; } - gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, + gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip, mtk->gpio_irq, NULL); } @@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev) mtk->gpio_irq = irq_of_parse_and_map(np, 0); mtk->dev = dev; platform_set_drvdata(pdev, mtk); - mediatek_gpio_irq_chip.name = dev_name(dev); for (i = 0; i < MTK_BANK_CNT; i++) { ret = mediatek_gpio_bank_probe(dev, np, i); diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index adf72dda25a2..68a35b65925a 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table); */ struct pcf857x { struct gpio_chip chip; + struct irq_chip irqchip; struct i2c_client *client; struct mutex lock; /* protect 'out' */ unsigned out; /* software latch */ @@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data) mutex_unlock(&gpio->lock); } -static struct irq_chip pcf857x_irq_chip = { - .name = "pcf857x", - .irq_enable = pcf857x_irq_enable, - .irq_disable = pcf857x_irq_disable, - .irq_ack = noop, - .irq_mask = noop, - .irq_unmask = noop, - .irq_set_wake = pcf857x_irq_set_wake, - .irq_bus_lock = pcf857x_irq_bus_lock, - .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, -}; - /*-------------------------------------------------------------------------*/ static int pcf857x_probe(struct i2c_client *client, @@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client, /* Enable irqchip if we have an interrupt */ if (client->irq) { + gpio->irqchip.name = "pcf857x", + gpio->irqchip.irq_enable = pcf857x_irq_enable, + gpio->irqchip.irq_disable = pcf857x_irq_disable, + gpio->irqchip.irq_ack = noop, + gpio->irqchip.irq_mask = noop, + gpio->irqchip.irq_unmask = noop, + gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake, + gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock, + gpio->irqchip.irq_bus_sync_unlock = 
pcf857x_irq_bus_sync_unlock, status = gpiochip_irqchip_add_nested(&gpio->chip, - &pcf857x_irq_chip, + &gpio->irqchip, 0, handle_level_irq, IRQ_TYPE_NONE); if (status) { @@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client, if (status) goto fail; - gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, + gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip, client->irq); gpio->irq_parent = client->irq; } diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index e9600b556f39..bcc6be4a5cb2 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void) { switch (gpio_type) { case PXA3XX_GPIO: + case MMP2_GPIO: return false; default: diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 1b79ebcfce3e..541fa6ac399d 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) struct vf610_gpio_port *port; struct resource *iores; struct gpio_chip *gc; + int i; int ret; port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); @@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev) if (ret < 0) return ret; + /* Mask all GPIO interrupts */ + for (i = 0; i < gc->ngpio; i++) + vf610_gpio_writel(0, port->base + PORT_PCR(i)); + /* Clear the interrupt status register for all GPIO's */ vf610_gpio_writel(~0, port->base + PORT_ISFR); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 259cf6ab969b..4d291b75cb9f 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -530,17 +530,24 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data) if (ares->type != ACPI_RESOURCE_TYPE_GPIO) return 1; - if (lookup->n++ == lookup->index && !lookup->desc) { + if (!lookup->desc) { const struct acpi_resource_gpio *agpio = &ares->data.gpio; - int pin_index = lookup->pin_index; + bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; + int pin_index; + if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint) + lookup->index++; + + if (lookup->n++ != lookup->index) + return 1; + + pin_index = lookup->pin_index; if (pin_index >= agpio->pin_table_length) return 1; lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr, agpio->pin_table[pin_index]); - lookup->info.gpioint = - agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; + lookup->info.gpioint = gpioint; /* * Polarity and triggering are only specified for GpioInt diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 1651d7f0a303..d1adfdf50fb3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) /* Do not leak kernel stack to userspace */ memset(&ge, 0, sizeof(ge)); - ge.timestamp = le->timestamp; + /* + * We may be running from a nested threaded interrupt in which case + * we didn't get the timestamp from lineevent_irq_handler(). 
+ */ + if (!le->timestamp) + ge.timestamp = ktime_get_real_ns(); + else + ge.timestamp = le->timestamp; if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bc62bf41b7e9..5dc349173e4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (amdgpu_device_is_px(dev)) { + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 6896dec97fc7..0ed41a9d2d77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, effective_mode &= ~S_IWUSR; if ((adev->flags & AMD_IS_APU) && - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || + (attr == &sensor_dev_attr_power1_average.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 71913a18d142..a38e0fb4a6fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -38,6 +38,7 @@ #include "amdgpu_gem.h" #include <drm/amdgpu_drm.h> #include <linux/dma-buf.h> +#include <linux/dma-fence-array.h> /** * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table @@ -187,6 +188,48 @@ error: return ERR_PTR(ret); } +static int +__reservation_object_make_exclusive(struct reservation_object *obj) +{ + struct dma_fence **fences; + unsigned int count; + int r; + + if (!reservation_object_get_list(obj)) /* no shared fences to convert */ + return 0; + + r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); + if (r) + return r; + + if (count == 0) { + /* Now that was unexpected. */ + } else if (count == 1) { + reservation_object_add_excl_fence(obj, fences[0]); + dma_fence_put(fences[0]); + kfree(fences); + } else { + struct dma_fence_array *array; + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), 0, + false); + if (!array) + goto err_fences_put; + + reservation_object_add_excl_fence(obj, &array->base); + dma_fence_put(&array->base); + } + + return 0; + +err_fences_put: + while (count--) + dma_fence_put(fences[count]); + kfree(fences); + return -ENOMEM; +} + /** * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation * @dma_buf: Shared DMA buffer @@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, if (attach->dev->driver != adev->dev->driver) { /* - * Wait for all shared fences to complete before we switch to future - * use of exclusive fence on this prime shared bo. + * We only create shared fences for internal use, but importers + * of the dmabuf rely on exclusive fences for implicitly + * tracking write hazards. As any of the current fences may + * correspond to a write, we need to convert all existing + * fences on the reservation object into a single exclusive + * fence. 
*/ - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, - true, false, - MAX_SCHEDULE_TIMEOUT); - if (unlikely(r < 0)) { - DRM_DEBUG_PRIME("Fence wait failed: %li\n", r); + r = __reservation_object_make_exclusive(bo->tbo.resv); + if (r) goto error_unreserve; - } } /* pin buffer into GTT */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8fab0d637ee5..3a9b48b227ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle) adev->psp.sos_fw = NULL; release_firmware(adev->psp.asd_fw); adev->psp.asd_fw = NULL; - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; + if (adev->psp.ta_fw) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + } return 0; } @@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) struct ta_xgmi_shared_memory *xgmi_cmd; int ret; + if (!psp->adev->psp.ta_fw) + return -ENOENT; + if (!psp->xgmi_context.initialized) { ret = psp_xgmi_init_shared_buf(psp); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d2ea5ce2cefb..698bcb8ce61d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct ttm_bo_global *glob = adev->mman.bdev.glob; struct amdgpu_vm_bo_base *bo_base; +#if 0 if (vm->bulk_moveable) { spin_lock(&glob->lru_lock); ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); spin_unlock(&glob->lru_lock); return; } +#endif memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); @@ -3363,14 +3365,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, struct amdgpu_task_info *task_info) { struct amdgpu_vm *vm; + unsigned long flags; - spin_lock(&adev->vm_manager.pasid_lock); + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); if (vm) *task_info = vm->task_info; - spin_unlock(&adev->vm_manager.pasid_lock); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 4cd31a276dcd..186db182f924 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev, static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, bool enable) { + u32 tmp = 0; + if (enable) { + tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) | + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) | + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0); + + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW, + lower_32_bits(adev->doorbell.base)); + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH, + upper_32_bits(adev->doorbell.base)); + } + + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp); } static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0c6e7f9b143f..189fcb004579 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context 
*psp) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) - goto out2; - - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out2; - - ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); - adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); - adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; + adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); + adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); + adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 6811a5d05b27..aa2f71cc1eba 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = { static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = { - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), @@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8849b74078d6..9b639974c70c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle) case CHIP_RAVEN: adev->asic_funcs = &soc15_asic_funcs; if (adev->rev_id >= 0x8) - adev->external_rev_id = adev->rev_id + 0x81; + adev->external_rev_id = adev->rev_id + 0x79; else if (adev->pdev->device == 0x15d8) adev->external_rev_id = adev->rev_id + 0x41; + else if (adev->rev_id == 1) + adev->external_rev_id = adev->rev_id + 0x20; else - adev->external_rev_id = 0x1; + adev->external_rev_id = adev->rev_id + 0x01; if (adev->rev_id >= 0x8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5d85ff341385..2e7c44955f43 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -863,7 +863,7 @@ static int 
kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, return 0; } -#if CONFIG_X86_64 +#ifdef CONFIG_X86_64 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, uint32_t *num_entries, struct crat_subtype_iolink *sub_type_hdr) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f4fa40c387d3..5296b8f3e0ab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -786,12 +786,13 @@ static int dm_suspend(void *handle) struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; + WARN_ON(adev->dm.cached_state); + adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); + s3_handle_mst(adev->ddev, true); amdgpu_dm_irq_suspend(adev); - WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); @@ -4082,7 +4083,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, } if (connector_type == DRM_MODE_CONNECTOR_HDMIA || - connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) { drm_connector_attach_vrr_capable_property( &aconnector->base); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9a7ac58eb18e..ddd75a4d8ba5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us return bytes_from_user; } +/* + * Returns the min and max vrr vfreq through the connector's debugfs file. + * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range + */ +static int vrr_range_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq); + seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(vrr_range); + static const struct file_operations dp_link_settings_debugfs_fops = { .owner = THIS_MODULE, .read = dp_link_settings_read, @@ -697,7 +716,8 @@ static const struct { } dp_debugfs_entries[] = { {"link_settings", &dp_link_settings_debugfs_fops}, {"phy_settings", &dp_phy_settings_debugfs_fop}, - {"test_pattern", &dp_phy_test_pattern_fops} + {"test_pattern", &dp_phy_test_pattern_fops}, + {"vrr_range", &vrr_range_fops} }; int connector_debugfs_init(struct amdgpu_dm_connector *connector) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index afd287f08bc9..7a72ee46f14b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements( dc, context->bw.dce.sclk_khz); - pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; + /* + * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. + * This is not required for less than 5 displays, + * thus don't request decfclk in dc to avoid impact + * on power saving. 
+ * + */ + pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? + pp_display_cfg->min_engine_clock_khz : 0; pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dce.sclk_deep_sleep_khz; @@ -654,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; + int patched_disp_clk = context->bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + patched_disp_clk = patched_disp_clk * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ @@ -663,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } - if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { - context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); - clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { + context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); + clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index acd418515346..a6b80fdaa666 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h @@ -37,6 +37,10 @@ void dce100_prepare_bandwidth( struct dc *dc, struct dc_state *context); +void dce100_optimize_bandwidth( + struct dc *dc, + struct dc_state *context); + bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, enum pipe_gating_control power_gating); diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index a60a90e68d91..c4543178ba20 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc) dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; - dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; + dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index cdd1d6b7b9f2..4e9ea50141bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -790,9 +790,22 @@ bool dce80_validate_bandwidth( struct dc *dc, struct dc_state *context) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw.dce.dispclk_khz = 681000; + context->bw.dce.yclk_khz 
= 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + } else { + context->bw.dce.dispclk_khz = 0; + context->bw.dce.yclk_khz = 0; + } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 58a12ddf12f3..41883c981789 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2658,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .mirror = pipe_ctx->plane_state->horizontal_mirror }; - pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; - pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; + pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x; + pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y; if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f95c5f50eb0f..5273de3c5b98 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, break; case amd_pp_dpp_clock: pclk_vol_table = pinfo->vdd_dep_on_dppclk; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 99cba8ea5d82..5df1256618cc 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, object_count = cl->object_count; - object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); + object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), + array_size(object_count, sizeof(__u32))); if (IS_ERR(object_ids)) return PTR_ERR(object_ids); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 24a750436559..f91e02c87fd8 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode) if (mode->hsync) return mode->hsync; - if (mode->htotal < 0) + if (mode->htotal <= 0) return 0; calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 216f52b744a6..c882ea94172c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, return 0; } +static inline bool +__vma_matches(struct vm_area_struct *vma, struct file *filp, + unsigned long addr, unsigned long size) +{ + if (vma->vm_file != filp) + return false; + + return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; +} + /** * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address * it is mapped to. 
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return -EINTR; } vma = find_vma(mm, addr); - if (vma) + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); else diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..017fc602a10e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event) * Update the bitmask of enabled events and increment * the event reference counter. */ - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); + BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); i915->pmu.enable |= BIT_ULL(bit); i915->pmu.enable_count[bit]++; @@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - engine->pmu.enable |= BIT(sample); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != + I915_ENGINE_SAMPLE_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != + I915_ENGINE_SAMPLE_COUNT); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); + + engine->pmu.enable |= BIT(sample); engine->pmu.enable_count[sample]++; } @@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); + /* * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. 
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event) engine->pmu.enable &= ~BIT(sample); } - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); /* * Decrement the reference count and clear the enabled diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 7f164ca3db12..b3728c5f13e7 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -31,6 +31,8 @@ enum { ((1 << I915_PMU_SAMPLE_BITS) + \ (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) +#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) + struct i915_pmu_sample { u64 cur; }; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a7d60509ca7..067054cf4a86 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1790,7 +1790,7 @@ enum i915_power_well_id { #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 -#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ @@ -1798,7 +1798,7 @@ enum i915_power_well_id { _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_F_GRP_OFFSET) + \ 4 * (dw)) -#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ @@ -1834,9 +1834,9 @@ enum i915_power_well_id { #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 -#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) -#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) -#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ +#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) +#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) +#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) @@ -1864,8 +1864,12 @@ enum i915_power_well_id { #define RTERM_SELECT(x) ((x) << 3) #define RTERM_SELECT_MASK (0x7 << 3) -#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) -#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) +#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) +#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) +#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) +#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) +#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) +#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f3e1d6a0b7dd..7edce1b7b348 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ }; -struct icl_combo_phy_ddi_buf_trans { - u32 dw2_swing_select; - u32 dw2_swing_scalar; - u32 dw4_scaling; -}; - -/* 
Voltage Swing Programming for VccIO 0.85V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ - { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ - { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ -}; - -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 0.85V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.95V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ - { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ - { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +/* icl_combo_phy_ddi_translations */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 0.95V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ + { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ + { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0x9, 
0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ }; -/* Voltage Swing Programming for VccIO 1.05V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ - { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ - { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 1.05V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { + /* NT mV Trans mV db */ + { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ + { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ + { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ }; struct icl_mg_phy_ddi_buf_trans { @@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } } -static const struct icl_combo_phy_ddi_buf_trans * +static const struct cnl_ddi_buf_trans * icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, - int type, int *n_entries) + int type, int rate, int *n_entries) { - u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; - - if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { - switch (voltage) { - case VOLTAGE_INFO_0_85V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); - return icl_combo_phy_ddi_translations_edp_0_85V; - case VOLTAGE_INFO_0_95V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); - return icl_combo_phy_ddi_translations_edp_0_95V; - case VOLTAGE_INFO_1_05V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V); - return icl_combo_phy_ddi_translations_edp_1_05V; - default: - MISSING_CASE(voltage); - return NULL; - } - } else { - switch (voltage) { - case VOLTAGE_INFO_0_85V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V); - 
return icl_combo_phy_ddi_translations_dp_hdmi_0_85V; - case VOLTAGE_INFO_0_95V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V); - return icl_combo_phy_ddi_translations_dp_hdmi_0_95V; - case VOLTAGE_INFO_1_05V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V); - return icl_combo_phy_ddi_translations_dp_hdmi_1_05V; - default: - MISSING_CASE(voltage); - return NULL; - } + if (type == INTEL_OUTPUT_HDMI) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; + } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; } + + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; } static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) @@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por if (IS_ICELAKE(dev_priv)) { if (intel_port_is_combophy(dev_priv, port)) - icl_get_combo_buf_trans(dev_priv, port, - INTEL_OUTPUT_HDMI, &n_entries); + icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, + 0, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); default_entry = n_entries - 1; @@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, return DDI_CLK_SEL_TBT_810; default: MISSING_CASE(clock); - break; + return DDI_CLK_SEL_NONE; } case DPLL_ID_ICL_MGPLL1: case DPLL_ID_ICL_MGPLL2: @@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = encoder->port; int n_entries; if (IS_ICELAKE(dev_priv)) { if (intel_port_is_combophy(dev_priv, port)) icl_get_combo_buf_trans(dev_priv, port, encoder->type, - &n_entries); + intel_dp->link_rate, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); } else if (IS_CANNONLAKE(dev_priv)) { @@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type) + u32 level, enum port port, int type, + int rate) { - const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; + const struct cnl_ddi_buf_trans *ddi_translations = NULL; u32 n_entries, val; int ln; ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, - &n_entries); + rate, &n_entries); if (!ddi_translations) return; @@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, level = n_entries - 1; } - /* Set PORT_TX_DW5 Rterm Sel to 110b. 
*/ + /* Set PORT_TX_DW5 */ val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); - val &= ~RTERM_SELECT_MASK; + val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | + TAP2_DISABLE | TAP3_DISABLE); + val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); - - /* Program PORT_TX_DW5 */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); - /* Set DisableTap2 and DisableTap3 if MIPI DSI - * Clear DisableTap2 and DisableTap3 for all other Ports - */ - if (type == INTEL_OUTPUT_DSI) { - val |= TAP2_DISABLE; - val |= TAP3_DISABLE; - } else { - val &= ~TAP2_DISABLE; - val &= ~TAP3_DISABLE; - } + val |= TAP3_DISABLE; I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); /* Program PORT_TX_DW2 */ val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); - val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); - val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); + val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); + val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Program Rcomp scalar for every table entry */ - val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); + val |= RCOMP_SCALAR(0x98); I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); /* Program PORT_TX_DW4 */ @@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); - val |= ddi_translations[level].dw4_scaling; + val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); + val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); + val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); } + + /* Program PORT_TX_DW7 */ + val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); + val &= ~N_SCALAR_MASK; + val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); + I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); } static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(dev_priv, level, port, type); + icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); /* 6. Set training enable to trigger update */ val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3da9c0f9e948..248128126422 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, } } +static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + /* + * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram + * the hardware when a high res displays plugged in. DPLL P + * divider is zero, and the pipe timings are bonkers. We'll + * try to disable everything in that case. + * + * FIXME would be nice to be able to sanitize this state + * without several WARNs, but for now let's take the easy + * road. 
+ */ + return IS_GEN6(dev_priv) && + crtc_state->base.active && + crtc_state->shared_dpll && + crtc_state->port_clock == 0; +} + static void intel_sanitize_encoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc_state *crtc_state = crtc ? + to_intel_crtc_state(crtc->base.state) : NULL; /* We need to check both for a crtc link (meaning that the * encoder is active and trying to read from a pipe) and the * pipe itself being active. */ - bool has_active_crtc = encoder->base.crtc && - to_intel_crtc(encoder->base.crtc)->active; + bool has_active_crtc = crtc_state && + crtc_state->base.active; + + if (crtc_state && has_bogus_dpll_config(crtc_state)) { + DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", + pipe_name(crtc->pipe)); + has_active_crtc = false; + } connector = intel_encoder_find_connector(encoder); if (connector && !has_active_crtc) { @@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) /* Connector is active, but has no active pipe. This is * fallout from our resume register restoring. Disable * the encoder manually again. */ - if (encoder->base.crtc) { - struct drm_crtc_state *crtc_state = encoder->base.crtc->state; + if (crtc_state) { + struct drm_encoder *best_encoder; DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", encoder->base.base.id, encoder->base.name); + + /* avoid oopsing in case the hooks consult best_encoder */ + best_encoder = connector->base.state->best_encoder; + connector->base.state->best_encoder = &encoder->base; + if (encoder->disable) - encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); + encoder->disable(encoder, crtc_state, + connector->base.state); if (encoder->post_disable) - encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); + encoder->post_disable(encoder, crtc_state, + connector->base.state); + + connector->base.state->best_encoder = best_encoder; } encoder->base.crtc = NULL; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fdd2cbc56fa3..22a74608c6e4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; - if (port == PORT_B) + if (intel_port_is_combophy(dev_priv, port) && + !intel_dp_is_edp(intel_dp)) return 540000; return 810000; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f94a04b4ad87..e9ddeaf05a14 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -209,6 +209,16 @@ struct intel_fbdev { unsigned long vma_flags; async_cookie_t cookie; int preferred_bpp; + + /* Whether or not fbdev hpd processing is temporarily suspended */ + bool hpd_suspended : 1; + /* Set when a hotplug was received while HPD processing was + * suspended + */ + bool hpd_waiting : 1; + + /* Protects hpd_suspended */ + struct mutex hpd_lock; }; struct intel_encoder { diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..4ee16b264dbe 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ 
b/drivers/gpu/drm/i915/intel_fbdev.c @@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); - unsigned long conn_configured, conn_seq, mask; unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); + unsigned long conn_configured, conn_seq; int i, j; bool *save_enabled; bool fallback = true, ret = true; @@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, drm_modeset_backoff(&ctx); memcpy(save_enabled, enabled, count); - mask = GENMASK(count - 1, 0); + conn_seq = GENMASK(count - 1, 0); conn_configured = 0; retry: - conn_seq = conn_configured; for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; @@ -371,7 +370,8 @@ retry: if (conn_configured & BIT(i)) continue; - if (conn_seq == 0 && !connector->has_tile) + /* First pass, only consider tiled connectors */ + if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) continue; if (connector->status == connector_status_connected) @@ -475,8 +475,10 @@ retry: conn_configured |= BIT(i); } - if ((conn_configured & mask) != mask && conn_configured != conn_seq) + if (conn_configured != conn_seq) { /* repeat until no more are found */ + conn_seq = conn_configured; goto retry; + } /* * If the BIOS didn't enable everything it could, fall back to have the @@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev) if (ifbdev == NULL) return -ENOMEM; + mutex_init(&ifbdev->hpd_lock); drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); if (!intel_fbdev_init_bios(dev, ifbdev)) @@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) intel_fbdev_destroy(ifbdev); } +/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD + * processing, fbdev will perform a full connector reprobe if a hotplug event + * was received while HPD was suspended. 
+ */ +static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) +{ + bool send_hpd = false; + + mutex_lock(&ifbdev->hpd_lock); + ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; + send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; + ifbdev->hpd_waiting = false; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd) { + DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); + drm_fb_helper_hotplug_event(&ifbdev->helper); + } +} + void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous */ if (state != FBINFO_STATE_RUNNING) flush_work(&dev_priv->fbdev_suspend_work); + console_lock(); } else { /* @@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); + + intel_fbdev_hpd_set_suspend(ifbdev, state); } void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + bool send_hpd; if (!ifbdev) return; intel_fbdev_sync(ifbdev); - if (ifbdev->vma || ifbdev->helper.deferred_setup) + + mutex_lock(&ifbdev->hpd_lock); + send_hpd = !ifbdev->hpd_suspended; + ifbdev->hpd_waiting = true; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..3ac20153705a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -55,7 +55,12 @@ struct opregion_header { u8 signature[16]; u32 size; - u32 opregion_ver; + struct { + u8 rsvd; + u8 revision; + u8 minor; + u8 major; + } __packed over; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; @@ -119,7 +124,8 @@ struct opregion_asle { u64 fdss; u32 fdsp; u32 stat; - u64 rvda; /* Physical address of raw vbt data */ + u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) + * address of raw VBT data. */ u32 rvds; /* Size of raw vbt data */ u8 rsvd[58]; } __packed; @@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) opregion->header = base; opregion->lid_state = base + ACPI_CLID; + DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", + opregion->header->over.major, + opregion->header->over.minor, + opregion->header->over.revision); + mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); @@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) if (dmi_check_system(intel_no_opregion_vbt)) goto out; - if (opregion->header->opregion_ver >= 2 && opregion->asle && + if (opregion->header->over.major >= 2 && opregion->asle && opregion->asle->rvda && opregion->asle->rvds) { - opregion->rvda = memremap(opregion->asle->rvda, - opregion->asle->rvds, + resource_size_t rvda = opregion->asle->rvda; + + /* + * opregion 2.0: rvda is the physical VBT address. + * + * opregion 2.1+: rvda is unsigned, relative offset from + * opregion base, and should never point within opregion. 
+ */ + if (opregion->header->over.major > 2 || + opregion->header->over.minor >= 1) { + WARN_ON(rvda < OPREGION_SIZE); + + rvda += asls; + } + + opregion->rvda = memremap(rvda, opregion->asle->rvds, MEMREMAP_WB); + vbt = opregion->rvda; vbt_size = opregion->asle->rvds; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { @@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) goto out; } else { DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); + memunmap(opregion->rvda); + opregion->rvda = NULL; } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..a1a7cc29fdd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -415,16 +415,17 @@ struct intel_engine_cs { /** * @enable_count: Reference count for the enabled samplers. * - * Index number corresponds to the bit number from @enable. + * Index number corresponds to @enum drm_i915_pmu_engine_sample. */ - unsigned int enable_count[I915_PMU_SAMPLE_BITS]; + unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; /** * @sample: Counter values for sampling events. * * Our internal timer stores the current counters in this field. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. */ -#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) - struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; + struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; } pmu; /* diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d2e003d8f3db..5170a0f5fe7b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane, keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - keymsk = key->channel_mask & 0x3ffffff; + keymsk = key->channel_mask & 0x7ffffff; if (alpha < 0xff) keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 2c5bbe317353..e31e263cf86b 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) int bus_format; ret = of_property_read_u32(child, "reg", &i); - if (ret || i < 0 || i > 1) - return -EINVAL; + if (ret || i < 0 || i > 1) { + ret = -EINVAL; + goto free_child; + } if (!of_device_is_available(child)) continue; @@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) channel = &imx_ldb->channel[i]; channel->ldb = imx_ldb; channel->chno = i; - channel->child = child; /* * The output port is port@4 with an external 4-port mux or @@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) imx_ldb->lvds_mux ? 
4 : 2, 0, &channel->panel, &channel->bridge); if (ret && ret != -ENODEV) - return ret; + goto free_child; /* panel ddc only if there is no bridge */ if (!channel->bridge) { ret = imx_ldb_panel_ddc(dev, channel, child); if (ret) - return ret; + goto free_child; } bus_format = of_get_bus_format(dev, child); @@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", bus_format); - return bus_format; + ret = bus_format; + goto free_child; } channel->bus_format = bus_format; + channel->child = child; ret = imx_ldb_register(drm, channel); - if (ret) - return ret; + if (ret) { + channel->child = NULL; + goto free_child; + } } dev_set_drvdata(dev, imx_ldb); return 0; + +free_child: + of_node_put(child); + return ret; } static void imx_ldb_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index c390924de93d..21e964f6ab5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - /* CRTC should be enabled */ + /* nothing to check when disabling or disabled */ if (!crtc_state->enable) - return -EINVAL; + return 0; switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 00a9c2ab9e6c..64fb788b6647 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll) static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) { - struct dsi_data *dsi = p; + struct dsi_data *dsi = s->private; struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; enum dss_clk_source dispc_clk_src, dsi_clk_src; int dsi_module = dsi->module_id; @@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) { - struct dsi_data *dsi = p; + struct dsi_data *dsi = s->private; unsigned long flags; struct dsi_irq_stats stats; @@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) static int dsi_dump_dsi_regs(struct seq_file *s, void *p) { - struct dsi_data *dsi = p; + struct dsi_data *dsi = s->private; if (dsi_runtime_get(dsi)) return 0; @@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev, dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; + /* + * HACK: These flags should be handled through the omap_dss_device bus + * flags, but this will only be possible when the DSI encoder will be + * converted to the omapdrm-managed encoder model. 
+ */ + dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; + dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; + dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; + dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; + dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; dss_mgr_set_timings(&dsi->output, &dsi->vm); @@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); dsi->debugfs.regs = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_regs, &dsi); + dsi_dump_dsi_regs, dsi); #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_irqs, &dsi); + dsi_dump_dsi_irqs, dsi); #endif snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); dsi->debugfs.clks = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_clocks, &dsi); + dsi_dump_dsi_clocks, dsi); return 0; } @@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data) dss_debugfs_remove_file(dsi->debugfs.irqs); dss_debugfs_remove_file(dsi->debugfs.regs); - of_platform_depopulate(dev); - WARN_ON(dsi->scp_clk_refcount > 0); dss_pll_unregister(&dsi->pll); @@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev) dsi_uninit_output(dsi); + of_platform_depopulate(&pdev->dev); + pm_runtime_disable(&pdev->dev); if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index d587779a80b4..a97294ac96d5 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev) u16 data_offset, size; u8 frev, crev; struct ci_power_info *pi; - enum pci_bus_speed speed_cap; + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; struct pci_dev *root = rdev->pdev->bus->self; int ret; @@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev) return -ENOMEM; rdev->pm.dpm.priv = pi; - speed_cap = pcie_get_speed_cap(root); + if (!pci_is_root_bus(rdev->pdev->bus)) + speed_cap = pcie_get_speed_cap(root); if (speed_cap == PCI_SPEED_UNKNOWN) { pi->sys_pcie_mask = 0; } else { diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index dec1e081f529..6a8fb6fd183c 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (radeon_is_px(dev)) { + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8fb60b3af015..0a785ef0ab66 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev) struct ni_power_info *ni_pi; struct si_power_info *si_pi; struct atom_clock_dividers dividers; - enum pci_bus_speed speed_cap; + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; struct pci_dev *root = rdev->pdev->bus->self; int ret; @@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev) eg_pi = &ni_pi->eg; pi = &eg_pi->rv7xx; - speed_cap = pcie_get_speed_cap(root); + if (!pci_is_root_bus(rdev->pdev->bus)) + speed_cap = pcie_get_speed_cap(root); if (speed_cap == 
PCI_SPEED_UNKNOWN) { si_pi->sys_pcie_mask = 0; } else { diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 37f93022a106..c0351abf83a3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -1,17 +1,8 @@ -//SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd * Author: * Sandy Huang <hjc@rock-chips.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <drm/drmP.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h index 38b52e63b2b0..27b9635124bc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.h +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h @@ -1,17 +1,8 @@ -//SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd * Author: * Sandy Huang <hjc@rock-chips.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #ifdef CONFIG_ROCKCHIP_RGB diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4463d3826ecb..e2942c9a11a7 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { + trace_drm_sched_job_wait_dep(sched_job, entity->dependency); - if (drm_sched_entity_add_dependency_cb(entity)) { - - trace_drm_sched_job_wait_dep(sched_job, - entity->dependency); + if (drm_sched_entity_add_dependency_cb(entity)) return NULL; - } } /* skip jobs from entity that marked guilty */ diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 0420f5c978b9..cf45d0f940f9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, return PTR_ERR(tcon->sclk0); } } + clk_prepare_enable(tcon->sclk0); if (tcon->quirks->has_channel_1) { tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); @@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) { + clk_disable_unprepare(tcon->sclk0); clk_disable_unprepare(tcon->clk); } diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c index 9d9e8146db90..d7b409a3c0f8 100644 --- a/drivers/gpu/drm/vkms/vkms_crc.c +++ b/drivers/gpu/drm/vkms/vkms_crc.c @@ -1,4 +1,5 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ + #include "vkms_drv.h" #include <linux/crc32.h> #include <drm/drm_atomic.h> diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 177bbcb38306..eb56ee893761 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include <drm/drm_atomic_helper.h> diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83087877565c..7dcbecb5fac2 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -1,9 +1,4 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ +// SPDX-License-Identifier: GPL-2.0+ /** * DOC: vkms (Virtual Kernel Modesetting) diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index e4469cd3d254..81f1cfbeb936 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + #ifndef _VKMS_DRV_H_ #define _VKMS_DRV_H_ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 80311daed47a..138b0bb325cf 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include <linux/shmem_fs.h> diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 271a0eb9042c..4173e4f48334 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include <drm/drm_crtc_helper.h> diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 418817600ad1..0e67d2d42f0c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include <drm/drm_plane_helper.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 25afb1d594e3..7ef5dcb06104 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -26,6 +26,7 @@ **************************************************************************/ #include <linux/module.h> #include <linux/console.h> +#include <linux/dma-mapping.h> #include <drm/drmP.h> #include "vmwgfx_drv.h" @@ -34,7 +35,6 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_module.h> -#include <linux/intel-iommu.h> #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" #define VMWGFX_CHIP_SVGAII 0 @@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) } /** + * vmw_assume_iommu - Figure out whether coherent dma-remapping might be + * taking place. + * @dev: Pointer to the struct drm_device. + * + * Return: true if iommu present, false otherwise. + */ +static bool vmw_assume_iommu(struct drm_device *dev) +{ + const struct dma_map_ops *ops = get_dma_ops(dev->dev); + + return !dma_is_direct(ops) && ops && + ops->map_page != dma_direct_map_page; +} + +/** * vmw_dma_select_mode - Determine how DMA mappings should be set up for this * system. 
* @@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", [vmw_dma_map_populate] = "Keeping DMA mappings.", [vmw_dma_map_bind] = "Giving up DMA mappings early."}; -#ifdef CONFIG_X86 - const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); -#ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_enabled) { + if (vmw_force_coherent) + dev_priv->map_mode = vmw_dma_alloc_coherent; + else if (vmw_assume_iommu(dev_priv->dev)) dev_priv->map_mode = vmw_dma_map_populate; - goto out_fixup; - } -#endif - - if (!(vmw_force_iommu || vmw_force_coherent)) { + else if (!vmw_force_iommu) dev_priv->map_mode = vmw_dma_phys; - DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); - return 0; - } - - dev_priv->map_mode = vmw_dma_map_populate; - - if (dma_ops && dma_ops->sync_single_for_cpu) + else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) dev_priv->map_mode = vmw_dma_alloc_coherent; -#ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl() == 0) + else dev_priv->map_mode = vmw_dma_map_populate; -#endif -#ifdef CONFIG_INTEL_IOMMU -out_fixup: -#endif - if (dev_priv->map_mode == vmw_dma_map_populate && - vmw_restrict_iommu) + if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) dev_priv->map_mode = vmw_dma_map_bind; - if (vmw_force_coherent) - dev_priv->map_mode = vmw_dma_alloc_coherent; - -#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) - /* - * No coherent page pool - */ - if (dev_priv->map_mode == vmw_dma_alloc_coherent) + /* No TTM coherent page pool? FIXME: Ask TTM instead! */ + if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && + (dev_priv->map_mode == vmw_dma_alloc_coherent)) return -EINVAL; -#endif - -#else /* CONFIG_X86 */ - dev_priv->map_mode = vmw_dma_map_populate; -#endif /* CONFIG_X86 */ DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); - return 0; } @@ -625,24 +612,20 @@ out_fixup: * With 32-bit we can only handle 32 bit PFNs. Optionally set that * restriction also for 64-bit systems. 
*/ -#ifdef CONFIG_INTEL_IOMMU static int vmw_dma_masks(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; + int ret = 0; - if (intel_iommu_enabled && + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); + if (dev_priv->map_mode != vmw_dma_phys && (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { DRM_INFO("Restricting DMA addresses to 44 bits.\n"); - return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); } - return 0; -} -#else -static int vmw_dma_masks(struct vmw_private *dev_priv) -{ - return 0; + + return ret; } -#endif static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f2d13a72c05d..88b8178d4687 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, *p_fence = NULL; } - return 0; + return ret; } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b351fb5214d3..ed2f67822f45 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, struct drm_connector_state *conn_state; struct vmw_connector_state *vmw_conn_state; - if (!du->pref_active) { + if (!du->pref_active && new_crtc_state->enable) { ret = -EINVAL; goto clean; } @@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, user_fence_rep) { struct vmw_fence_obj *fence = NULL; - uint32_t handle; - int ret; + uint32_t handle = 0; + int ret = 0; if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || out_fence) diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 474b00e19697..0a7d4395d427 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = { .cpmem_ofs = 0x1f000000, .srm_ofs = 0x1f040000, .tpm_ofs = 0x1f060000, - .csi0_ofs = 0x1f030000, - .csi1_ofs = 0x1f038000, + .csi0_ofs = 0x1e030000, + .csi1_ofs = 0x1e038000, .ic_ofs = 0x1e020000, .disp0_ofs = 0x1e040000, .disp1_ofs = 0x1e048000, @@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = { .cpmem_ofs = 0x07000000, .srm_ofs = 0x07040000, .tpm_ofs = 0x07060000, - .csi0_ofs = 0x07030000, - .csi1_ofs = 0x07038000, + .csi0_ofs = 0x06030000, + .csi1_ofs = 0x06038000, .ic_ofs = 0x06020000, .disp0_ofs = 0x06040000, .disp1_ofs = 0x06048000, diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 2f8db9d62551..4a28f3fbb0a2 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -106,6 +106,7 @@ struct ipu_pre { void *buffer_virt; bool in_use; unsigned int safe_window_end; + unsigned int last_bufaddr; }; static DEFINE_MUTEX(ipu_pre_list_mutex); @@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + pre->last_bufaddr = bufaddr; val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | @@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) unsigned short current_yblock; u32 val; + if (bufaddr == pre->last_bufaddr) + return; + writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + 
pre->last_bufaddr = bufaddr; do { if (time_after(jiffies, timeout)) { diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index c530476edba6..ac9fda1b5a72 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -30,6 +30,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/kfifo.h> #include <linux/sched/signal.h> #include <linux/export.h> #include <linux/slab.h> @@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device); /* enqueue string to 'events' ring buffer */ void hid_debug_event(struct hid_device *hdev, char *buf) { - unsigned i; struct hid_debug_list *list; unsigned long flags; spin_lock_irqsave(&hdev->debug_list_lock, flags); - list_for_each_entry(list, &hdev->debug_list, node) { - for (i = 0; buf[i]; i++) - list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = - buf[i]; - list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; - } + list_for_each_entry(list, &hdev->debug_list, node) + kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); @@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu hid_debug_event(hdev, buf); kfree(buf); - wake_up_interruptible(&hdev->debug_wait); - + wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_dump_input); @@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) goto out; } - if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { - err = -ENOMEM; + err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); + if (err) { kfree(list); goto out; } @@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hid_debug_list *list = file->private_data; - int ret = 0, len; + int ret = 0, copied; DECLARE_WAITQUEUE(wait, current); mutex_lock(&list->read_mutex); - while (ret == 0) { - if (list->head == list->tail) { - add_wait_queue(&list->hdev->debug_wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); - - while (list->head == list->tail) { - if (file->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - break; - } - if (signal_pending(current)) { - ret = -ERESTARTSYS; - break; - } + if (kfifo_is_empty(&list->hid_debug_fifo)) { + add_wait_queue(&list->hdev->debug_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + while (kfifo_is_empty(&list->hid_debug_fifo)) { + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } - if (!list->hdev || !list->hdev->debug) { - ret = -EIO; - set_current_state(TASK_RUNNING); - goto out; - } + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } - /* allow O_NONBLOCK from other threads */ - mutex_unlock(&list->read_mutex); - schedule(); - mutex_lock(&list->read_mutex); - set_current_state(TASK_INTERRUPTIBLE); + /* if list->hdev is NULL we cannot remove_wait_queue(). + * if list->hdev->debug is 0 then hid_debug_unregister() + * was already called and list->hdev is being destroyed. + * if we add remove_wait_queue() here we can hit a race. 
+ */ + if (!list->hdev || !list->hdev->debug) { + ret = -EIO; + set_current_state(TASK_RUNNING); + goto out; } - set_current_state(TASK_RUNNING); - remove_wait_queue(&list->hdev->debug_wait, &wait); + /* allow O_NONBLOCK from other threads */ + mutex_unlock(&list->read_mutex); + schedule(); + mutex_lock(&list->read_mutex); + set_current_state(TASK_INTERRUPTIBLE); } - if (ret) - goto out; + __set_current_state(TASK_RUNNING); + remove_wait_queue(&list->hdev->debug_wait, &wait); - /* pass the ringbuffer contents to userspace */ -copy_rest: - if (list->tail == list->head) + if (ret) goto out; - if (list->tail > list->head) { - len = list->tail - list->head; - if (len > count) - len = count; - - if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { - ret = -EFAULT; - goto out; - } - ret += len; - list->head += len; - } else { - len = HID_DEBUG_BUFSIZE - list->head; - if (len > count) - len = count; - - if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { - ret = -EFAULT; - goto out; - } - list->head = 0; - ret += len; - count -= len; - if (count > 0) - goto copy_rest; - } - } + + /* pass the fifo content to userspace, locking is not needed with only + * one concurrent reader and one concurrent writer + */ + ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); + if (ret) + goto out; + ret = copied; out: mutex_unlock(&list->read_mutex); return ret; @@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) struct hid_debug_list *list = file->private_data; poll_wait(file, &list->hdev->debug_wait, wait); - if (list->head != list->tail) + if (!kfifo_is_empty(&list->hid_debug_fifo)) return EPOLLIN | EPOLLRDNORM; if (!list->hdev->debug) return EPOLLERR | EPOLLHUP; @@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); - kfree(list->hid_debug_buf); + kfifo_free(&list->hid_debug_fifo); kfree(list); return 0; @@ -1246,4 +1221,3 @@ void hid_debug_exit(void) { debugfs_remove_recursive(hid_debug_root); } - diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4adec4ab7d06..59ee01f3d022 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data) fan5pin |= cr1b & BIT(5); fan5pin |= creb & BIT(5); - fan6pin = creb & BIT(3); + fan6pin = !dsw_en && (cr2d & BIT(1)); + fan6pin |= creb & BIT(3); pwm5pin |= cr2d & BIT(7); pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index ec6e69aa3a8e..d2fbb4bb4a43 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev) bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); } +static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev) +{ + i2c_dev->curr_msg = NULL; + i2c_dev->num_msgs = 0; + + i2c_dev->msg_buf = NULL; + i2c_dev->msg_buf_remaining = 0; +} + /* * Note about I2C_C_CLEAR on error: * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in @@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], time_left = wait_for_completion_timeout(&i2c_dev->completion, adap->timeout); + + bcm2835_i2c_finish_transfer(i2c_dev); + if (!time_left) { 
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR); diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index b13605718291..d917cefc5a19 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if (id->recv_count > CDNS_I2C_FIFO_DEPTH) + if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ctrl_reg |= CDNS_I2C_CR_HOLD; + else + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if (id->send_count > CDNS_I2C_FIFO_DEPTH) + if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ctrl_reg |= CDNS_I2C_CR_HOLD; + else + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. */ diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b1086bfb0465..cd9c65f3d404 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int omap_i2c_runtime_suspend(struct device *dev) +static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); @@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev) return 0; } -static int omap_i2c_runtime_resume(struct device *dev) +static int __maybe_unused omap_i2c_runtime_resume(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); @@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev) } static const struct dev_pm_ops omap_i2c_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, omap_i2c_runtime_resume, NULL) }; -#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) -#else -#define OMAP_I2C_PM_OPS NULL -#endif /* CONFIG_PM */ static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", - .pm = OMAP_I2C_PM_OPS, + .pm = &omap_i2c_pm_ops, .of_match_table = of_match_ptr(omap_i2c_of_match), }, }; diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index c39f89d2deba..2dc628d4f1ae 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, ret = i3c_master_retrieve_dev_info(newdev); if (ret) - goto err_free_dev; + goto err_detach_dev; olddev = i3c_master_search_i3c_dev_duplicate(newdev); if (olddev) { diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index f8c00b94817f..bb03079fbade 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master, spin_unlock_irqrestore(&master->xferqueue.lock, flags); } -static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, - struct dw_i3c_xfer *xfer) +static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master, + struct dw_i3c_xfer *xfer) { - unsigned long flags; - - 
spin_lock_irqsave(&master->xferqueue.lock, flags); if (master->xferqueue.cur == xfer) { u32 status; @@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, } else { list_del_init(&xfer->node); } +} + +static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, + struct dw_i3c_xfer *xfer) +{ + unsigned long flags; + + spin_lock_irqsave(&master->xferqueue.lock, flags); + dw_i3c_master_dequeue_xfer_locked(master, xfer); spin_unlock_irqrestore(&master->xferqueue.lock, flags); } @@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr) complete(&xfer->comp); if (ret < 0) { - dw_i3c_master_dequeue_xfer(master, xfer); + dw_i3c_master_dequeue_xfer_locked(master, xfer); writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, master->regs + DEVICE_CTRL); } diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index da58020a144e..33a28cde126c 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense); int ide_queue_sense_rq(ide_drive_t *drive, void *special) { - struct request *sense_rq = drive->sense_rq; + ide_hwif_t *hwif = drive->hwif; + struct request *sense_rq; + unsigned long flags; + + spin_lock_irqsave(&hwif->lock, flags); /* deferred failure from ide_prep_sense() */ if (!drive->sense_rq_armed) { printk(KERN_WARNING PFX "%s: error queuing a sense request\n", drive->name); + spin_unlock_irqrestore(&hwif->lock, flags); return -ENOMEM; } + sense_rq = drive->sense_rq; ide_req(sense_rq)->special = special; drive->sense_rq_armed = false; drive->hwif->rq = NULL; ide_insert_request_head(drive, sense_rq); + spin_unlock_irqrestore(&hwif->lock, flags); return 0; } EXPORT_SYMBOL_GPL(ide_queue_sense_rq); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 8445b484ae69..b137f27a34d5 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error, } if (!blk_update_request(rq, error, nr_bytes)) { - if (rq == drive->sense_rq) + if (rq == drive->sense_rq) { drive->sense_rq = NULL; + drive->sense_rq_active = false; + } __blk_mq_end_request(rq, error); return 0; @@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3); } -/* - * Issue a new request to a device. - */ -blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd) +blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq, + bool local_requeue) { - ide_drive_t *drive = hctx->queue->queuedata; - ide_hwif_t *hwif = drive->hwif; + ide_hwif_t *hwif = drive->hwif; struct ide_host *host = hwif->host; - struct request *rq = bd->rq; ide_startstop_t startstop; if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { @@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, if (ide_lock_host(host, hwif)) return BLK_STS_DEV_RESOURCE; - blk_mq_start_request(rq); - spin_lock_irq(&hwif->lock); if (!ide_lock_port(hwif)) { @@ -511,18 +506,6 @@ repeat: drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); /* - * we know that the queue isn't empty, but this can happen - * if ->prep_rq() decides to kill a request - */ - if (!rq) { - rq = bd->rq; - if (!rq) { - ide_unlock_port(hwif); - goto out; - } - } - - /* * Sanity: don't accept a request that isn't a PM request * if we are currently power managed. 
This is very important as * blk_stop_queue() doesn't prevent the blk_fetch_request() @@ -560,9 +543,12 @@ repeat: } } else { plug_device: + if (local_requeue) + list_add(&rq->queuelist, &drive->rq_list); spin_unlock_irq(&hwif->lock); ide_unlock_host(host); - ide_requeue_and_plug(drive, rq); + if (!local_requeue) + ide_requeue_and_plug(drive, rq); return BLK_STS_OK; } @@ -573,6 +559,26 @@ out: return BLK_STS_OK; } +/* + * Issue a new request to a device. + */ +blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + ide_drive_t *drive = hctx->queue->queuedata; + ide_hwif_t *hwif = drive->hwif; + + spin_lock_irq(&hwif->lock); + if (drive->sense_rq_active) { + spin_unlock_irq(&hwif->lock); + return BLK_STS_DEV_RESOURCE; + } + spin_unlock_irq(&hwif->lock); + + blk_mq_start_request(bd->rq); + return ide_issue_rq(drive, bd->rq, false); +} + static int drive_is_ready(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; @@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer); void ide_insert_request_head(ide_drive_t *drive, struct request *rq) { - ide_hwif_t *hwif = drive->hwif; - unsigned long flags; - - spin_lock_irqsave(&hwif->lock, flags); + drive->sense_rq_active = true; list_add_tail(&rq->queuelist, &drive->rq_list); - spin_unlock_irqrestore(&hwif->lock, flags); - kblockd_schedule_work(&drive->rq_work); } EXPORT_SYMBOL_GPL(ide_insert_request_head); diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c index 102aa3bc3e7f..8af7af6001eb 100644 --- a/drivers/ide/ide-park.c +++ b/drivers/ide/ide-park.c @@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; scsi_req(rq)->cmd_len = 1; ide_req(rq)->type = ATA_PRIV_MISC; + spin_lock_irq(&hwif->lock); ide_insert_request_head(drive, rq); + spin_unlock_irq(&hwif->lock); out: return; diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 63627be0811a..5aeaca24a28f 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work) ide_drive_t *drive = container_of(work, ide_drive_t, rq_work); ide_hwif_t *hwif = drive->hwif; struct request *rq; + blk_status_t ret; LIST_HEAD(list); - spin_lock_irq(&hwif->lock); - if (!list_empty(&drive->rq_list)) - list_splice_init(&drive->rq_list, &list); - spin_unlock_irq(&hwif->lock); + blk_mq_quiesce_queue(drive->queue); - while (!list_empty(&list)) { - rq = list_first_entry(&list, struct request, queuelist); + ret = BLK_STS_OK; + spin_lock_irq(&hwif->lock); + while (!list_empty(&drive->rq_list)) { + rq = list_first_entry(&drive->rq_list, struct request, queuelist); list_del_init(&rq->queuelist); - blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL); + + spin_unlock_irq(&hwif->lock); + ret = ide_issue_rq(drive, rq, true); + spin_lock_irq(&hwif->lock); } + spin_unlock_irq(&hwif->lock); + + blk_mq_unquiesce_queue(drive->queue); + + if (ret != BLK_STS_OK) + kblockd_schedule_work(&drive->rq_work); } static const u8 ide_hwif_to_major[] = diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 031d568b4972..4e339cfd0c54 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -27,9 +27,18 @@ #include <linux/iio/machine.h> #include <linux/iio/driver.h> -#define AXP288_ADC_EN_MASK 0xF1 -#define AXP288_ADC_TS_PIN_GPADC 0xF2 -#define AXP288_ADC_TS_PIN_ON 0xF3 +/* + * This mask enables all ADCs except for the battery temp-sensor (TS), that is + * left as-is 
to avoid breaking charging on devices without a temp-sensor. + */ +#define AXP288_ADC_EN_MASK 0xF0 +#define AXP288_ADC_TS_ENABLE 0x01 + +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0) +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) +#define AXP288_ADC_TS_CURRENT_ON (3 << 0) enum axp288_adc_id { AXP288_ADC_TS, @@ -44,6 +53,7 @@ enum axp288_adc_id { struct axp288_adc_info { int irq; struct regmap *regmap; + bool ts_enabled; }; static const struct iio_chan_spec axp288_adc_channels[] = { @@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address, return IIO_VAL_INT; } -static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, - unsigned long address) +/* + * The current-source used for the battery temp-sensor (TS) is shared + * with the GPADC. For proper fuel-gauge and charger operation the TS + * current-source needs to be permanently on. But to read the GPADC we + * need to temporary switch the TS current-source to ondemand, so that + * the GPADC can use it, otherwise we will always read an all 0 value. + */ +static int axp288_adc_set_ts(struct axp288_adc_info *info, + unsigned int mode, unsigned long address) { int ret; - /* channels other than GPADC do not need to switch TS pin */ + /* No need to switch the current-source if the TS pin is disabled */ + if (!info->ts_enabled) + return 0; + + /* Channels other than GPADC do not need the current source */ if (address != AXP288_GP_ADC_H) return 0; - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode); if (ret) return ret; /* When switching to the GPADC pin give things some time to settle */ - if (mode == AXP288_ADC_TS_PIN_GPADC) + if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND) usleep_range(6000, 10000); return 0; @@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); switch (mask) { case IIO_CHAN_INFO_RAW: - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND, chan->address)) { dev_err(&indio_dev->dev, "GPADC mode\n"); ret = -EINVAL; break; } ret = axp288_adc_read_channel(val, chan->address, info->regmap); - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON, chan->address)) dev_err(&indio_dev->dev, "TS pin restore\n"); break; @@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, return ret; } -static int axp288_adc_set_state(struct regmap *regmap) +static int axp288_adc_initialize(struct axp288_adc_info *info) { - /* ADC should be always enabled for internal FG to function */ - if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) - return -EIO; + int ret, adc_enable_val; + + /* + * Determine if the TS pin is enabled and set the TS current-source + * accordingly. 
+ */ + ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val); + if (ret) + return ret; + + if (adc_enable_val & AXP288_ADC_TS_ENABLE) { + info->ts_enabled = true; + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON); + } else { + info->ts_enabled = false; + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_OFF); + } + if (ret) + return ret; - return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); + /* Turn on the ADC for all channels except TS, leave TS as is */ + return regmap_update_bits(info->regmap, AXP20X_ADC_EN1, + AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK); } static const struct iio_info axp288_adc_iio_info = { @@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev) * Set ADC to enabled state at all time, including system suspend. * otherwise internal fuel gauge functionality may be affected. */ - ret = axp288_adc_set_state(axp20x->regmap); + ret = axp288_adc_initialize(info); if (ret) { dev_err(&pdev->dev, "unable to enable ADC device\n"); return ret; diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 184d686ebd99..8b4568edd5cb 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -41,6 +41,7 @@ #define ADS8688_VREF_MV 4096 #define ADS8688_REALBITS 16 +#define ADS8688_MAX_CHANNELS 8 /* * enum ads8688_range - ADS8688 reference voltage range @@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; - u16 buffer[8]; + u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; int i, j = 0; for (i = 0; i < indio_dev->masklength; i++) { diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c index a406ad31b096..3a20cb5d9bff 100644 --- a/drivers/iio/chemical/atlas-ph-sensor.c +++ b/drivers/iio/chemical/atlas-ph-sensor.c @@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_TEMP: - *val = 1; /* 0.01 */ - *val2 = 100; - break; + *val = 10; + return IIO_VAL_INT; case IIO_PH: *val = 1; /* 0.001 */ *val2 = 1000; @@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct atlas_data *data = iio_priv(indio_dev); - __be32 reg = cpu_to_be32(val); + __be32 reg = cpu_to_be32(val / 10); if (val2 != 0 || val < 0 || val > 20000) return -EINVAL; diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 3cd830d52967..616734313f0c 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, #endif struct ib_device *ib_device_get_by_index(u32 ifindex); -void ib_device_put(struct ib_device *device); /* RDMA device netlink */ void nldev_init(void); void nldev_exit(void); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 8872453e26c0..238ec42778ef 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index) down_read(&lists_rwsem); device = __ib_device_get_by_index(index); if (device) { - /* Do not return a device if unregistration has started. 
*/ - if (!refcount_inc_not_zero(&device->refcount)) + if (!ib_device_try_get(device)) device = NULL; } up_read(&lists_rwsem); return device; } +/** + * ib_device_put - Release IB device reference + * @device: device whose reference to be released + * + * ib_device_put() releases reference to the IB device to allow it to be + * unregistered and eventually free. + */ void ib_device_put(struct ib_device *device) { if (refcount_dec_and_test(&device->refcount)) complete(&device->unreg_completion); } +EXPORT_SYMBOL(ib_device_put); static struct ib_device *__ib_device_get_by_name(const char *name) { @@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size) rwlock_init(&device->client_data_lock); INIT_LIST_HEAD(&device->client_data_list); INIT_LIST_HEAD(&device->port_list); - refcount_set(&device->refcount, 1); init_completion(&device->unreg_completion); return device; @@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name, goto cg_cleanup; } + refcount_set(&device->refcount, 1); device->reg_state = IB_DEV_REGISTERED; list_for_each_entry(client, &client_list, list) diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index a4ec43093cb3..acb882f279cb 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, umem->writable = 1; umem->is_odp = 1; odp_data->per_mm = per_mm; + umem->owning_mm = per_mm->mm; + mmgrab(umem->owning_mm); mutex_init(&odp_data->umem_mutex); init_completion(&odp_data->notifier_completion); @@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, out_page_list: vfree(odp_data->page_list); out_odp_data: + mmdrop(umem->owning_mm); kfree(odp_data); return ERR_PTR(ret); } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 2890a77339e1..5f366838b7ff 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref) if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); + if (file->async_file) + kref_put(&file->async_file->ref, + ib_uverbs_release_async_event_file); put_device(&file->device->dev); kfree(file); } @@ -964,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) /* Get an arbitrary mm pointer that hasn't been cleaned yet */ mutex_lock(&ufile->umap_lock); - if (!list_empty(&ufile->umaps)) { - mm = list_first_entry(&ufile->umaps, - struct rdma_umap_priv, list) - ->vma->vm_mm; - mmget(mm); + while (!list_empty(&ufile->umaps)) { + int ret; + + priv = list_first_entry(&ufile->umaps, + struct rdma_umap_priv, list); + mm = priv->vma->vm_mm; + ret = mmget_not_zero(mm); + if (!ret) { + list_del_init(&priv->list); + mm = NULL; + continue; + } + break; } mutex_unlock(&ufile->umap_lock); if (!mm) @@ -1096,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) list_del_init(&file->list); mutex_unlock(&file->device->lists_mutex); - if (file->async_file) - kref_put(&file->async_file->ref, - ib_uverbs_release_async_event_file); - kref_put(&file->ref, ib_uverbs_release_file); return 0; diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c index 5030ec480370..2a3f2f01028d 100644 --- a/drivers/infiniband/core/uverbs_std_types_device.c +++ 
b/drivers/infiniband/core/uverbs_std_types_device.c @@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr, static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( struct uverbs_attr_bundle *attrs) { - struct ib_device *ib_dev = attrs->ufile->device->ib_dev; + struct ib_device *ib_dev; struct ib_port_attr attr = {}; struct ib_uverbs_query_port_resp_ex resp = {}; + struct ib_ucontext *ucontext; int ret; u8 port_num; + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ if (!ib_dev->ops.query_port) return -EOPNOTSUPP; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index c13c0ba30f63..d499cd61c0e8 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, static int c4iw_rdev_open(struct c4iw_rdev *rdev) { int err; + unsigned int factor; c4iw_init_dev_ucontext(rdev, &rdev->uctx); @@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) return -EINVAL; } - rdev->qpmask = rdev->lldi.udb_density - 1; - rdev->cqmask = rdev->lldi.ucq_density - 1; + /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */ + if (rdev->lldi.sge_host_page_size > PAGE_SIZE) { + pr_err("%s: unsupported sge host page size %u\n", + pci_name(rdev->lldi.pdev), + rdev->lldi.sge_host_page_size); + return -EINVAL; + } + + factor = PAGE_SIZE / rdev->lldi.sge_host_page_size; + rdev->qpmask = (rdev->lldi.udb_density * factor) - 1; + rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1; + pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n", pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index c22ebc774a6a..f9a7e9d29c8b 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) vmf = 1; break; case STATUS: - if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { + if (flags & VM_WRITE) { ret = -EPERM; goto done; } diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 88242fe95eaa..bf96067876c9 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = packet->ohdr->u.ud.imm_data; wc.wc_flags = IB_WC_WITH_IMM; - tlen -= sizeof(u32); } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 960b1946c365..12deacf442cf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct hns_roce_ib_create_srq_resp resp = {}; struct hns_roce_srq *srq; int srq_desc_size; int srq_buf_size; @@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, srq->event = hns_roce_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->srqn; + 
resp.srqn = srq->srqn; if (udata) { - if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { + if (ib_copy_to_udata(udata, &resp, + min(udata->outlen, sizeof(resp)))) { ret = -EFAULT; - goto err_wrid; + goto err_srqc_alloc; } } return &srq->ibsrq; +err_srqc_alloc: + hns_roce_srq_free(hr_dev, srq); + err_wrid: kvfree(srq->wrid); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 25439da8976c..936ee1314bcd 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); if (sqp->tx_ring[wire_tx_ix].ah) - rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); + mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); sqp->tx_ring[wire_tx_ix].ah = ah; ib_dma_sync_single_for_cpu(&dev->ib_dev, sqp->tx_ring[wire_tx_ix].buf.map, @@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_SEND: - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; @@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) " status = %d, wrid = 0x%llx\n", ctx->slave, wc.status, wc.wr_id); if (!MLX4_TUN_IS_RECV(wc.wr_id)) { - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index e8a1e4498e3f..798591a18484 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = { UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), UAPI_DEF_CHAIN_OBJ_TREE( UVERBS_OBJECT_FLOW, - &mlx5_ib_fs, - UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), + &mlx5_ib_fs), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, &mlx5_ib_flow_actions), {}, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 01e0f6200631..4ee32964e1dd 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) struct prefetch_mr_work *w = container_of(work, struct prefetch_mr_work, work); - if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED) + if (ib_device_try_get(&w->dev->ib_dev)) { mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list, w->num_sge); - + ib_device_put(&w->dev->ib_dev); + } + put_device(&w->dev->ib_dev.dev); kfree(w); } @@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list, num_sge); - if (dev->ib_dev.reg_state != IB_DEV_REGISTERED) - return -ENODEV; - work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); if (!work) return -ENOMEM; memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); + get_device(&dev->ib_dev.dev); work->dev = dev; work->pf_flags = pf_flags; work->num_sge = num_sge; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index dd2ae640bc84..7db778d96ef5 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct 
ib_pd *pd, } if (!check_flags_mask(ucmd.flags, + MLX5_QP_FLAG_ALLOW_SCATTER_CQE | + MLX5_QP_FLAG_BFREG_INDEX | + MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE | + MLX5_QP_FLAG_SCATTER_CQE | MLX5_QP_FLAG_SIGNATURE | - MLX5_QP_FLAG_SCATTER_CQE | - MLX5_QP_FLAG_TUNNEL_OFFLOADS | - MLX5_QP_FLAG_BFREG_INDEX | - MLX5_QP_FLAG_TYPE_DCT | - MLX5_QP_FLAG_TYPE_DCI | - MLX5_QP_FLAG_ALLOW_SCATTER_CQE | - MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)) + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC | + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | + MLX5_QP_FLAG_TUNNEL_OFFLOADS | + MLX5_QP_FLAG_TYPE_DCI | + MLX5_QP_FLAG_TYPE_DCT)) return -EINVAL; err = get_qp_user_index(to_mucontext(pd->uobject->context), diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 868da0ece7ba..445ea19a2ec8 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = ohdr->u.ud.imm_data; wc.wc_flags = IB_WC_WITH_IMM; - tlen -= sizeof(u32); } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index a1bd8cfc2c25..c6cc3e4ab71d 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -2910,6 +2910,8 @@ send: goto op_err; if (!ret) goto rnr_nak; + if (wqe->length > qp->r_len) + goto inv_err; break; case IB_WR_RDMA_WRITE_WITH_IMM: @@ -3078,7 +3080,10 @@ op_err: goto err; inv_err: - send_status = IB_WC_REM_INV_REQ_ERR; + send_status = + sqp->ibqp.qp_type == IB_QPT_RC ? + IB_WC_REM_INV_REQ_ERR : + IB_WC_SUCCESS; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 1da119d901a9..73e808c1e6ad 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -248,7 +248,6 @@ struct ipoib_cm_tx { struct list_head list; struct net_device *dev; struct ipoib_neigh *neigh; - struct ipoib_path *path; struct ipoib_tx_buf *tx_ring; unsigned int tx_head; unsigned int tx_tail; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 0428e01e8f69..aa9dcfc36cd3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path neigh->cm = tx; tx->neigh = neigh; - tx->path = path; tx->dev = dev; list_add(&tx->list, &priv->cm.start_list); set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); @@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) neigh->daddr + QPN_AND_OPTIONS_OFFSET); goto free_neigh; } - memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec)); + memcpy(&pathrec, &path->pathrec, sizeof(pathrec)); spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 31d91538bbf4..694324b37480 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_rdma_ch *ch; - int i, j; u8 status; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); @@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) if 
(status) return FAILED; - for (i = 0; i < target->ch_count; i++) { - ch = &target->ch[i]; - for (j = 0; j < target->req_ring_size; ++j) { - struct srp_request *req = &ch->req_ring[j]; - - srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); - } - } - return SUCCESS; } diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4713957b0cbb..a878351f1643 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -420,7 +420,7 @@ config KEYBOARD_MPR121 config KEYBOARD_SNVS_PWRKEY tristate "IMX SNVS Power Key Driver" - depends on SOC_IMX6SX + depends on SOC_IMX6SX || SOC_IMX7D depends on OF help This is the snvs powerkey driver for the Freescale i.MX application diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 312916f99597..73686c2460ce 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c @@ -75,9 +75,7 @@ struct cap11xx_led { struct cap11xx_priv *priv; struct led_classdev cdev; - struct work_struct work; u32 reg; - enum led_brightness new_brightness; }; #endif @@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev) } #ifdef CONFIG_LEDS_CLASS -static void cap11xx_led_work(struct work_struct *work) +static int cap11xx_led_set(struct led_classdev *cdev, + enum led_brightness value) { - struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); + struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); struct cap11xx_priv *priv = led->priv; - int value = led->new_brightness; /* - * All LEDs share the same duty cycle as this is a HW limitation. - * Brightness levels per LED are either 0 (OFF) and 1 (ON). + * All LEDs share the same duty cycle as this is a HW + * limitation. Brightness levels per LED are either + * 0 (OFF) and 1 (ON). */ - regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, - BIT(led->reg), value ? BIT(led->reg) : 0); -} - -static void cap11xx_led_set(struct led_classdev *cdev, - enum led_brightness value) -{ - struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); - - if (led->new_brightness == value) - return; - - led->new_brightness = value; - schedule_work(&led->work); + return regmap_update_bits(priv->regmap, + CAP11XX_REG_LED_OUTPUT_CONTROL, + BIT(led->reg), + value ? BIT(led->reg) : 0); } static int cap11xx_init_leds(struct device *dev, @@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev, led->cdev.default_trigger = of_get_property(child, "linux,default-trigger", NULL); led->cdev.flags = 0; - led->cdev.brightness_set = cap11xx_led_set; + led->cdev.brightness_set_blocking = cap11xx_led_set; led->cdev.max_brightness = 1; led->cdev.brightness = LED_OFF; @@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev, led->reg = reg; led->priv = priv; - INIT_WORK(&led->work, cap11xx_led_work); - error = devm_led_classdev_register(dev, &led->cdev); if (error) { of_node_put(child); diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 403452ef00e6..3d1cb7bf5e35 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev) keypad->stopped = true; spin_unlock_irq(&keypad->lock); - flush_work(&keypad->work.work); + flush_delayed_work(&keypad->work); /* * matrix_keypad_scan() will leave IRQs enabled; * we should disable them now. 
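
The cap11xx hunk above, and the qt2160 and apanel hunks that follow, all apply the same conversion: the driver drops its private work item and registers a brightness_set_blocking callback instead, letting the LED core worry about deferring to process context when a trigger cannot sleep. A minimal sketch of that pattern is shown here for orientation only; the "foo" names, register address, and regmap handle are hypothetical and are not taken from any of these patches:

    /* Illustrative sketch only -- "foo" identifiers are hypothetical. */
    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/leds.h>
    #include <linux/regmap.h>

    #define FOO_REG_LED_CTRL 0x10 /* hypothetical LED control register */

    struct foo_led {
    	struct led_classdev cdev;
    	struct regmap *regmap;
    	unsigned int bit;
    };

    static int foo_led_set(struct led_classdev *cdev, enum led_brightness value)
    {
    	struct foo_led *led = container_of(cdev, struct foo_led, cdev);

    	/* May sleep: the LED core only calls this from process context. */
    	return regmap_update_bits(led->regmap, FOO_REG_LED_CTRL,
    				  BIT(led->bit), value ? BIT(led->bit) : 0);
    }

    static int foo_led_register(struct device *dev, struct foo_led *led)
    {
    	led->cdev.name = "foo:status";
    	led->cdev.max_brightness = 1;
    	/* Blocking callback replaces brightness_set + private work item. */
    	led->cdev.brightness_set_blocking = foo_led_set;

    	return devm_led_classdev_register(dev, &led->cdev);
    }

Compared with the old brightness_set plus schedule_work() arrangement, this removes the driver-private work item and cached "new brightness" state, and lets an error from the bus write propagate back to the LED core, which is the shape the conversions below also take.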
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c index 43b86482dda0..d466bc07aebb 100644 --- a/drivers/input/keyboard/qt2160.c +++ b/drivers/input/keyboard/qt2160.c @@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = { struct qt2160_led { struct qt2160_data *qt2160; struct led_classdev cdev; - struct work_struct work; char name[32]; int id; - enum led_brightness new_brightness; + enum led_brightness brightness; }; #endif @@ -74,7 +73,6 @@ struct qt2160_data { u16 key_matrix; #ifdef CONFIG_LEDS_CLASS struct qt2160_led leds[QT2160_NUM_LEDS_X]; - struct mutex led_lock; #endif }; @@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data); #ifdef CONFIG_LEDS_CLASS -static void qt2160_led_work(struct work_struct *work) +static int qt2160_led_set(struct led_classdev *cdev, + enum led_brightness value) { - struct qt2160_led *led = container_of(work, struct qt2160_led, work); + struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); struct qt2160_data *qt2160 = led->qt2160; struct i2c_client *client = qt2160->client; - int value = led->new_brightness; u32 drive, pwmen; - mutex_lock(&qt2160->led_lock); - - drive = qt2160_read(client, QT2160_CMD_DRIVE_X); - pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); - if (value != LED_OFF) { - drive |= (1 << led->id); - pwmen |= (1 << led->id); - - } else { - drive &= ~(1 << led->id); - pwmen &= ~(1 << led->id); - } - qt2160_write(client, QT2160_CMD_DRIVE_X, drive); - qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); + if (value != led->brightness) { + drive = qt2160_read(client, QT2160_CMD_DRIVE_X); + pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); + if (value != LED_OFF) { + drive |= BIT(led->id); + pwmen |= BIT(led->id); - /* - * Changing this register will change the brightness - * of every LED in the qt2160. It's a HW limitation. - */ - if (value != LED_OFF) - qt2160_write(client, QT2160_CMD_PWM_DUTY, value); + } else { + drive &= ~BIT(led->id); + pwmen &= ~BIT(led->id); + } + qt2160_write(client, QT2160_CMD_DRIVE_X, drive); + qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); - mutex_unlock(&qt2160->led_lock); -} + /* + * Changing this register will change the brightness + * of every LED in the qt2160. It's a HW limitation. 
+ */ + if (value != LED_OFF) + qt2160_write(client, QT2160_CMD_PWM_DUTY, value); -static void qt2160_led_set(struct led_classdev *cdev, - enum led_brightness value) -{ - struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); + led->brightness = value; + } - led->new_brightness = value; - schedule_work(&led->work); + return 0; } #endif /* CONFIG_LEDS_CLASS */ @@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160) int ret; int i; - mutex_init(&qt2160->led_lock); - for (i = 0; i < QT2160_NUM_LEDS_X; i++) { struct qt2160_led *led = &qt2160->leds[i]; snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); led->cdev.name = led->name; - led->cdev.brightness_set = qt2160_led_set; + led->cdev.brightness_set_blocking = qt2160_led_set; led->cdev.brightness = LED_OFF; led->id = i; led->qt2160 = qt2160; - INIT_WORK(&led->work, qt2160_led_work); - ret = led_classdev_register(&client->dev, &led->cdev); if (ret < 0) return ret; @@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160) { int i; - for (i = 0; i < QT2160_NUM_LEDS_X; i++) { + for (i = 0; i < QT2160_NUM_LEDS_X; i++) led_classdev_unregister(&qt2160->leds[i].cdev); - cancel_work_sync(&qt2160->leds[i].work); - } } #else diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index babcfb165e4f..3b85631fde91 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c @@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev) input_dev->id.bustype = BUS_HOST; + keypad_data->input_dev = input_dev; + error = keypad_matrix_key_parse_dt(keypad_data); if (error) return error; @@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev) input_set_drvdata(input_dev, keypad_data); - keypad_data->input_dev = input_dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); keypad_data->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(keypad_data->base)) diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c index 094bddf56755..c1e66f45d552 100644 --- a/drivers/input/misc/apanel.c +++ b/drivers/input/misc/apanel.c @@ -22,7 +22,6 @@ #include <linux/io.h> #include <linux/input-polldev.h> #include <linux/i2c.h> -#include <linux/workqueue.h> #include <linux/leds.h> #define APANEL_NAME "Fujitsu Application Panel" @@ -59,8 +58,6 @@ struct apanel { struct i2c_client *client; unsigned short keymap[MAX_PANEL_KEYS]; u16 nkeys; - u16 led_bits; - struct work_struct led_work; struct led_classdev mail_led; }; @@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev) report_key(idev, ap->keymap[i]); } -/* Track state changes of LED */ -static void led_update(struct work_struct *work) -{ - struct apanel *ap = container_of(work, struct apanel, led_work); - - i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits); -} - -static void mail_led_set(struct led_classdev *led, +static int mail_led_set(struct led_classdev *led, enum led_brightness value) { struct apanel *ap = container_of(led, struct apanel, mail_led); + u16 led_bits = value != LED_OFF ? 
0x8000 : 0x0000; - if (value != LED_OFF) - ap->led_bits |= 0x8000; - else - ap->led_bits &= ~0x8000; - - schedule_work(&ap->led_work); + return i2c_smbus_write_word_data(ap->client, 0x10, led_bits); } static int apanel_remove(struct i2c_client *client) @@ -179,7 +164,7 @@ static struct apanel apanel = { }, .mail_led = { .name = "mail:blue", - .brightness_set = mail_led_set, + .brightness_set_blocking = mail_led_set, }, }; @@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client, if (err) goto out3; - INIT_WORK(&ap->led_work, led_update); if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { err = led_classdev_register(&client->dev, &ap->mail_led); if (err) diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c index 1efcfdf9f8a8..dd9dd4e40827 100644 --- a/drivers/input/misc/bma150.c +++ b/drivers/input/misc/bma150.c @@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) idev->close = bma150_irq_close; input_set_drvdata(idev, bma150); + bma150->input = idev; + error = input_register_device(idev); if (error) { input_free_device(idev); return error; } - bma150->input = idev; return 0; } @@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) bma150_init_input_device(bma150, ipoll_dev->input); + bma150->input_polled = ipoll_dev; + bma150->input = ipoll_dev->input; + error = input_register_polled_device(ipoll_dev); if (error) { input_free_polled_device(ipoll_dev); return error; } - bma150->input_polled = ipoll_dev; - bma150->input = ipoll_dev->input; - return 0; } diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c index 55da191ae550..dbb6d9e1b947 100644 --- a/drivers/input/misc/pwm-vibra.c +++ b/drivers/input/misc/pwm-vibra.c @@ -34,6 +34,7 @@ struct pwm_vibrator { struct work_struct play_work; u16 level; u32 direction_duty_cycle; + bool vcc_on; }; static int pwm_vibrator_start(struct pwm_vibrator *vibrator) @@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) struct pwm_state state; int err; - err = regulator_enable(vibrator->vcc); - if (err) { - dev_err(pdev, "failed to enable regulator: %d", err); - return err; + if (!vibrator->vcc_on) { + err = regulator_enable(vibrator->vcc); + if (err) { + dev_err(pdev, "failed to enable regulator: %d", err); + return err; + } + vibrator->vcc_on = true; } pwm_get_state(vibrator->pwm, &state); @@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) { - regulator_disable(vibrator->vcc); - if (vibrator->pwm_dir) pwm_disable(vibrator->pwm_dir); pwm_disable(vibrator->pwm); + + if (vibrator->vcc_on) { + regulator_disable(vibrator->vcc); + vibrator->vcc_on = false; + } } static void pwm_vibrator_play_work(struct work_struct *work) diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index f322a1768fbb..225ae6980182 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id); static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0000", 0 }, { "ELAN0100", 0 }, - { "ELAN0501", 0 }, { "ELAN0600", 0 }, { "ELAN0602", 0 }, { "ELAN0605", 0 }, @@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN060C", 0 }, { "ELAN0611", 0 }, { "ELAN0612", 0 }, + { "ELAN0617", 0 }, { "ELAN0618", 0 }, { "ELAN061C", 0 }, { "ELAN061D", 0 }, diff --git a/drivers/input/mouse/elantech.c 
b/drivers/input/mouse/elantech.c index 9fe075c137dc..a7f8b1614559 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Asus UX31 0x361f00 20, 15, 0e clickpad * Asus UX32VD 0x361f02 00, 15, 0e clickpad * Avatar AVIU-145A2 0x361f00 ? clickpad + * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) + * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons @@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), }, }, + { + /* Fujitsu H780 also has a middle button */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), + }, + }, #endif { } }; diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c index bae08226e3d9..a7cfab3db9ee 100644 --- a/drivers/input/serio/olpc_apsp.c +++ b/drivers/input/serio/olpc_apsp.c @@ -23,7 +23,6 @@ #include <linux/of.h> #include <linux/slab.h> #include <linux/delay.h> -#include <linux/clk.h> /* * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller. @@ -75,7 +74,6 @@ struct olpc_apsp { struct serio *kbio; struct serio *padio; void __iomem *base; - struct clk *clk; int open_count; int irq; }; @@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port) struct olpc_apsp *priv = port->port_data; unsigned int tmp; unsigned long l; - int error; if (priv->open_count++ == 0) { - error = clk_prepare_enable(priv->clk); - if (error) - return error; - l = readl(priv->base + COMMAND_FIFO_STATUS); if (!(l & CMD_STS_MASK)) { dev_err(priv->dev, "SP cannot accept commands.\n"); - clk_disable_unprepare(priv->clk); return -EIO; } @@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port) /* Disable interrupt 0 */ tmp = readl(priv->base + PJ_INTERRUPT_MASK); writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK); - - clk_disable_unprepare(priv->clk); } } @@ -208,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev) if (priv->irq < 0) return priv->irq; - priv->clk = devm_clk_get(&pdev->dev, "sp"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - /* KEYBOARD */ kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!kb_serio) diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index c62cceb97bb1..5e8d8384aa2a 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c @@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio) { struct ps2_gpio_data *drvdata = serio->port_data; + flush_delayed_work(&drvdata->tx_work); disable_irq(drvdata->irq); } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 87ba23a75b38..2a7b78bb98b4 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data, static void do_detach(struct iommu_dev_data *dev_data) { + struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu; u16 alias; iommu = amd_iommu_rlookup_table[dev_data->devid]; alias = dev_data->alias; - /* decrease reference counters */ - dev_data->domain->dev_iommu[iommu->index] -= 1; - dev_data->domain->dev_cnt -= 1; - /* Update data structures */ dev_data->domain = NULL; list_del(&dev_data->list); @@ -2010,6 +2007,16 @@ 
static void do_detach(struct iommu_dev_data *dev_data) /* Flush the DTE entry */ device_flush_dte(dev_data); + + /* Flush IOTLB */ + domain_flush_tlb_pde(domain); + + /* Wait for the flushes to finish */ + domain_flush_complete(domain); + + /* decrease reference counters - needs to happen after the flushes */ + domain->dev_iommu[iommu->index] -= 1; + domain->dev_cnt -= 1; } /* @@ -2617,13 +2624,13 @@ out_unmap: bus_addr = address + s->dma_address + (j << PAGE_SHIFT); iommu_unmap_page(domain, bus_addr, PAGE_SIZE); - if (--mapped_pages) + if (--mapped_pages == 0) goto out_free_iova; } } out_free_iova: - free_iova_fast(&dma_dom->iovad, address, npages); + free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); out_err: return 0; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2bd9ac285c0d..78188bf7e90d 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -363,7 +363,7 @@ static int dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; -static int intel_iommu_sm = 1; +static int intel_iommu_sm; static int iommu_identity_mapping; #define IDENTMAP_ALL 1 @@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str) } else if (!strncmp(str, "sp_off", 6)) { pr_info("Disable supported super page\n"); intel_iommu_superpage = 0; - } else if (!strncmp(str, "sm_off", 6)) { - pr_info("Intel-IOMMU: disable scalable mode support\n"); - intel_iommu_sm = 0; + } else if (!strncmp(str, "sm_on", 5)) { + pr_info("Intel-IOMMU: scalable mode supported\n"); + intel_iommu_sm = 1; } else if (!strncmp(str, "tboot_noforce", 13)) { printk(KERN_INFO "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); @@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev, struct iommu_resv_region *entry, *next; list_for_each_entry_safe(entry, next, head, list) { - if (entry->type == IOMMU_RESV_RESERVED) + if (entry->type == IOMMU_RESV_MSI) kfree(entry); } } diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 730f7dabcf37..7e0df67bd3e9 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -441,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev) iommu_spec.args_count = count; mtk_iommu_create_mapping(dev, &iommu_spec); + + /* dev->iommu_fwspec might have changed */ + fwspec = dev_iommu_fwspec_get(dev); + of_node_put(iommu_spec.np); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 7f2a45445b00..c3aba3fc818d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -97,9 +97,14 @@ struct its_device; * The ITS structure - contains most of the infrastructure, with the * top-level MSI domain, the command queue, the collections, and the * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. 
*/ struct its_node { raw_spinlock_t lock; + struct mutex dev_alloc_lock; struct list_head entry; void __iomem *base; phys_addr_t phys_base; @@ -156,6 +161,7 @@ struct its_device { void *itt; u32 nr_ites; u32 device_id; + bool shared; }; static struct { @@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) nr_irqs /= 2; } while (nr_irqs > 0); + if (!nr_irqs) + err = -ENOSPC; + if (err) goto out; @@ -2059,6 +2068,29 @@ static int __init allocate_lpi_tables(void) return 0; } +static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + return val; +} + static void its_cpu_init_lpis(void) { void __iomem *rbase = gic_data_rdist_rd_base(); @@ -2144,6 +2176,30 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); + if (gic_rdists->has_vlpis) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for CPU to receive VLPIs before it is + * sheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base); + WARN_ON(val & GICR_VPENDBASER_Dirty); + } + /* Make sure the GIC has seen the above */ dsb(sy); out: @@ -2422,6 +2478,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, struct its_device *its_dev; struct msi_domain_info *msi_info; u32 dev_id; + int err = 0; /* * We ignore "dev" entierely, and rely on the dev_id that has @@ -2444,6 +2501,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, return -EINVAL; } + mutex_lock(&its->dev_alloc_lock); its_dev = its_find_device(its, dev_id); if (its_dev) { /* @@ -2451,18 +2509,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, * another alias (PCI bridge of some sort). No need to * create the device. 
*/ + its_dev->shared = true; pr_debug("Reusing ITT for devID %x\n", dev_id); goto out; } its_dev = its_create_device(its, dev_id, nvec, true); - if (!its_dev) - return -ENOMEM; + if (!its_dev) { + err = -ENOMEM; + goto out; + } pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); out: + mutex_unlock(&its->dev_alloc_lock); info->scratchpad[0].ptr = its_dev; - return 0; + return err; } static struct msi_domain_ops its_msi_domain_ops = { @@ -2566,6 +2628,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its = its_dev->its; int i; for (i = 0; i < nr_irqs; i++) { @@ -2580,8 +2643,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, irq_domain_reset_irq_data(data); } - /* If all interrupts have been freed, start mopping the floor */ - if (bitmap_empty(its_dev->event_map.lpi_map, + mutex_lock(&its->dev_alloc_lock); + + /* + * If all interrupts have been freed, start mopping the + * floor. This is conditionned on the device not being shared. + */ + if (!its_dev->shared && + bitmap_empty(its_dev->event_map.lpi_map, its_dev->event_map.nr_lpis)) { its_lpi_free(its_dev->event_map.lpi_map, its_dev->event_map.lpi_base, @@ -2593,6 +2662,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, its_free_device(its_dev); } + mutex_unlock(&its->dev_alloc_lock); + irq_domain_free_irqs_parent(domain, virq, nr_irqs); } @@ -2755,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe) static void its_vpe_deschedule(struct its_vpe *vpe) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); - u32 count = 1000000; /* 1s! */ - bool clean; u64 val; - /* We're being scheduled out */ - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); - - do { - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - clean = !(val & GICR_VPENDBASER_Dirty); - if (!clean) { - count--; - cpu_relax(); - udelay(1); - } - } while (!clean && count); + val = its_clear_vpend_valid(vlpi_base); - if (unlikely(!clean && !count)) { + if (unlikely(val & GICR_VPENDBASER_Dirty)) { pr_err_ratelimited("ITS virtual pending table not cleaning\n"); vpe->idai = false; vpe->pending_last = true; @@ -3517,6 +3573,7 @@ static int __init its_probe_one(struct resource *res, } raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); INIT_LIST_HEAD(&its->entry); INIT_LIST_HEAD(&its->its_device_list); typer = gic_read_typer(its_base + GITS_TYPER); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 25f32e1d7764..3496b61a312a 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -34,6 +34,9 @@ #define SEL_INT_PENDING (1 << 6) #define SEL_INT_NUM_MASK 0x3f +#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5) +#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6) + struct icu_chip_data { int nr_irqs; unsigned int virq_base; @@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = { static const struct mmp_intc_conf mmp2_conf = { .conf_enable = 0x20, .conf_disable = 0x0, - .conf_mask = 0x7f, + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | + MMP2_ICU_INT_ROUTE_PJ4_FIQ, }; static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index 5385f5768345..27933338f7b3 100644 --- 
a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d) unsigned int mask = 1u << d->hwirq; if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - - HW_IRQ_MX_BASE), MIENG); - } else { - mask = __this_cpu_read(cached_irq_mask) & ~mask; - __this_cpu_write(cached_irq_mask, mask); - xtensa_set_sr(mask, intenable); + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); + + if (ext_irq >= HW_IRQ_MX_BASE) { + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG); + return; + } } + mask = __this_cpu_read(cached_irq_mask) & ~mask; + __this_cpu_write(cached_irq_mask, mask); + xtensa_set_sr(mask, intenable); } static void xtensa_mx_irq_unmask(struct irq_data *d) @@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d) unsigned int mask = 1u << d->hwirq; if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - - HW_IRQ_MX_BASE), MIENGSET); - } else { - mask |= __this_cpu_read(cached_irq_mask); - __this_cpu_write(cached_irq_mask, mask); - xtensa_set_sr(mask, intenable); + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); + + if (ext_irq >= HW_IRQ_MX_BASE) { + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET); + return; + } } + mask |= __this_cpu_read(cached_irq_mask); + __this_cpu_write(cached_irq_mask, mask); + xtensa_set_sr(mask, intenable); } static void xtensa_mx_irq_enable(struct irq_data *d) @@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d) static int xtensa_mx_irq_retrigger(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intset); + unsigned int mask = 1u << d->hwirq; + + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) + return 0; + xtensa_set_sr(mask, intset); return 1; } diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index c200234dd2c9..ab12328be5ee 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d) static int xtensa_irq_retrigger(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intset); + unsigned int mask = 1u << d->hwirq; + + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) + return 0; + xtensa_set_sr(mask, intset); return 1; } diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 211ed6cffd10..578978711887 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t) spin_lock_irqsave(&timer->dev->lock, flags); if (timer->id >= 0) list_move_tail(&timer->list, &timer->dev->expired); - spin_unlock_irqrestore(&timer->dev->lock, flags); wake_up_interruptible(&timer->dev->wait); + spin_unlock_irqrestore(&timer->dev->lock, flags); } static int diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index d713271ebf7c..a64116586b4c 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Clear ring flush state */ timeout = 1000; /* timeout of 1s */ - writel_relaxed(0x0, ring + RING_CONTROL); + writel_relaxed(0x0, ring->regs + RING_CONTROL); do { - if (!(readl_relaxed(ring + RING_FLUSH_DONE) & + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & 
FLUSH_DONE_MASK)) break; mdelay(1); diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index c6a7d4582dc6..38d9df3fb199 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) return ret; } +EXPORT_SYMBOL_GPL(mbox_flush); /** * mbox_request_channel - Request a mailbox channel. diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 47d4e0d30bf0..dd538e6b2748 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) if (IS_ERR(bip)) return PTR_ERR(bip); - tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); + tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); bip->bip_iter.bi_size = tag_len; bip->bip_iter.bi_sector = io->cc->start + io->sector; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 4eb5f8c56535..a20531e5f3b4 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) static void rq_completed(struct mapped_device *md) { /* nudge anyone waiting on suspend queue */ - if (unlikely(waitqueue_active(&md->wait))) + if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); /* diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ca8af21bf644..e83b63608262 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -257,6 +257,7 @@ struct pool { spinlock_t lock; struct bio_list deferred_flush_bios; + struct bio_list deferred_flush_completions; struct list_head prepared_mappings; struct list_head prepared_discards; struct list_head prepared_discards_pt2; @@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) mempool_free(m, &m->tc->pool->mapping_pool); } +static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) +{ + struct pool *pool = tc->pool; + unsigned long flags; + + /* + * If the bio has the REQ_FUA flag set we must commit the metadata + * before signaling its completion. + */ + if (!bio_triggers_commit(tc, bio)) { + bio_endio(bio); + return; + } + + /* + * Complete bio with an error if earlier I/O caused changes to the + * metadata that can't be committed, e.g, due to I/O errors on the + * metadata device. + */ + if (dm_thin_aborted_changes(tc->td)) { + bio_io_error(bio); + return; + } + + /* + * Batch together any bios that trigger commits and then issue a + * single commit for them in process_deferred_bios(). + */ + spin_lock_irqsave(&pool->lock, flags); + bio_list_add(&pool->deferred_flush_completions, bio); + spin_unlock_irqrestore(&pool->lock, flags); +} + static void process_prepared_mapping(struct dm_thin_new_mapping *m) { struct thin_c *tc = m->tc; @@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) */ if (bio) { inc_remap_and_issue_cell(tc, m->cell, m->data_block); - bio_endio(bio); + complete_overwrite_bio(tc, bio); } else { inc_all_io_entry(tc->pool, m->cell->holder); remap_and_issue(tc, m->cell->holder, m->data_block); @@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool) { unsigned long flags; struct bio *bio; - struct bio_list bios; + struct bio_list bios, bio_completions; struct thin_c *tc; tc = get_first_thin(pool); @@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool) } /* - * If there are any deferred flush bios, we must commit - * the metadata before issuing them. 
+ * If there are any deferred flush bios, we must commit the metadata + * before issuing them or signaling their completion. */ bio_list_init(&bios); + bio_list_init(&bio_completions); + spin_lock_irqsave(&pool->lock, flags); bio_list_merge(&bios, &pool->deferred_flush_bios); bio_list_init(&pool->deferred_flush_bios); + + bio_list_merge(&bio_completions, &pool->deferred_flush_completions); + bio_list_init(&pool->deferred_flush_completions); spin_unlock_irqrestore(&pool->lock, flags); - if (bio_list_empty(&bios) && + if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) return; if (commit(pool)) { + bio_list_merge(&bios, &bio_completions); + while ((bio = bio_list_pop(&bios))) bio_io_error(bio); return; } pool->last_commit_jiffies = jiffies; + while ((bio = bio_list_pop(&bio_completions))) + bio_endio(bio); + while ((bio = bio_list_pop(&bios))) generic_make_request(bio); } @@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); spin_lock_init(&pool->lock); bio_list_init(&pool->deferred_flush_bios); + bio_list_init(&pool->deferred_flush_completions); INIT_LIST_HEAD(&pool->prepared_mappings); INIT_LIST_HEAD(&pool->prepared_discards); INIT_LIST_HEAD(&pool->prepared_discards_pt2); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2b53c3841b53..515e6af9bed2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io) true, duration, &io->stats_aux); /* nudge anyone waiting on suspend queue */ - if (unlikely(waitqueue_active(&md->wait))) + if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); } @@ -1336,7 +1336,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, return r; } - bio_trim(clone, sector - clone->bi_iter.bi_sector, len); + bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); + clone->bi_iter.bi_size = to_bytes(len); + + if (bio_integrity(bio)) + bio_integrity_trim(clone); return 0; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1d54109071cc..fa47249fa3e4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio) reschedule_retry(r1_bio); } +static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) +{ + sector_t sync_blocks = 0; + sector_t s = r1_bio->sector; + long sectors_to_go = r1_bio->sectors; + + /* make sure these bits don't get cleared. */ + do { + md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); + s += sync_blocks; + sectors_to_go -= sync_blocks; + } while (sectors_to_go > 0); +} + static void end_sync_write(struct bio *bio) { int uptodate = !bio->bi_status; @@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio) struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; if (!uptodate) { - sector_t sync_blocks = 0; - sector_t s = r1_bio->sector; - long sectors_to_go = r1_bio->sectors; - /* make sure these bits doesn't get cleared. 
*/ - do { - md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); - s += sync_blocks; - sectors_to_go -= sync_blocks; - } while (sectors_to_go > 0); + abort_sync_write(mddev, r1_bio); set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & @@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) (i == r1_bio->read_disk || !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; - if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) + if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { + abort_sync_write(mddev, r1_bio); continue; + } bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index ec3a5ef7fee0..cbbe6b6535be 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1935,12 +1935,14 @@ out: } static struct stripe_head * -r5c_recovery_alloc_stripe(struct r5conf *conf, - sector_t stripe_sect) +r5c_recovery_alloc_stripe( + struct r5conf *conf, + sector_t stripe_sect, + int noblock) { struct stripe_head *sh; - sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); + sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); if (!sh) return NULL; /* no more stripe available */ @@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, stripe_sect); if (!sh) { - sh = r5c_recovery_alloc_stripe(conf, stripe_sect); + sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1); /* * cannot get stripe from raid5_get_active_stripe * try replay some stripes @@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, r5c_recovery_replay_stripes( cached_stripe_list, ctx); sh = r5c_recovery_alloc_stripe( - conf, stripe_sect); + conf, stripe_sect, 1); } if (!sh) { + int new_size = conf->min_nr_stripes * 2; pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", mdname(mddev), - conf->min_nr_stripes * 2); - raid5_set_cache_size(mddev, - conf->min_nr_stripes * 2); - sh = r5c_recovery_alloc_stripe(conf, - stripe_sect); + new_size); + ret = raid5_set_cache_size(mddev, new_size); + if (conf->min_nr_stripes <= new_size / 2) { + pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n", + mdname(mddev), + ret, + new_size, + conf->min_nr_stripes, + conf->max_nr_stripes); + return -ENOMEM; + } + sh = r5c_recovery_alloc_stripe( + conf, stripe_sect, 0); } if (!sh) { pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. 
Recovery failed.\n", - mdname(mddev)); + mdname(mddev)); return -ENOMEM; } list_add_tail(&sh->lru, cached_stripe_list); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4990f0319f6c..cecea901ab8c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page) int raid5_set_cache_size(struct mddev *mddev, int size) { + int result = 0; struct r5conf *conf = mddev->private; if (size <= 16 || size > 32768) @@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size) mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) - if (!grow_one_stripe(conf, GFP_KERNEL)) + if (!grow_one_stripe(conf, GFP_KERNEL)) { + conf->min_nr_stripes = conf->max_nr_stripes; + result = -ENOMEM; break; + } mutex_unlock(&conf->cache_size_mutex); - return 0; + return result; } EXPORT_SYMBOL(raid5_set_cache_size); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index f461460a2aeb..76f9909cf396 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1419,7 +1419,7 @@ config MFD_TPS65217 config MFD_TPS68470 bool "TI TPS68470 Power Management / LED chips" - depends on ACPI && I2C=y + depends on ACPI && PCI && I2C=y select MFD_CORE select REGMAP_I2C select I2C_DESIGNWARE_PLATFORM diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1fc8ea0f519b..ca4c9cc218a2 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head, struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, head, list) { - if (cl == cb->cl) + if (cl == cb->cl) { list_del_init(&cb->list); + if (cb->fop_type == MEI_FOP_READ) + mei_io_cb_free(cb); + } } } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 23739a60517f..bb1ee9834a02 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -139,6 +139,8 @@ #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index e89497f858ae..3ab946ad3257 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 2bfa3a903bf9..744757f541be 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -47,7 +47,8 @@ * @dc: Virtio device control * @vpdev: VOP device which is the parent for this virtio device * @vr: Buffer for accessing the VRING - * @used: Buffer for used + * @used_virt: Virtual address of used ring + * @used: DMA address of used ring * @used_size: Size of the used buffer * @reset_done: Track whether VOP reset is complete * @virtio_cookie: Cookie returned upon requesting a interrupt @@ -61,6 +62,7 @@ struct _vop_vdev { struct mic_device_ctrl __iomem *dc; struct vop_device *vpdev; void __iomem *vr[VOP_MAX_VRINGS]; + void *used_virt[VOP_MAX_VRINGS]; dma_addr_t used[VOP_MAX_VRINGS]; int used_size[VOP_MAX_VRINGS]; struct completion reset_done; @@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq) static 
void vop_del_vq(struct virtqueue *vq, int n) { struct _vop_vdev *vdev = to_vopvdev(vq->vdev); - struct vring *vr = (struct vring *)(vq + 1); struct vop_device *vpdev = vdev->vpdev; dma_unmap_single(&vpdev->dev, vdev->used[n], vdev->used_size[n], DMA_BIDIRECTIONAL); - free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); + free_pages((unsigned long)vdev->used_virt[n], + get_order(vdev->used_size[n])); vring_del_virtqueue(vq); vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); vdev->vr[n] = NULL; @@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev) vop_del_vq(vq, idx++); } +static struct virtqueue *vop_new_virtqueue(unsigned int index, + unsigned int num, + struct virtio_device *vdev, + bool context, + void *pages, + bool (*notify)(struct virtqueue *vq), + void (*callback)(struct virtqueue *vq), + const char *name, + void *used) +{ + bool weak_barriers = false; + struct vring vring; + + vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN); + vring.used = used; + + return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, + notify, callback, name); +} + /* * This routine will assign vring's allocated in host/io memory. Code in * virtio_ring.c however continues to access this io memory as if it were local @@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, struct _mic_vring_info __iomem *info; void *used; int vr_size, _vr_size, err, magic; - struct vring *vr; u8 type = ioread8(&vdev->desc->type); if (index >= ioread8(&vdev->desc->num_vq)) @@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, return ERR_PTR(-ENOMEM); vdev->vr[index] = va; memset_io(va, 0x0, _vr_size); - vq = vring_new_virtqueue( - index, - le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, - dev, - false, - ctx, - (void __force *)va, vop_notify, callback, name); - if (!vq) { - err = -ENOMEM; - goto unmap; - } + info = va + _vr_size; magic = ioread32(&info->magic); @@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, goto unmap; } - /* Allocate and reassign used ring now */ vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * le16_to_cpu(config.num)); used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(vdev->used_size[index])); + vdev->used_virt[index] = used; if (!used) { err = -ENOMEM; dev_err(_vop_dev(vdev), "%s %d err %d\n", __func__, __LINE__, err); - goto del_vq; + goto unmap; + } + + vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx, + (void __force *)va, vop_notify, callback, + name, used); + if (!vq) { + err = -ENOMEM; + goto free_used; } + vdev->used[index] = dma_map_single(&vpdev->dev, used, vdev->used_size[index], DMA_BIDIRECTIONAL); @@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, err = -ENOMEM; dev_err(_vop_dev(vdev), "%s %d err %d\n", __func__, __LINE__, err); - goto free_used; + goto del_vq; } writeq(vdev->used[index], &vqconfig->used_address); - /* - * To reassign the used ring here we are directly accessing - * struct vring_virtqueue which is a private data structure - * in virtio_ring.c. 
At the minimum, a BUILD_BUG_ON() in - * vring_new_virtqueue() would ensure that - * (&vq->vring == (struct vring *) (&vq->vq + 1)); - */ - vr = (struct vring *)(vq + 1); - vr->used = used; vq->priv = vdev; return vq; +del_vq: + vring_del_virtqueue(vq); free_used: free_pages((unsigned long)used, get_order(vdev->used_size[index])); -del_vq: - vring_del_virtqueue(vq); unmap: vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); return ERR_PTR(err); @@ -581,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, int ret = -1; if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { + struct device *dev = get_device(&vdev->vdev.dev); + dev_dbg(&vpdev->dev, "%s %d config_change %d type %d vdev %p\n", __func__, __LINE__, @@ -592,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, iowrite8(-1, &dc->h2c_vdev_db); if (status & VIRTIO_CONFIG_S_DRIVER_OK) wait_for_completion(&vdev->reset_done); - put_device(&vdev->vdev.dev); + put_device(dev); iowrite8(1, &dc->guest_ack); dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", __func__, __LINE__, ioread8(&dc->guest_ack)); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index aef1185f383d..14f3fdb8c6bb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) if (waiting) wake_up(&mq->wait); else - kblockd_schedule_work(&mq->complete_work); + queue_work(mq->card->complete_wq, &mq->complete_work); return; } @@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card) mmc_fixup_device(card, mmc_blk_fixups); + card->complete_wq = alloc_workqueue("mmc_complete", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (unlikely(!card->complete_wq)) { + pr_err("Failed to create mmc completion workqueue"); + return -ENOMEM; + } + md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); @@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card) pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); + destroy_workqueue(card->complete_wq); } static int _mmc_blk_suspend(struct mmc_card *card) diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 50293529d6de..c9e7aa50bb0a 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev) err: dev_dbg(dev, "%s -> err %d\n", __func__, ret); + if (host->dma_chan_rxtx) + dma_release_channel(host->dma_chan_rxtx); mmc_free_host(mmc); return ret; diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index f19ec60bcbdc..2eba507790e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1338,7 +1338,8 @@ static int meson_mmc_probe(struct platform_device *pdev) host->regs + SD_EMMC_IRQ_EN); ret = request_threaded_irq(host->irq, meson_mmc_irq, - meson_mmc_irq_thread, IRQF_SHARED, NULL, host); + meson_mmc_irq_thread, IRQF_SHARED, + dev_name(&pdev->dev), host); if (ret) goto err_init_clk; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 8afeaf81ae66..833ef0590af8 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (timing == MMC_TIMING_MMC_HS400 && host->dev_comp->hs400_tune) - sdr_set_field(host->base + PAD_CMD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, host->hs400_cmd_int_delay); dev_dbg(host->dev, 
"sclk: %d, timing: %d\n", host->mmc->actual_clock, diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 279e326e397e..70fadc976795 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; - if (host->cfg->clk_delays || host->use_new_timings) + /* + * Some H5 devices do not have signal traces precise enough to + * use HS DDR mode for their eMMC chips. + * + * We still enable HS DDR modes for all the other controller + * variants that support them. + */ + if ((host->cfg->clk_delays || host->use_new_timings) && + !of_device_is_compatible(pdev->dev.of_node, + "allwinner,sun50i-h5-emmc")) mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; ret = mmc_of_parse(mmc); if (ret) goto error_free_dma; + /* + * If we don't support delay chains in the SoC, we can't use any + * of the higher speed modes. Mask them out in case the device + * tree specifies the properties for them, which gets added to + * the caps by mmc_of_parse() above. + */ + if (!(host->cfg->clk_delays || host->use_new_timings)) { + mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | + MMC_CAP_1_2V_DDR | MMC_CAP_UHS); + mmc->caps2 &= ~MMC_CAP2_HS200; + } + + /* TODO: This driver doesn't support HS400 mode yet */ + mmc->caps2 &= ~MMC_CAP2_HS400; + ret = sunxi_mmc_init_host(host); if (ret) goto error_free_dma; diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 22f753e555ac..83f88b8b5d9f 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev, * Going to have to check what details I need to set and how to * get them */ - mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); + mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); mtd->type = MTD_NORFLASH; mtd->flags = MTD_WRITEABLE; mtd->size = size; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 999b705769a8..3ef01baef9b6 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -507,6 +507,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) { struct nvmem_config config = {}; + config.id = -1; config.dev = &mtd->dev; config.name = mtd->name; config.owner = THIS_MODULE; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 60104e1079c5..37f174ccbcec 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, /* let's register it anyway to preserve ordering */ slave->offset = 0; slave->mtd.size = 0; + + /* Initialize ->erasesize to make add_mtd_device() happy. */ + slave->mtd.erasesize = parent->erasesize; + printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", part->name); goto out_register; @@ -632,7 +636,6 @@ err_remove_part: mutex_unlock(&mtd_partitions_mutex); free_partition(new); - pr_info("%s:%i\n", __func__, __LINE__); return ret; } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c index bd4cfac6b5aa..a4768df5083f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c @@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this) /* * Reset BCH here, too. 
We got failures otherwise :( - * See later BCH reset for explanation of MX23 handling + * See later BCH reset for explanation of MX23 and MX28 handling */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; @@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this) /* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. - * On the other hand, the MX28 needs the reset, because one case has been - * seen where the BCH produced ECC errors constantly after 10000 - * consecutive reboots. The latter case has not been seen on the MX23 - * yet, still we don't know if it could happen there as well. + * and MX28. */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index cca4b24d2ffa..839494ac457c 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip) /** * nand_fill_oob - [INTERN] Transfer client buffer to oob + * @chip: NAND chip object * @oob: oob data buffer * @len: oob data write length * @ops: oob ops structure diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 1b722fe9213c..19a2b563acdf 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td) /** * read_bbt - [GENERIC] Read the bad block table starting from page - * @chip: NAND chip object + * @this: NAND chip object * @buf: temporary buffer * @page: the starting page * @num: the number of bbt descriptors to read diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 479c2f2cf17f..fa87ae28cdfe 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, struct nand_device *nand = spinand_to_nand(spinand); struct mtd_info *mtd = nanddev_to_mtd(nand); struct nand_page_io_req adjreq = *req; - unsigned int nbytes = 0; - void *buf = NULL; + void *buf = spinand->databuf; + unsigned int nbytes; u16 column = 0; int ret; - memset(spinand->databuf, 0xff, - nanddev_page_size(nand) + - nanddev_per_page_oobsize(nand)); + /* + * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset + * the cache content to 0xFF (depends on vendor implementation), so we + * must fill the page cache entirely even if we only want to program + * the data portion of the page, otherwise we might corrupt the BBM or + * user data previously programmed in OOB area. 
+ */ + nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); + memset(spinand->databuf, 0xff, nbytes); + adjreq.dataoffs = 0; + adjreq.datalen = nanddev_page_size(nand); + adjreq.databuf.out = spinand->databuf; + adjreq.ooblen = nanddev_per_page_oobsize(nand); + adjreq.ooboffs = 0; + adjreq.oobbuf.out = spinand->oobbuf; - if (req->datalen) { + if (req->datalen) memcpy(spinand->databuf + req->dataoffs, req->databuf.out, req->datalen); - adjreq.dataoffs = 0; - adjreq.datalen = nanddev_page_size(nand); - adjreq.databuf.out = spinand->databuf; - nbytes = adjreq.datalen; - buf = spinand->databuf; - } if (req->ooblen) { if (req->mode == MTD_OPS_AUTO_OOB) @@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, else memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, req->ooblen); - - adjreq.ooblen = nanddev_per_page_oobsize(nand); - adjreq.ooboffs = 0; - nbytes += nanddev_per_page_oobsize(nand); - if (!buf) { - buf = spinand->oobbuf; - column = nanddev_page_size(nand); - } } spinand_cache_op_adjust_colum(spinand, &adjreq, &column); @@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, /* * We need to use the RANDOM LOAD CACHE operation if there's - * more than one iteration, because the LOAD operation resets - * the cache to 0xff. + * more than one iteration, because the LOAD operation might + * reset the cache to 0xff. */ if (nbytes) { column = op.addr.val; @@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand) for (i = 0; i < nand->memorg.ntargets; i++) { ret = spinand_select_target(spinand, i); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; } ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index edb1c023a753..21bf8ac78380 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -197,9 +197,9 @@ config VXLAN config GENEVE tristate "Generic Network Virtualization Encapsulation" - depends on INET && NET_UDP_TUNNEL + depends on INET depends on IPV6 || !IPV6 - select NET_IP_TUNNEL + select NET_UDP_TUNNEL select GRO_CELLS ---help--- This allows one to create geneve virtual interfaces that provide diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 485462d3087f..537c90c8eb0a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) } } - /* Link-local multicast packets should be passed to the - * stack on the link they arrive as well as pass them to the - * bond-master device. These packets are mostly usable when - * stack receives it with the link on which they arrive - * (e.g. LLDP) they also must be available on master. Some of - * the use cases include (but are not limited to): LLDP agents - * that must be able to operate both on enslaved interfaces as - * well as on bonds themselves; linux bridges that must be able - * to process/pass BPDUs from attached bonds when any kind of - * STP version is enabled on the network. + /* + * For packets determined by bond_should_deliver_exact_match() call to + * be suppressed we want to make an exception for link-local packets. + * This is necessary for e.g. LLDP daemons to be able to monitor + * inactive slave links without being forced to bind to them + * explicitly. 
+ * + * At the same time, packets that are passed to the bonding master + * (including link-local ones) can have their originating interface + * determined via PACKET_ORIGDEV socket option. */ - if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); - - if (nskb) { - nskb->dev = bond->dev; - nskb->queue_mapping = 0; - netif_rx(nskb); - } - return RX_HANDLER_PASS; - } - if (bond_should_deliver_exact_match(skb, slave, bond)) + if (bond_should_deliver_exact_match(skb, slave, bond)) { + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) + return RX_HANDLER_PASS; return RX_HANDLER_EXACT; + } skb->dev = bond->dev; diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index a0f954f36c09..44e6c7b1b222 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser) if (skb->len == 0) { struct sk_buff *tmp = skb_dequeue(&ser->head); WARN_ON(tmp != skb); - if (in_interrupt()) - dev_kfree_skb_irq(skb); - else - kfree_skb(skb); + dev_consume_skb_any(skb); } } /* Send flow off if queue is empty */ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0e4bbdcc614f..c76892ac4e69 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); } -static void b53_enable_vlan(struct b53_device *dev, bool enable) +static void b53_enable_vlan(struct b53_device *dev, bool enable, + bool enable_filtering) { u8 mgmt, vc0, vc1, vc4 = 0, vc5; @@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; - vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; - vc5 |= VC5_DROP_VTABLE_MISS; + if (enable_filtering) { + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + } else { + vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + vc5 &= ~VC5_DROP_VTABLE_MISS; + } if (is5325(dev)) vc0 &= ~VC0_RESERVED_1; @@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) } b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + + dev->vlan_enabled = enable; + dev->vlan_filtering_enabled = enable_filtering; } static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) @@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev) b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); } +static u16 b53_default_pvid(struct b53_device *dev) +{ + if (is5325(dev) || is5365(dev)) + return 1; + else + return 0; +} + int b53_configure_vlan(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; struct b53_vlan vl = { 0 }; - int i; + int i, def_vid; + + def_vid = b53_default_pvid(dev); /* clear all vlan entries */ if (is5325(dev) || is5365(dev)) { - for (i = 1; i < dev->num_vlans; i++) + for (i = def_vid; i < dev->num_vlans; i++) b53_set_vlan_entry(dev, i, &vl); } else { b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, false); + b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, - B53_VLAN_PORT_DEF_TAG(i), 1); + B53_VLAN_PORT_DEF_TAG(i), def_vid); if (!is5325(dev) && !is5365(dev)) b53_set_jumbo(dev, dev->enable_jumbo, false); @@ -1255,6 +1274,46 @@ 
EXPORT_SYMBOL(b53_phylink_mac_link_up); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { + struct b53_device *dev = ds->priv; + struct net_device *bridge_dev; + unsigned int i; + u16 pvid, new_pvid; + + /* Handle the case were multiple bridges span the same switch device + * and one of them has a different setting than what is being requested + * which would be breaking filtering semantics for any of the other + * bridge devices. + */ + b53_for_each_port(dev, i) { + bridge_dev = dsa_to_port(ds, i)->bridge_dev; + if (bridge_dev && + bridge_dev != dsa_to_port(ds, port)->bridge_dev && + br_vlan_enabled(bridge_dev) != vlan_filtering) { + netdev_err(bridge_dev, + "VLAN filtering is global to the switch!\n"); + return -EINVAL; + } + } + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + new_pvid = pvid; + if (dev->vlan_filtering_enabled && !vlan_filtering) { + /* Filtering is currently enabled, use the default PVID since + * the bridge does not expect tagging anymore + */ + dev->ports[port].pvid = pvid; + new_pvid = b53_default_pvid(dev); + } else if (!dev->vlan_filtering_enabled && vlan_filtering) { + /* Filtering is currently disabled, restore the previous PVID */ + new_pvid = dev->ports[port].pvid; + } + + if (pvid != new_pvid) + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), + new_pvid); + + b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); + return 0; } EXPORT_SYMBOL(b53_vlan_filtering); @@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid_end > dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, true); + b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); return 0; } @@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, b53_fast_age_vlan(dev, vid); } - if (pvid) { + if (pvid && !dsa_is_cpu_port(ds, port)) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), vlan->vid_end); b53_fast_age_vlan(dev, vid); @@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, vl->members &= ~BIT(port); - if (pvid == vid) { - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; - } + if (pvid == vid) + pvid = b53_default_pvid(dev); if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag &= ~(BIT(port)); @@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; + pvid = b53_default_pvid(dev); /* Make this port join all VLANs without VLAN entries */ if (is58xx(dev)) { diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index ec796482792d..4dc7ee38b258 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -91,6 +91,7 @@ enum { struct b53_port { u16 vlan_ctl_mask; struct ethtool_eee eee; + u16 pvid; }; struct b53_vlan { @@ -137,6 +138,8 @@ struct b53_device { unsigned int num_vlans; struct b53_vlan *vlans; + bool vlan_enabled; + bool vlan_filtering_enabled; unsigned int num_ports; struct b53_port *ports; }; diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 90f514252987..d9c56a779c08 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev) /* Clear all pending interrupts */ writel(0xffffffff, priv->regs + B53_SRAB_INTR); - if (dev->pdata && 
dev->pdata->chip_id != BCM58XX_DEVICE_ID) - return; - for (i = 0; i < B53_N_PORTS; i++) { port = &priv->port_intrs[i]; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 361fbde76654..14138d423cf1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) * port, the other ones have already been disabled during * bcm_sf2_sw_setup */ - for (port = 0; port < DSA_MAX_PORTS; port++) { + for (port = 0; port < ds->num_ports; port++) { if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) bcm_sf2_port_disable(ds, port, NULL); } @@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, { struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; /* Get the parent device WoL settings */ - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); /* Advertise the parent device supported settings */ wol->supported = pwol.supported; @@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->ports[port].cpu_dp->index; - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); if (wol->wolopts & ~pwol.supported) return -EINVAL; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dca2c949e73..12fd7ce3f1ff 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) unsigned int sub_irq; unsigned int n; u16 reg; + u16 ctl1; int err; mutex_lock(&chip->reg_lock); @@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) if (err) goto out; - for (n = 0; n < chip->g1_irq.nirqs; ++n) { - if (reg & (1 << n)) { - sub_irq = irq_find_mapping(chip->g1_irq.domain, n); - handle_nested_irq(sub_irq); - ++nhandled; + do { + for (n = 0; n < chip->g1_irq.nirqs; ++n) { + if (reg & (1 << n)) { + sub_irq = irq_find_mapping(chip->g1_irq.domain, + n); + handle_nested_irq(sub_irq); + ++nhandled; + } } - } + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); + if (err) + goto unlock; + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg); +unlock: + mutex_unlock(&chip->reg_lock); + if (err) + goto out; + ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); + } while (reg & ctl1); + out: return (nhandled > 0 ?
IRQ_HANDLED : IRQ_NONE); } diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 5200e4bdce93..ea243840ee0f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) { struct mv88e6xxx_chip *chip = dev_id; struct mv88e6xxx_atu_entry entry; + int spid; int err; u16 val; @@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (err) goto out; + spid = entry.state; + if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU age out violation for %pM\n", @@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU member violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_member_violation++; + "ATU member violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_member_violation++; } if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU miss violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_miss_violation++; + "ATU miss violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_miss_violation++; } if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU full violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_full_violation++; + "ATU full violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_full_violation++; } mutex_unlock(&chip->reg_lock); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 2caa8c8b4b55..1bfc5ff8d81d 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -664,7 +664,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) if (port < 9) return 0; - return mv88e6390_serdes_irq_setup(chip, port); + return mv88e6390x_serdes_irq_setup(chip, port); } void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 4f11f98347ed..1827ef1f6d55 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev, if (skb) { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); info->skb = NULL; } diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 0fb986ba3290..0ae723f75341 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) & 0xffff; if (inuse) { /* Tx FIFO is not empty */ - ready = priv->tx_prod - priv->tx_cons - inuse - 1; + ready = max_t(int, + priv->tx_prod - priv->tx_cons - inuse - 1, 0); } else { /* Check for buffered last packet */ status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a70bb1bb90e7..a6eacf2099c3 100644 --- 
a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter) goto err_device_destroy; } - clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); - /* Make sure we don't have a race with AENQ Links state handler */ - if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) - netif_carrier_on(adapter->netdev); - rc = ena_enable_msix_and_set_admin_interrupts(adapter, adapter->num_queues); if (rc) { @@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter) } set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) + netif_carrier_on(adapter->netdev); + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); dev_err(&pdev->dev, "Device reset completed successfully, Driver info: %s\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index dc8b6173d8d8..63870072cbbd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -45,7 +45,7 @@ #define DRV_MODULE_VER_MAJOR 2 #define DRV_MODULE_VER_MINOR 0 -#define DRV_MODULE_VER_SUBMINOR 2 +#define DRV_MODULE_VER_SUBMINOR 3 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index a90080f12e67..e548c0ae2e00 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev) pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], lp->tx_skbuff[tx_index]->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); + dev_consume_skb_irq(lp->tx_skbuff[tx_index]); lp->tx_skbuff[tx_index] = NULL; lp->tx_dma_addr[tx_index] = 0; } diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 6a8e2567f2bd..4d3855ceb500 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) if (bp->tx_bufs[bp->tx_empty]) { ++dev->stats.tx_packets; - dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); + dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]); } bp->tx_bufs[bp->tx_empty] = NULL; bp->tx_fullup = 0; diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index bb41becb6609..31ff1e0d1baa 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl2_adapter *adapter; - static int cards_found; + static int cards_found = 0; unsigned long mmio_start; int mmio_len; int err; - cards_found = 0; - err = pci_enable_device(pdev); if (err) return err; diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index f44808959ff3..97ab0dd25552 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp) bytes_compl += skb->len; pkts_compl++; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); @@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) } skb_copy_from_linear_data(skb, 
skb_put(bounce_skb, len), len); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); skb = bounce_skb; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9521d0274b7..bc3ac369cbe3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev, priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); reg = rxchk_readl(priv, RXCHK_CONTROL); + /* Clear L2 header checks, which would prevent BPDUs + * from being received. + */ + reg &= ~RXCHK_L2_HDR_DIS; if (priv->rx_chk_en) reg |= RXCHK_EN; else @@ -520,7 +524,6 @@ static void bcm_sysport_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcm_sysport_priv *priv = netdev_priv(dev); - u32 reg; wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; @@ -528,11 +531,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, if (!(priv->wolopts & WAKE_MAGICSECURE)) return; - /* Return the programmed SecureOn password */ - reg = umac_readl(priv, UMAC_PSW_MS); - put_unaligned_be16(reg, &wol->sopass[0]); - reg = umac_readl(priv, UMAC_PSW_LS); - put_unaligned_be32(reg, &wol->sopass[2]); + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); } static int bcm_sysport_set_wol(struct net_device *dev, @@ -548,13 +547,8 @@ static int bcm_sysport_set_wol(struct net_device *dev, if (wol->wolopts & ~supported) return -EINVAL; - /* Program the SecureOn password */ - if (wol->wolopts & WAKE_MAGICSECURE) { - umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), - UMAC_PSW_MS); - umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), - UMAC_PSW_LS); - } + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { @@ -2649,13 +2643,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) unsigned int index, i = 0; u32 reg; - /* Password has already been programmed */ reg = umac_readl(priv, UMAC_MPD_CTRL); if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) reg |= MPD_EN; reg &= ~PSW_EN; - if (priv->wolopts & WAKE_MAGICSECURE) + if (priv->wolopts & WAKE_MAGICSECURE) { + /* Program the SecureOn password */ + umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_PSW_MS); + umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_PSW_LS); reg |= PSW_EN; + } umac_writel(priv, reg, UMAC_MPD_CTRL); if (priv->wolopts & WAKE_FILTER) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 0887e6356649..0b192fea9c5d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -12,6 +12,7 @@ #define __BCM_SYSPORT_H #include <linux/bitmap.h> +#include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/net_dim.h> @@ -778,6 +779,7 @@ struct bcm_sysport_priv { unsigned int crc_fwd:1; u16 rev; u32 wolopts; + u8 sopass[SOPASS_MAX]; unsigned int wol_irq_disabled:1; /* MIB related fields */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a512871176b..d95730c6e0f2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3903,7 +3903,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (len) break; /* on first few passes, just barely sleep */ - if (i < 
DFLT_HWRM_CMD_TIMEOUT) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); else @@ -3926,7 +3926,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, dma_rmb(); if (*valid) break; - udelay(1); + usleep_range(1, 5); } if (j >= HWRM_VALID_BIT_DELAY_USEC) { @@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; u32 map_idx = ring->map_idx; + unsigned int vector; + vector = bp->irq_tbl[map_idx].vector; + disable_irq_nosync(vector); rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); - if (rc) + if (rc) { + enable_irq(vector); goto err_out; + } bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); + enable_irq(vector); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; if (!i) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a451796deefe..2fb653e0048d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -582,7 +582,7 @@ struct nqe_cn { (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) -#define HWRM_VALID_BIT_DELAY_USEC 20 +#define HWRM_VALID_BIT_DELAY_USEC 150 #define BNXT_HWRM_CHNL_CHIMP 0 #define BNXT_HWRM_CHNL_KONG 1 diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5db9f4158e62..134ae2862efa 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, * for transmits, we just free buffers. */ - dev_kfree_skb_irq(sb); + dev_consume_skb_irq(sb); /* * .. and advance to the next buffer. 
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 3d45f4c92cf6..9bbaad9f3d63 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -643,6 +643,7 @@ #define MACB_CAPS_JUMBO 0x00000020 #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 +#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -1214,6 +1215,8 @@ struct macb { int rx_bd_rd_prefetch; int tx_bd_rd_prefetch; + + u32 rx_intr_mask; }; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 66cc7927061a..2b2882615e8b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -56,8 +56,7 @@ /* level of occupied TX descriptors under which we wake up TX process */ #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) -#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ - | MACB_BIT(ISR_ROVR)) +#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ | MACB_BIT(ISR_RLE) \ | MACB_BIT(TXERR)) @@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget) queue_writel(queue, ISR, MACB_BIT(RCOMP)); napi_reschedule(napi); } else { - queue_writel(queue, IER, MACB_RX_INT_FLAGS); + queue_writel(queue, IER, bp->rx_intr_mask); } } @@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data) u32 ctrl; for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue_writel(queue, IDR, MACB_RX_INT_FLAGS | + queue_writel(queue, IDR, bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data) /* Enable interrupts */ queue_writel(queue, IER, - MACB_RX_INT_FLAGS | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) (unsigned int)(queue - bp->queues), (unsigned long)status); - if (status & MACB_RX_INT_FLAGS) { + if (status & bp->rx_intr_mask) { /* There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts * now. */ - queue_writel(queue, IDR, MACB_RX_INT_FLAGS); + queue_writel(queue, IDR, bp->rx_intr_mask); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(RCOMP)); @@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) /* There is a hardware issue under heavy load where DMA can * stop, this causes endless "used buffer descriptor read" * interrupts but it can be cleared by re-enabling RX. See - * the at91 manual, section 41.3.1 or the Zynq manual - * section 16.7.4 for details. + * the at91rm9200 manual, section 41.3.1 or the Zynq manual + * section 16.7.4 for details. RXUBR is only enabled for + * these two versions. 
*/ if (status & MACB_BIT(RXUBR)) { ctrl = macb_readl(bp, NCR); @@ -2259,7 +2259,7 @@ static void macb_init_hw(struct macb *bp) /* Enable interrupts */ queue_writel(queue, IER, - MACB_RX_INT_FLAGS | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -3907,6 +3907,7 @@ static const struct macb_config sama5d4_config = { }; static const struct macb_config emac_config = { + .caps = MACB_CAPS_NEEDS_RSTONUBR, .clk_init = at91ether_clk_init, .init = at91ether_init, }; @@ -3928,7 +3929,8 @@ static const struct macb_config zynqmp_config = { }; static const struct macb_config zynq_config = { - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | + MACB_CAPS_NEEDS_RSTONUBR, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -4083,6 +4085,10 @@ static int macb_probe(struct platform_device *pdev) macb_dma_desc_get_size(bp); } + bp->rx_intr_mask = MACB_RX_INT_FLAGS; + if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) + bp->rx_intr_mask |= MACB_BIT(RXUBR); + mac = of_get_mac_address(np); if (mac) { ether_addr_copy(bp->dev->dev_addr, mac); diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 5f03199a3acf..05f4a3b21e29 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -54,7 +54,6 @@ config CAVIUM_PTP tristate "Cavium PTP coprocessor as PTP clock" depends on 64BIT && PCI imply PTP_1588_CLOCK - default y ---help--- This driver adds support for the Precision Time Protocol Clocks and Timestamping coprocessor (PTP) found on Cavium processors. diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index f4d81765221e..62636c1ed141 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -271,7 +271,7 @@ struct xcast_addr_list { }; struct nicvf_work { - struct delayed_work work; + struct work_struct work; u8 mode; struct xcast_addr_list *mc; }; @@ -327,7 +327,11 @@ struct nicvf { struct nicvf_work rx_mode_work; /* spinlock to protect workqueue arguments from concurrent access */ spinlock_t rx_mode_wq_lock; - + /* workqueue for handling kernel ndo_set_rx_mode() calls */ + struct workqueue_struct *nicvf_rx_mode_wq; + /* mutex to protect VF's mailbox contents from concurrent access */ + struct mutex rx_mode_mtx; + struct delayed_work link_change_work; /* PTP timestamp */ struct cavium_ptp *ptp_clock; /* Inbound timestamping is on */ @@ -575,10 +579,8 @@ struct set_ptp { struct xcast { u8 msg; - union { - u8 mode; - u64 mac; - } data; + u8 mode; + u64 mac:48; }; /* 128 bit shared memory between PF and each VF */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 6c8dcb65ff03..c90252829ed3 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -57,14 +57,8 @@ struct nicpf { #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) u8 *vf_lmac_map; - struct delayed_work dwork; - struct workqueue_struct *check_link; - u8 *link; - u8 *duplex; - u32 *speed; u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; - bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; /* MSI-X */ u8 num_vec; @@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp) nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), 
pkind_val); } +/* Get BGX LMAC link status and update corresponding VF + * if there is a change, valid only if internal L2 switch + * is not present otherwise VF link is always treated as up + */ +static void nic_link_status_get(struct nicpf *nic, u8 vf) +{ + union nic_mbx mbx = {}; + struct bgx_link_status link; + u8 bgx, lmac; + + mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + + /* Get BGX, LMAC indices for the VF */ + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + /* Get interface link status */ + bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); + + /* Send a mbox message to VF with current link status */ + mbx.link_status.link_up = link.link_up; + mbx.link_status.duplex = link.duplex; + mbx.link_status.speed = link.speed; + mbx.link_status.mac_type = link.mac_type; + + /* reply with link status */ + nic_send_msg_to_vf(nic, vf, &mbx); +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) int i; int ret = 0; - nic->mbx_lock[vf] = true; - mbx_addr = nic_get_mbx_addr(vf); mbx_data = (u64 *)&mbx; @@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: nic_mbx_send_ready(nic, vf); - if (vf < nic->num_vf_en) { - nic->link[vf] = 0; - nic->duplex[vf] = 0; - nic->speed[vf] = 0; - } - goto unlock; + return; case NIC_MBOX_MSG_QS_CFG: reg_addr = NIC_PF_QSET_0_127_CFG | (mbx.qs.num << NIC_QS_ID_SHIFT); @@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_RSS_SIZE: nic_send_rss_size(nic, vf); - goto unlock; + return; case NIC_MBOX_MSG_RSS_CFG: case NIC_MBOX_MSG_RSS_CFG_CONT: nic_config_rss(nic, &mbx.rss_cfg); @@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ nic_enable_vf(nic, vf, true); - goto unlock; + break; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ if (vf >= nic->num_vf_en) @@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_ALLOC_SQS: nic_alloc_sqs(nic, &mbx.sqs_alloc); - goto unlock; + return; case NIC_MBOX_MSG_NICVF_PTR: nic->nicvf[vf] = mbx.nicvf.nicvf; break; case NIC_MBOX_MSG_PNICVF_PTR: nic_send_pnicvf(nic, vf); - goto unlock; + return; case NIC_MBOX_MSG_SNICVF_PTR: nic_send_snicvf(nic, &mbx.nicvf); - goto unlock; + return; case NIC_MBOX_MSG_BGX_STATS: nic_get_bgx_stats(nic, &mbx.bgx_stats); - goto unlock; + return; case NIC_MBOX_MSG_LOOPBACK: ret = nic_config_loopback(nic, &mbx.lbk); break; @@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_PFC: nic_pause_frame(nic, vf, &mbx.pfc); - goto unlock; + return; case NIC_MBOX_MSG_PTP_CFG: nic_config_timestamp(nic, vf, &mbx.ptp); break; @@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); bgx_set_dmac_cam_filter(nic->node, bgx, lmac, - mbx.xcast.data.mac, + mbx.xcast.mac, vf < NIC_VF_PER_MBX_REG ? 
vf : vf - NIC_VF_PER_MBX_REG); break; @@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) } bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); + bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode); break; + case NIC_MBOX_MSG_BGX_LINK_CHANGE: + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ + break; + } + nic_link_status_get(nic, vf); + return; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); @@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) mbx.msg.msg, vf); nic_mbx_send_nack(nic, vf); } -unlock: - nic->mbx_lock[vf] = false; } static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) @@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) return 0; } -/* Poll for BGX LMAC link status and update corresponding VF - * if there is a change, valid only if internal L2 switch - * is not present otherwise VF link is always treated as up - */ -static void nic_poll_for_link(struct work_struct *work) -{ - union nic_mbx mbx = {}; - struct nicpf *nic; - struct bgx_link_status link; - u8 vf, bgx, lmac; - - nic = container_of(work, struct nicpf, dwork.work); - - mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; - - for (vf = 0; vf < nic->num_vf_en; vf++) { - /* Poll only if VF is UP */ - if (!nic->vf_enabled[vf]) - continue; - - /* Get BGX, LMAC indices for the VF */ - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - /* Get interface link status */ - bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); - - /* Inform VF only if link status changed */ - if (nic->link[vf] == link.link_up) - continue; - - if (!nic->mbx_lock[vf]) { - nic->link[vf] = link.link_up; - nic->duplex[vf] = link.duplex; - nic->speed[vf] = link.speed; - - /* Send a mbox message to VF with current link status */ - mbx.link_status.link_up = link.link_up; - mbx.link_status.duplex = link.duplex; - mbx.link_status.speed = link.speed; - mbx.link_status.mac_type = link.mac_type; - nic_send_msg_to_vf(nic, vf, &mbx); - } - } - queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); -} - static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; @@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!nic->vf_lmac_map) goto err_release_regions; - nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->link) - goto err_release_regions; - - nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->duplex) - goto err_release_regions; - - nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL); - if (!nic->speed) - goto err_release_regions; - /* Initialize hardware */ nic_init_hw(nic); @@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_unregister_interrupts; - /* Register a physical link status poll fn() */ - nic->check_link = alloc_workqueue("check_link_status", - WQ_UNBOUND | WQ_MEM_RECLAIM, 1); - if (!nic->check_link) { - err = -ENOMEM; - goto err_disable_sriov; - } - - INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link); - queue_delayed_work(nic->check_link, &nic->dwork, 0); - return 0; -err_disable_sriov: - if (nic->flags & NIC_SRIOV_ENABLED) - 
pci_disable_sriov(pdev); err_unregister_interrupts: nic_unregister_interrupts(nic); err_release_regions: @@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev) if (nic->flags & NIC_SRIOV_ENABLED) pci_disable_sriov(pdev); - if (nic->check_link) { - /* Destroy work Queue */ - cancel_delayed_work_sync(&nic->dwork); - destroy_workqueue(nic->check_link); - } - nic_unregister_interrupts(nic); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 88f8a8fa93cd..503cfadff4ac 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444); MODULE_PARM_DESC(cpi_alg, "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); -/* workqueue for handling kernel ndo_set_rx_mode() calls */ -static struct workqueue_struct *nicvf_rx_mode_wq; - static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) { if (nic->sqs_mode) @@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) { int timeout = NIC_MBOX_MSG_TIMEOUT; int sleep = 10; + int ret = 0; + + mutex_lock(&nic->rx_mode_mtx); nic->pf_acked = false; nic->pf_nacked = false; @@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) netdev_err(nic->netdev, "PF NACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); - return -EINVAL; + ret = -EINVAL; + break; } msleep(sleep); if (nic->pf_acked) @@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) netdev_err(nic->netdev, "PF didn't ACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); - return -EBUSY; + ret = -EBUSY; + break; } } - return 0; + mutex_unlock(&nic->rx_mode_mtx); + return ret; } /* Checks if VF is able to comminicate with PF @@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic) return 1; } +static void nicvf_send_cfg_done(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + if (nicvf_send_msg_to_pf(nic, &mbx)) { + netdev_err(nic->netdev, + "PF didn't respond to CFG DONE msg\n"); + } +} + static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) { if (bgx->rx) @@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) break; case NIC_MBOX_MSG_BGX_LINK_CHANGE: nic->pf_acked = true; - nic->link_up = mbx.link_status.link_up; - nic->duplex = mbx.link_status.duplex; - nic->speed = mbx.link_status.speed; - nic->mac_type = mbx.link_status.mac_type; - if (nic->link_up) { - netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", - nic->speed, - nic->duplex == DUPLEX_FULL ? - "Full" : "Half"); - netif_carrier_on(nic->netdev); - netif_tx_start_all_queues(nic->netdev); - } else { - netdev_info(nic->netdev, "Link is Down\n"); - netif_carrier_off(nic->netdev); - netif_tx_stop_all_queues(nic->netdev); + if (nic->link_up != mbx.link_status.link_up) { + nic->link_up = mbx.link_status.link_up; + nic->duplex = mbx.link_status.duplex; + nic->speed = mbx.link_status.speed; + nic->mac_type = mbx.link_status.mac_type; + if (nic->link_up) { + netdev_info(nic->netdev, + "Link is Up %d Mbps %s duplex\n", + nic->speed, + nic->duplex == DUPLEX_FULL ? 
+ "Full" : "Half"); + netif_carrier_on(nic->netdev); + netif_tx_start_all_queues(nic->netdev); + } else { + netdev_info(nic->netdev, "Link is Down\n"); + netif_carrier_off(nic->netdev); + netif_tx_stop_all_queues(nic->netdev); + } } break; case NIC_MBOX_MSG_ALLOC_SQS: @@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev) struct nicvf_cq_poll *cq_poll = NULL; union nic_mbx mbx = {}; + cancel_delayed_work_sync(&nic->link_change_work); + + /* wait till all queued set_rx_mode tasks completes */ + drain_workqueue(nic->nicvf_rx_mode_wq); + mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; nicvf_send_msg_to_pf(nic, &mbx); @@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) return nicvf_send_msg_to_pf(nic, &mbx); } +static void nicvf_link_status_check_task(struct work_struct *work_arg) +{ + struct nicvf *nic = container_of(work_arg, + struct nicvf, + link_change_work.work); + union nic_mbx mbx = {}; + mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + nicvf_send_msg_to_pf(nic, &mbx); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 2 * HZ); +} + int nicvf_open(struct net_device *netdev) { int cpu, err, qidx; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct nicvf_cq_poll *cq_poll = NULL; - union nic_mbx mbx = {}; + + /* wait till all queued set_rx_mode tasks completes if any */ + drain_workqueue(nic->nicvf_rx_mode_wq); netif_carrier_off(netdev); @@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); /* Send VF config done msg to PF */ - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; - nicvf_write_to_mbx(nic, &mbx); + nicvf_send_cfg_done(nic); + + INIT_DELAYED_WORK(&nic->link_change_work, + nicvf_link_status_check_task); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 0); return 0; cleanup: @@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, /* flush DMAC filters and reset RX mode */ mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; - nicvf_send_msg_to_pf(nic, &mbx); + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; if (mode & BGX_XCAST_MCAST_FILTER) { /* once enabling filtering, we need to signal to PF to add * its' own LMAC to the filter to accept packets for it. 
*/ mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; - mbx.xcast.data.mac = 0; - nicvf_send_msg_to_pf(nic, &mbx); + mbx.xcast.mac = 0; + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } /* check if we have any specific MACs to be added to PF DMAC filter */ @@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, /* now go through kernel list of MACs and add them one by one */ for (idx = 0; idx < mc_addrs->count; idx++) { mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; - mbx.xcast.data.mac = mc_addrs->mc[idx]; - nicvf_send_msg_to_pf(nic, &mbx); + mbx.xcast.mac = mc_addrs->mc[idx]; + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } - kfree(mc_addrs); } /* and finally set rx mode for PF accordingly */ mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; - mbx.xcast.data.mode = mode; + mbx.xcast.mode = mode; nicvf_send_msg_to_pf(nic, &mbx); +free_mc: + kfree(mc_addrs); } static void nicvf_set_rx_mode_task(struct work_struct *work_arg) { struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, - work.work); + work); struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); u8 mode; struct xcast_addr_list *mc; @@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev) kfree(nic->rx_mode_work.mc); nic->rx_mode_work.mc = mc_list; nic->rx_mode_work.mode = mode; - queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); + queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work); spin_unlock(&nic->rx_mode_wq_lock); } @@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&nic->reset_task, nicvf_reset_task); - INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); + nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d", + WQ_MEM_RECLAIM, + nic->vf_id); + INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); spin_lock_init(&nic->rx_mode_wq_lock); + mutex_init(&nic->rx_mode_mtx); err = register_netdev(netdev); if (err) { @@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev) nic = netdev_priv(netdev); pnetdev = nic->pnicvf->netdev; - cancel_delayed_work_sync(&nic->rx_mode_work.work); - /* Check if this Qset is assigned to different VF. * If yes, clean primary and all secondary Qsets. 
*/ if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) unregister_netdev(pnetdev); + if (nic->nicvf_rx_mode_wq) { + destroy_workqueue(nic->nicvf_rx_mode_wq); + nic->nicvf_rx_mode_wq = NULL; + } nicvf_unregister_interrupts(nic); pci_set_drvdata(pdev, NULL); if (nic->drv_stats) @@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = { static int __init nicvf_init_module(void) { pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); - nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic", - WQ_MEM_RECLAIM); return pci_register_driver(&nicvf_driver); } static void __exit nicvf_cleanup_module(void) { - if (nicvf_rx_mode_wq) { - destroy_workqueue(nicvf_rx_mode_wq); - nicvf_rx_mode_wq = NULL; - } pci_unregister_driver(&nicvf_driver); } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index e337da6ba2a4..673c57b8023f 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx) /* Disable MAC steering (NCSI traffic) */ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) - bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); + bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00); } static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index cbdd20b9ee6f..5cbc54e9eb19 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -60,7 +60,7 @@ #define RX_DMACX_CAM_EN BIT_ULL(48) #define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) #define RX_DMAC_COUNT 32 -#define BGX_CMR_RX_STREERING 0x300 +#define BGX_CMR_RX_STEERING 0x300 #define RX_TRAFFIC_STEER_RULE_COUNT 8 #define BGX_CMR_CHAN_MSK_AND 0x450 #define BGX_CMR_BIST_STATUS 0x460 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index c041f44324db..b3654598a2d5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; lld->udb_density = 1 << adap->params.sge.eq_qpp; lld->ucq_density = 1 << adap->params.sge.iq_qpp; + lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10); lld->filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 5fa9a2d5fc4b..21da34a4ca24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -336,6 +336,7 @@ struct cxgb4_lld_info { unsigned int cclk_ps; /* Core clock period in psec */ unsigned short udb_density; /* # of user DB/page */ unsigned short ucq_density; /* # of user CQs/page */ + unsigned int sge_host_page_size; /* SGE host page size */ unsigned short filt_mode; /* filter optional components */ unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ /* scheduler queue */ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 60641e202534..9a7f70db20c7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1434,7 +1434,8 @@ static 
void enic_rq_indicate_buf(struct vnic_rq *rq, * csum is correct or is zero. */ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && - tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { + tcp_udp_csum_ok && outer_csum_ok && + (ipv4_csum_ok || ipv6)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = encap; } diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496c..f1a2da15dd0a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -585,7 +585,7 @@ static void de_tx (struct de_private *de) netif_dbg(de, tx_done, de->dev, "tx done, slot %d\n", tx_tail); } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } next: diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2370dc204202..697c2427f2b7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +static __u32 fec_enet_register_version = 2; static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = { IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; #else +static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, @@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev, u32 *buf = (u32 *)regbuf; u32 i, off; + regs->version = fec_enet_register_version; + memset(buf, 0, regs->len); for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b90bab72efdb..c1968b3ecec8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } spin_unlock(&priv->lock); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index c3d539e209ed..eb3e65e8868f 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) u16 i, j; u8 __iomem *bd; + netdev_reset_queue(ugeth->ndev); + ug_info = ugeth->ug_info; uf_info = &ug_info->uf_info; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 3b9e74be5fbd..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_dev = dev_get_drvdata(&pdev->dev); if (!dsaf_dev) { dev_err(&pdev->dev, "dsaf_dev is NULL\n"); + put_device(&pdev->dev); return -ENODEV; } @@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct 
fwnode_handle *dsaf_fwnode, bool dereset) if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", dsaf_dev->ae_dev.name); + put_device(&pdev->dev); return -ENODEV; } @@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } + + put_device(&pdev->dev); + return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 5b33238c6680..60e7d7ae3787 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2418,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); out_read_prop_fail: + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); free_netdev(ndev); return ret; } @@ -2447,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev) set_bit(NIC_STATE_REMOVING, &priv->state); (void)cancel_work_sync(&priv->service_task); + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); + free_netdev(ndev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 8e9b95871d30..ce15d2350db9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev) */ static int hns_nic_nway_reset(struct net_device *netdev) { - int ret = 0; struct phy_device *phy = netdev->phydev; - if (netif_running(netdev)) { - /* if autoneg is disabled, don't restart auto-negotiation */ - if (phy && phy->autoneg == AUTONEG_ENABLE) - ret = genphy_restart_aneg(phy); - } + if (!netif_running(netdev)) + return 0; - return ret; + if (!phy) + return -EOPNOTSUPP; + + if (phy->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + return genphy_restart_aneg(phy); } static u32 diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 017e08452d8c..baf5cc251f32 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) } hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C45_WRITE_ADDR, phy_id, devad); + MDIO_C45_READ, phy_id, devad); } /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/ diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index d719668a6684..92929750f832 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) dev->stats.tx_aborted_errors++; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); tx_cmd->cmd.command = 0; /* Mark free */ break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f52e2c46e6a7..e4ff531db14a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); if (!ok) { + /* Log this in case the user has forgotten to give the kernel + 
* any buffers, even later in the application. + */ dev_info(&vsi->back->pdev->dev, - "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", + "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", ring->xsk_umem ? "UMEM enabled " : "", ring->queue_index, pf_q); } @@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); - if (i40e_enabled_xdp_vsi(vsi)) + if (i40e_enabled_xdp_vsi(vsi)) { + /* Make sure that in-progress ndo_xdp_xmit + * calls are completed. + */ + synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[i]); + } i40e_clean_rx_ring(vsi->rx_rings[i]); } @@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, if (old_prog) bpf_prog_put(old_prog); + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. + */ + if (need_reset && prog) + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->xdp_rings[i]->xsk_umem) + (void)i40e_xsk_async_xmit(vsi->netdev, i); + return 0; } @@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) { i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); - if (i40e_enabled_xdp_vsi(vsi)) + if (i40e_enabled_xdp_vsi(vsi)) { + /* Make sure that in-progress ndo_xdp_xmit calls are + * completed. + */ + synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); + } i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a7e14e98889f..6c97667d20ef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; struct i40e_ring *xdp_ring; int drops = 0; int i; @@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; - if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) + if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return -ENXIO; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 870cf654e436..3827f16e6923 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, err = i40e_queue_pair_enable(vsi, qid); if (err) return err; + + /* Kick start the NAPI context so that receiving will start */ + err = i40e_xsk_async_xmit(vsi->netdev, qid); + if (err) + return err; } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index daff8183534b..cb35d8202572 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) else mrqc = IXGBE_MRQC_VMDQRSS64EN; - /* Enable L3/L4 for Tx Switched packets */ - mrqc |= IXGBE_MRQC_L3L4TXSWEN; + /* Enable L3/L4 for Tx 
Switched packets only for X550, + * older devices do not support this feature + */ + if (hw->mac.type >= ixgbe_mac_X550) + mrqc |= IXGBE_MRQC_L3L4TXSWEN; } else { if (tcs > 4) mrqc = IXGBE_MRQC_RTRSS8TCEN; @@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct ixgbe_adapter *adapter = netdev_priv(dev); struct bpf_prog *old_prog; + bool need_reset; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) return -EINVAL; @@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return -ENOMEM; old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); /* If transitioning XDP modes reconfigure rings */ - if (!!prog != !!old_prog) { + if (need_reset) { int err = ixgbe_setup_tc(dev, adapter->hw_tcs); if (err) { @@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) if (old_prog) bpf_prog_put(old_prog); + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. + */ + if (need_reset && prog) + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->xdp_ring[i]->xsk_umem) + (void)ixgbe_xsk_async_xmit(adapter->netdev, i); + return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 65c3e2c979d4..36a8879536a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, ixgbe_txrx_ring_disable(adapter, qid); err = ixgbe_add_xsk_umem(adapter, umem, qid); + if (err) + return err; - if (if_running) + if (if_running) { ixgbe_txrx_ring_enable(adapter, qid); - return err; + /* Kick start the NAPI context so that receiving will start */ + err = ixgbe_xsk_async_xmit(adapter->netdev, qid); + if (err) + return err; + } + + return 0; } static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) @@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) dma_addr_t dma; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring))) { + if (unlikely(!ixgbe_desc_unused(xdp_ring)) || + !netif_carrier_ok(xdp_ring->netdev)) { work_done = false; break; } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2f427271a793..292a668ce88e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_probe(pdev); if (ret) - return ret; + goto err_put_clk; pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) infer_hw_params(msp); return 0; + +err_put_clk: + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9d4568eb2297..8433fb9c3eee 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2146,7 +2146,7 @@ err_drop_frame: if (unlikely(!skb)) goto err_drop_frame_ret_pool; - dma_sync_single_range_for_cpu(dev->dev.parent, + dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, rx_desc->buf_phys_addr, MVNETA_MH_SIZE + NET_SKB_PAD, rx_bytes, diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 04fd1f135011..654ac534b10e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, memset(p, 0, regs->len); memcpy_fromio(p, io, B3_RAM_ADDR); - memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, - regs->len - B3_RI_WTO_R1); + if (regs->len > B3_RI_WTO_R1) { + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); + } } /* Wake on Lan only supported on Yukon chips with rev 1 or above */ diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f3a5fa84860f..57727fe1501e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 200; + pdev->d3_delay = 300; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6b88881b8e35..c1438ae52a11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { - en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", priv->port, dev->dev_addr); err = -EINVAL; goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9a0881cb7f51..6c01314e87b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, } #endif +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) + /* We reach this function only after checking that any of * the (IPv4 | IPv6) bits are set in cqe->status. */ @@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, netdev_features_t dev_features) { __wsum hw_checksum = 0; + void *hdr; + + /* CQE csum doesn't cover padding octets in short ethernet + * frames. And the pad field is appended prior to calculating + * and appending the FCS field. + * + * Detecting these padded frames requires to verify and parse + * IP headers, so we simply force all those small frames to skip + * checksum complete. 
+ */ + if (short_frame(skb->len)) + return -EINVAL; - void *hdr = (u8 *)va + sizeof(struct ethhdr); - + hdr = (u8 *)va + sizeof(struct ethhdr); hw_checksum = csum_unfold((__force __sum16)cqe->checksum); if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && @@ -819,6 +832,11 @@ xdp_drop_no_cnt: skb_record_rx_queue(skb, cq_ring); if (likely(dev->features & NETIF_F_RXCSUM)) { + /* TODO: For IP non TCP/UDP packets when csum complete is + * not an option (not supported or any other reason) we can + * actually check cqe IPOK status bit and report + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE + */ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) && (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3e0fa8a8077b..e267ff93e8a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1583,6 +1583,24 @@ no_trig: spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); } +void mlx5_cmd_flush(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + while (down_trylock(&cmd->sem)) + mlx5_cmd_trigger_completions(dev); + + while (down_trylock(&cmd->pages_sem)) + mlx5_cmd_trigger_completions(dev); + + /* Unlock cmdif */ + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + static int status_to_err(u8 status) { return status ? -1 : 0; /* TBD more meaningful codes */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8fa8fdd30b85..448a92561567 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -657,6 +657,7 @@ struct mlx5e_channel_stats { enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, + MLX5E_STATE_XDP_TX_ENABLED, }; struct mlx5e_rqt { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 046948ead152..f3c7ab6faea5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, e->m_neigh.family = n->ops->family; memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); e->out_dev = out_dev; + e->route_dev = route_dev; /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the @@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->m_neigh.family = n->ops->family; memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); e->out_dev = out_dev; + e->route_dev = route_dev; /* It's importent to add the neigh to the hash table before checking * the neigh validity state. 
So if we'll get a notification, in case the @@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, void *headers_c, - void *headers_v) + void *headers_v, u8 *match_level) { int tunnel_type; int err = 0; tunnel_type = mlx5e_tc_tun_get_type(filter_dev); if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + *match_level = MLX5_MATCH_L4; err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, headers_c, headers_v); } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { + *match_level = MLX5_MATCH_L3; err = mlx5e_tc_tun_parse_gretap(priv, spec, f, headers_c, headers_v); } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 706ce7bf15e7..b63f15de899d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, void *headers_c, - void *headers_v); + void *headers_v, u8 *match_level); #endif //__MLX5_EN_TC_TUNNEL_H__ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 3740177eed09..03b2a9f9c589 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int sq_num; int i; - if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) + /* this flag is sufficient, no need to test internal sq state */ + if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, sq = &priv->channels.c[sq_num]->xdpsq; - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return -ENETDOWN; - for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; struct mlx5e_xdp_info xdpi; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 3a67cb3cd179..ee27a7c8cd87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) +{ + set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + +static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) +{ + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); + /* let other device's napi(s) see our new state */ + synchronize_rcu(); +} + +static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) +{ + return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { if (sq->doorbell_cseg) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3bbccead2f63..47233b9a4f81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, new_channels.params = priv->channels.params; new_channels.params.num_channels = count; - if 
(!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; @@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, if (arfs_enabled) mlx5e_arfs_disable(priv); + if (!netif_is_rxfh_configured(priv->netdev)) + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); + /* Switch to new channels, set new parameters and close old ones */ mlx5e_switch_priv_channels(priv, &new_channels, NULL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8cfd2ec7c0a2..93e50ccd44c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -950,7 +950,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); - if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); return 0; @@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); + mlx5e_xdp_tx_enable(priv); netif_tx_start_all_queues(priv->netdev); if (mlx5e_is_vport_rep(priv)) @@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) */ netif_tx_stop_all_queues(priv->netdev); netif_tx_disable(priv->netdev); + mlx5e_xdp_tx_disable(priv); mlx5e_deactivate_channels(&priv->channels); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 04736212a21c..ef9e472daffb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -596,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { ether_addr_copy(e->h_dest, ha); ether_addr_copy(eth->h_dest, ha); + /* Update the encap source mac, in case that we delete + * the flows when encap source mac changed. 
+ */ + ether_addr_copy(eth->h_source, e->route_dev->dev_addr); mlx5e_tc_encap_flows_add(priv, e); } @@ -1126,9 +1130,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - int ret; + int ret, pf_num; + + ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num); + if (ret) + return ret; + + if (rep->vport == FDB_UPLINK_VPORT) + ret = snprintf(buf, len, "p%d", pf_num); + else + ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1); - ret = snprintf(buf, len, "%d", rep->vport - 1); if (ret >= len) return -EOPNOTSUPP; @@ -1285,6 +1297,18 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) return 0; } +static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n"); + + if (vlan != 0) + return -EOPNOTSUPP; + + /* allow setting 0-vid for compatibility with libvirt */ + return 0; +} + static const struct switchdev_ops mlx5e_rep_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; @@ -1319,6 +1343,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { .ndo_set_vf_rate = mlx5e_set_vf_rate, .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_get_vf_stats = mlx5e_get_vf_stats, + .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, }; bool mlx5e_eswitch_rep(struct net_device *netdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index edd722824697..36eafc877e6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -148,6 +148,7 @@ struct mlx5e_encap_entry { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; + struct net_device *route_dev; int tunnel_type; int tunnel_hlen; int reformat_type; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index cae6c6d48984..b5c1b039375a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr { struct net_device *filter_dev; struct mlx5_flow_spec spec; int num_mod_hdr_actions; + int max_mod_hdr_actions; void *mod_hdr_actions; int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; }; @@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, - struct net_device *filter_dev) + struct net_device *filter_dev, u8 *match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, int err = 0; err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, - headers_c, headers_v); + headers_c, headers_v, match_level); if (err) { NL_SET_ERR_MSG_MOD(extack, "failed to parse tunnel attributes"); @@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, struct net_device *filter_dev, - u8 *match_level) + u8 *match_level, u8 *tunnel_match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -1477,7 +1478,7 @@ 
static int __parse_cls_flower(struct mlx5e_priv *priv, switch (key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - if (parse_tunnel_attr(priv, spec, f, filter_dev)) + if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) return -EOPNOTSUPP; break; default: @@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; + u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; struct mlx5_eswitch_rep *rep; - u8 match_level; int err; - err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); + err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { rep = rpriv->rep; @@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv, } } - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { flow->esw_attr->match_level = match_level; - else + flow->esw_attr->tunnel_match_level = tunnel_match_level; + } else { flow->nic_attr->match_level = match_level; + } return err; } @@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = { OFFLOAD(UDP_DPORT, 2, udp.dest, 0), }; -/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at - * max from the SW pedit action. On success, it says how many HW actions were - * actually parsed. +/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at + * max from the SW pedit action. On success, attr->num_mod_hdr_actions + * says how many HW actions were actually parsed. */ static int offload_pedit_fields(struct pedit_headers *masks, struct pedit_headers *vals, @@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); - action = parse_attr->mod_hdr_actions; - max_actions = parse_attr->num_mod_hdr_actions; - nactions = 0; + action = parse_attr->mod_hdr_actions + + parse_attr->num_mod_hdr_actions * action_size; + + max_actions = parse_attr->max_mod_hdr_actions; + nactions = parse_attr->num_mod_hdr_actions; for (i = 0; i < ARRAY_SIZE(fields); i++) { f = &fields[i]; @@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, if (!parse_attr->mod_hdr_actions) return -ENOMEM; - parse_attr->num_mod_hdr_actions = max_actions; + parse_attr->max_mod_hdr_actions = max_actions; return 0; } @@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, goto out_err; } - err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); - if (err) - goto out_err; + if (!parse_attr->mod_hdr_actions) { + err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); + if (err) + goto out_err; + } err = offload_pedit_fields(masks, vals, parse_attr, extack); if (err < 0) @@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, static bool modify_header_match_supported(struct mlx5_flow_spec *spec, struct tcf_exts *exts, + u32 actions, struct netlink_ext_ack *extack) { const struct tc_action *a; @@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, u16 ethertype; int nkeys, i; - headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); + 
else + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); /* for non-IP we only re-write MACs, so we're okay */ @@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) return modify_header_match_supported(&parse_attr->spec, exts, - extack); + actions, extack); return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 598ad7e4d5c9..0e55cd1f2e98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); if (unlikely(contig_wqebbs_room < num_wqebbs)) { +#ifdef CONFIG_MLX5_EN_IPSEC + struct mlx5_wqe_eth_seg cur_eth = wqe->eth; +#endif mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); mlx5e_sq_fetch_wqe(sq, &wqe, &pi); +#ifdef CONFIG_MLX5_EN_IPSEC + wqe->eth = cur_eth; +#endif } /* fill wqe */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index a44ea7b85614..5b492b67f4e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1134,13 +1134,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, int err = 0; u8 *smac_v; - if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) { - mlx5_core_warn(esw->dev, - "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", - vport->vport); - return -EPERM; - } - esw_vport_cleanup_ingress_rules(esw, vport); if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { @@ -1728,7 +1721,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) int vport_num; int err; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!MLX5_VPORT_MANAGER(dev)) return 0; esw_info(dev, @@ -1797,7 +1790,7 @@ abort: void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) + if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); @@ -1827,13 +1820,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, mutex_lock(&esw->state_lock); evport = &esw->vports[vport]; - if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { + if (evport->info.spoofchk && !is_valid_ether_addr(mac)) mlx5_core_warn(esw->dev, - "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", + "Set invalid MAC while spoofchk is on, vport(%d)\n", vport); - err = -EPERM; - goto unlock; - } err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); if (err) { @@ -1979,6 +1969,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, evport = &esw->vports[vport]; pschk = evport->info.spoofchk; evport->info.spoofchk = spoofchk; + if (pschk && !is_valid_ether_addr(evport->info.mac)) + mlx5_core_warn(esw->dev, + "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); if (evport->enabled && esw->mode == SRIOV_LEGACY) err = esw_vport_ingress_config(esw, evport); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9c89eea9b2c3..748ff178a1d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr { } dests[MLX5_MAX_FLOW_FWD_VPORTS]; u32 mod_hdr_id; u8 match_level; + u8 tunnel_match_level; struct mlx5_fc *counter; u32 chain; u16 prio; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 53065b6ae593..d4e6fe5b9300 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_eswitch_owner_vhca_id); - if (attr->match_level == MLX5_MATCH_NONE) - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - else - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS; - - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + if (attr->tunnel_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + if (attr->match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; + } else if (attr->match_level != MLX5_MATCH_NONE) { + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index fbc42b7252a9..503035469d2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data enum port_module_event_status_type module_status; enum port_module_event_error_type error_type; struct mlx5_eqe_port_module *module_event_eqe; - const char *status_str, *error_str; + const char *status_str; u8 module_num; module_event_eqe = &eqe->data.port_module; - module_num = module_event_eqe->module; module_status = module_event_eqe->module_status & PORT_MODULE_EVENT_MODULE_STATUS_MASK; error_type = module_event_eqe->error_type & @@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data if (module_status < MLX5_MODULE_STATUS_NUM) events->pme_stats.status_counters[module_status]++; - status_str = mlx5_pme_status_to_string(module_status); - if (module_status == MLX5_MODULE_STATUS_ERROR) { + if (module_status == MLX5_MODULE_STATUS_ERROR) if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) events->pme_stats.error_counters[error_type]++; - error_str = mlx5_pme_error_to_string(error_type); - } if (!printk_ratelimit()) return NOTIFY_OK; - if (module_status == MLX5_MODULE_STATUS_ERROR) + module_num = module_event_eqe->module; + status_str = mlx5_pme_status_to_string(module_status); + if (module_status == MLX5_MODULE_STATUS_ERROR) { + const char *error_str = mlx5_pme_error_to_string(error_type); + mlx5_core_err(events->dev, "Port module event[error]: module %u, %s, %s\n", module_num, status_str, error_str); - else + } else { mlx5_core_info(events->dev, "Port module event: module %u, %s\n", module_num, status_str); + } return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 196c07383082..cb9fa3430c53 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) mlx5_core_err(dev, "start\n"); if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; - mlx5_cmd_trigger_completions(dev); + mlx5_cmd_flush(dev); } mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 3a6baed722d8..2d223385dc81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -616,6 +616,27 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) } } +int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num) +{ + struct mlx5_lag *ldev; + int n; + + ldev = mlx5_lag_dev_get(dev); + if (!ldev) { + mlx5_core_warn(dev, "no lag device, can't get pf num\n"); + return -EINVAL; + } + + for (n = 0; n < MLX5_MAX_PORTS; n++) + if (ldev->pf[n].dev == dev) { + *pf_num = n; + return 0; + } + + mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n"); + return -EINVAL; +} + /* Must be called with intf_mutex held */ void mlx5_lag_remove(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index c68dcea5985b..4fdac020b795 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, struct ptp_system_timestamp *sts); void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); +void mlx5_cmd_flush(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); @@ -187,6 +188,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) MLX5_CAP_GEN(dev, lag_master); } +int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num); + void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); void mlx5_lag_update(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 388f205a497f..370ca94b6775 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common * mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) { struct mlx5_core_rsc_common *common; + unsigned long flags; - spin_lock(&table->lock); + spin_lock_irqsave(&table->lock, flags); common = radix_tree_lookup(&table->tree, rsn); if (common) atomic_inc(&common->refcount); - spin_unlock(&table->lock); + spin_unlock_irqrestore(&table->lock, flags); return common; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 32519c93df17..b65e274b02e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; bool pfc = false; + u16 thres_cells; + u16 delay_cells; bool lossy; - u16 thres; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { @@ -877,10 +878,11 @@ int 
__mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, continue; lossy = !(pfc || pause_en); - thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); - delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, - pause_en); - mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, + pfc, pause_en); + mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells, + thres_cells, lossy); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e23ca90289f7..0a868c829b90 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum alu_op alu_op, bool skip) + enum alu_op alu_op) { const struct bpf_insn *insn = &meta->insn; - if (skip) { - meta->skip = true; - return 0; - } - wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); @@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR); } static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND); } static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR); } static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD); } static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB); } static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 24a90163775e..2d8a77cc156b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -53,7 +53,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 33 +#define QED_MINOR_VERSION 37 #define QED_REVISION_VERSION 0 
#define QED_ENGINEERING_VERSION 20 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 8f6551421945..2ecaaaa4469a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, /* get pq index according to PQ_FLAGS */ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, - u32 pq_flags) + unsigned long pq_flags) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; /* Can't have multiple flags set here */ - if (bitmap_weight((unsigned long *)&pq_flags, + if (bitmap_weight(&pq_flags, sizeof(pq_flags) * BITS_PER_BYTE) > 1) { - DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); + DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); goto err; } if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { - DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); + DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); goto err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); + if (!ether_addr_equal(ethh->h_dest, + p_hwfn->p_rdma_info->iwarp.mac_addr)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "Got unexpected mac %pM instead of %pM\n", + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); + return -EINVAL; + } + ether_addr_copy(remote_mac_addr, ethh->h_source); ether_addr_copy(local_mac_addr, ethh->h_dest); @@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; - u32 mpa_buff_size; + u32 buff_size; u16 n_ooo_bufs; int rc = 0; int i; @@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; - data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; + data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ @@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, goto err; } + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, QED_IWARP_LL2_SYN_RX_SIZE, - QED_IWARP_MAX_SYN_PKT_SIZE, + buff_size, iwarp_info->ll2_syn_handle); if (rc) goto err; @@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; - mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, data.input.rx_num_desc, - mpa_buff_size, + buff_size, iwarp_info->ll2_mpa_handle); if (rc) goto err; @@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; - iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); if (!iwarp_info->mpa_intermediate_buf) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -46,7 +46,6 @@ enum qed_iwarp_qp_state 
qed_roce2iwarp_state(enum qed_roce_qp_state state); #define QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) -#define QED_IWARP_MAX_SYN_PKT_SIZE (128) #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) #define QED_IWARP_MAX_OOO (16) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 67c02ea93906..58be1c4c6668 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); @@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, return rc; } + if (p_params->update_ctl_frame_check) { + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; + } + /* Update mcast bins for VFs, PF doesn't use this functionality */ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); @@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 num_queues = 0; /* Since the feature controls only queue-zones, - * make sure we have the contexts [rx, tx, xdp] to + * make sure we have the contexts [rx, xdp, tcs] to * match. */ for_each_hwfn(cdev, i) { @@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 cids; cids = hwfn->pf_params.eth_pf_params.num_cons; - num_queues += min_t(u16, l2_queues, cids / 3); + cids /= (2 + info->num_tc); + num_queues += min_t(u16, l2_queues, cids); } /* queues might theoretically be >256, but interrupts' @@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; - accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; @@ -2860,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) p_hwfn = p_cid->p_owner; rc = qed_get_queue_coalesce(p_hwfn, coal, handle); if (rc) - DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "Unable to read queue coalescing\n"); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8d80f1095d17..7127d5aaac42 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -219,6 +219,9 @@ struct qed_sp_vport_update_params { struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; struct qed_sge_tpa_params *sge_tpa_params; + u8 update_ctl_frame_check; + u8 mac_chk_en; + u8 ethtype_chk_en; }; int qed_sp_vport_update(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index d9237c65a838..b5f419b71287 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -2451,19 +2451,24 @@ 
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, { struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; + u8 flags = 0, nr_frags; int rc = -EINVAL, i; dma_addr_t mapping; u16 vlan = 0; - u8 flags = 0; if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); return -EINVAL; } - if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { + /* Cache number of fragments from SKB since SKB may be freed by + * the completion routine after calling qed_ll2_prepare_tx_packet() + */ + nr_frags = skb_shinfo(skb)->nr_frags; + + if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", - 1 + skb_shinfo(skb)->nr_frags); + 1 + nr_frags); return -EINVAL; } @@ -2485,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, } memset(&pkt, 0, sizeof(pkt)); - pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; + pkt.num_of_bds = 1 + nr_frags; pkt.vlan = vlan; pkt.bd_flags = flags; pkt.tx_dest = QED_LL2_TX_DEST_NW; @@ -2496,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) pkt.remove_stag = true; + /* qed_ll2_prepare_tx_packet() may actually send the packet if + * there are no fragments in the skb and subsequently the completion + * routine may run and free the SKB, so no dereferencing the SKB + * beyond this point unless skb has any fragments. + */ rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, &pkt, 1); if (rc) goto err; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4179c9013fc6..96ab77ae6af5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); * @param p_hwfn */ void qed_consq_free(struct qed_hwfn *p_hwfn); +int qed_spq_pend_post(struct qed_hwfn *p_hwfn); /** * @file diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 888274fa208b..5a495fda9e9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) p_ent->ramrod.pf_update.update_mf_vlan_flag = true; p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + p_ent->ramrod.pf_update.mf_vlan |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); return qed_spq_post(p_hwfn, p_ent, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index eb88bbc6b193..ba64ff9bedbd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); + /* Attempt to post pending requests */ + spin_lock_bh(&p_hwfn->p_spq->lock); + rc = qed_spq_pend_post(p_hwfn); + spin_unlock_bh(&p_hwfn->p_spq->lock); + return rc; } @@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, return 0; } -static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) +int 
qed_spq_pend_post(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; @@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *tmp; struct qed_spq_entry *found = NULL; - int rc; if (!p_hwfn) return -EINVAL; @@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, */ qed_spq_return_entry(p_hwfn, found); - /* Attempt to post pending requests */ - spin_lock_bh(&p_spq->lock); - rc = qed_spq_pend_post(p_hwfn); - spin_unlock_bh(&p_spq->lock); - - return rc; + return 0; } int qed_consq_alloc(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ca6290fa0f30..71a7af134dd8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, params.vport_id = vf->vport_id; params.max_buffers_per_cqe = start->max_buffers_per_cqe; params.mtu = vf->mtu; - params.check_mac = true; + + /* Non trusted VFs should enable control frame filtering */ + params.check_mac = !vf->p_vf_info.is_trusted_configured; rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); if (rc) { @@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; + params.update_ctl_frame_check = 1; + params.mac_chk_en = !vf_info->is_trusted_configured; + if (vf_info->rx_accept_mode & mask) { flags->update_rx_mode_config = 1; flags->rx_accept_filter = vf_info->rx_accept_mode; @@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) } if (flags->update_rx_mode_config || - flags->update_tx_mode_config) + flags->update_tx_mode_config || + params.update_ctl_frame_check) qed_sp_vport_update(hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index b6cccf44bf40..5dda547772c1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vf_pf_resc_request *p_resc; + u8 retry_cnt = VF_ACQUIRE_THRESH; bool resources_acquired = false; struct vfpf_acquire_tlv *req; int rc = 0, attempts = 0; @@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + /* Re-try acquire in case of vf-pf hw channel timeout */ + if (retry_cnt && rc == -EBUSY) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF retrying to acquire due to VPC timeout\n"); + retry_cnt--; + continue; + } + if (rc) goto exit; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 613249d1e967..730997b13747 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -56,7 +56,7 @@ #include <net/tc_act/tc_gact.h> #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 33 +#define QEDE_MINOR_VERSION 37 #define QEDE_REVISION_VERSION 0 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." 
\ @@ -494,6 +494,9 @@ struct qede_reload_args { /* Datapath functions definition */ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_features_t qede_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index bdf816fe5a16..31b046e24565 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + struct qede_dev *edev = netdev_priv(dev); + int total_txq; + + total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; + + return QEDE_TSS_COUNT(edev) ? + fallback(dev, skb, NULL) % total_txq : 0; +} + /* 8B udp header + 8B base tunnel header + 32B option length */ #define QEDE_MAX_TUN_HDR_LEN 48 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5a74fcbdbc2b..9790f26d17c4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 44f6e4873aad..4f910c4f67b0 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp) } bytes_compl += skb->len; pkts_compl++; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } cp->tx_skb[tx_tail] = NULL; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index abb94c543aa2..6e36b88ca7c9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1286,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp) static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) { RTL_W16(tp, IntrStatus, bits); + mmiowb(); } static void rtl_irq_disable(struct rtl8169_private *tp) { RTL_W16(tp, IntrMask, 0); + mmiowb(); } #define RTL_EVENT_NAPI_RX (RxOK | RxErr) @@ -6072,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct device *d = tp_to_dev(tp); dma_addr_t mapping; u32 opts[2], len; - bool stop_queue; int frags; if 
(unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { @@ -6114,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ @@ -6126,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); - if (unlikely(stop_queue)) - netif_stop_queue(dev); + RTL_W8(tp, TxPoll, NPQ); - if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) - RTL_W8(tp, TxPoll, NPQ); + mmiowb(); - if (unlikely(stop_queue)) { + if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); /* Sync with rtl_tx: * - publish queue status and cur_tx ring index (write barrier) * - refresh dirty_tx ring index (read barrier). @@ -6483,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); + rtl_irq_enable(tp); + mmiowb(); } return work_done; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 2f2bda68d861..c08034154a9a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6115,7 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, static int efx_ef10_mtd_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); - DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); + DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; struct efx_mcdi_mtd_partition *parts; size_t outlen, n_parts_total, i, n_parts; unsigned int type; diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 15c62c160953..be47d864f8b9 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) skb = ep->tx_skbuff[entry]; pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); ep->tx_skbuff[entry] = NULL; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 7b923362ee55..3b174eae77c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) } ret = phy_power_on(bsp_priv, true); - if (ret) + if (ret) { + gmac_clk_enable(bsp_priv, false); return ret; + } pm_runtime_enable(dev); pm_runtime_get_sync(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 20299f6f65fc..736e29635b77 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) static int dwmac4_rx_check_timestamp(void *desc) { struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes0 = le32_to_cpu(p->des0); + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes3 = le32_to_cpu(p->des3); u32 own, ctxt; int ret = 1; - own = p->des3 & RDES3_OWN; - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) + own = rdes3 & 
RDES3_OWN; + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); if (likely(!own && ctxt)) { - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) /* Corrupted value */ ret = -EINVAL; else diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1f61c25d82b..3c749c327cbd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -696,33 +696,38 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct ethtool_eee *edata) { struct stmmac_priv *priv = netdev_priv(dev); + int ret; - priv->eee_enabled = edata->eee_enabled; - - if (!priv->eee_enabled) + if (!edata->eee_enabled) { stmmac_disable_eee_mode(priv); - else { + } else { /* We are asking for enabling the EEE but it is safe * to verify all by invoking the eee_init function. * In case of failure it will return an error. */ - priv->eee_enabled = stmmac_eee_init(priv); - if (!priv->eee_enabled) + edata->eee_enabled = stmmac_eee_init(priv); + if (!edata->eee_enabled) return -EOPNOTSUPP; - - /* Do not change tx_lpi_timer in case of failure */ - priv->tx_lpi_timer = edata->tx_lpi_timer; } - return phy_ethtool_set_eee(dev->phydev, edata); + ret = phy_ethtool_set_eee(dev->phydev, edata); + if (ret) + return ret; + + priv->eee_enabled = edata->eee_enabled; + priv->tx_lpi_timer = edata->tx_lpi_timer; + return 0; } static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (usec * (clk / 1000000)) / 256; } @@ -731,8 +736,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (riwt * 256) / (clk / 1000000); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5afba69981cf..685d20472358 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3023,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q = &priv->tx_queue[queue]; + if (priv->tx_path_in_lpi_mode) + stmmac_disable_eee_mode(priv); + /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + /* + * There is no way to determine the number of TSO + * capable Queues. Let's use always the Queue 0 + * because if TSO is supported then at least this + * one will be capable. 
+ */ + skb_set_queue_mapping(skb, 0); + return stmmac_tso_xmit(skb, dev); + } } if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { @@ -3041,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (priv->tx_path_in_lpi_mode) - stmmac_disable_eee_mode(priv); - entry = tx_q->cur_tx; first_entry = entry; WARN_ON(tx_q->tx_skbuff[first_entry]); diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 7ec4eb74fe21..6fc05c106afc 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) cp->net_stats[ring].tx_packets++; cp->net_stats[ring].tx_bytes += skb->len; spin_unlock(&cp->stat_lock[ring]); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } cp->tx_old[ring] = entry; diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 720b7ac77f3b..e9b757b03b56 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp) DTX(("skb(%p) ", skb)); bp->tx_skbs[elem] = NULL; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); elem = NEXT_TX(elem); } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index ff641cf30a4e..d007dfeba5c3 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp) this = &txbase[elem]; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); dev->stats.tx_packets++; } hp->tx_old = elem; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dc966ddb6d81..b24c11187017 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) tx_level -= db->rptr->len; /* '-' koz len is negative */ /* now should come skb pointer - free it */ - dev_kfree_skb_irq(db->rptr->addr.skb); + dev_consume_skb_irq(db->rptr->addr.skb); bdx_tx_db_inc_rptr(db); } diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 810dfc7de1f9..e2d47b24a869 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue) netdev_dbg(dev, "sent 0x%p, len=%d\n", desc->skb, desc->skb->len); - dev_kfree_skb_irq(desc->skb); + dev_consume_skb_irq(desc->skb); desc->skb = NULL; if (__netif_subqueue_stopped(dev, queue)) netif_wake_subqueue(dev, queue); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1f612268c998..d847f672a705 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device, const char *name; char node_name[32]; - if (of_property_read_string(node, "label", &name) < 0) { + if (of_property_read_string(child, "label", &name) < 0) { snprintf(node_name, sizeof(node_name), "%pOFn", child); name = node_name; } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 82412691ee66..27f6cf140845 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, 
dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], le16_to_cpu(pktlen), DMA_TO_DEVICE); } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); tdinfo->skb = NULL; } diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 38ac8ef41f5f..56b7791911bf 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp) bp->descr_block_virt->xmt_data[comp].long_1, p_xmt_drv_descr->p_skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); + dev_consume_skb_irq(p_xmt_drv_descr->p_skb); /* * Move to start of next packet by updating completion index diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 58bbba8582b0..3377ac66a347 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev, } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { - struct rt6_info *rt = rt6_lookup(geneve->net, - &info->key.u.ipv6.dst, NULL, 0, - NULL, 0); + struct rt6_info *rt; + + if (!__in6_dev_get(dev)) + break; + + rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, + NULL, 0); if (rt && rt->dst.dev) ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 44de81e5f140..c589f5ae75bb 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context) } break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): - /* rx is starting */ - dev_dbg(printdev(lp), "RX is starting\n"); - mcr20a_handle_rx(lp); + /* rx is starting */ + dev_dbg(printdev(lp), "RX is starting\n"); + mcr20a_handle_rx(lp); break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 19bdde60680c..07e41c42bcf5 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); if (!err) { mdev->l3mdev_ops = &ipvl_l3mdev_ops; - mdev->priv_flags |= IFF_L3MDEV_MASTER; + mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER; } else goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ - mdev->priv_flags &= ~IFF_L3MDEV_MASTER; + mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); mdev->l3mdev_ops = NULL; } @@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev) struct sk_buff *skb; if (port->mode == IPVLAN_MODE_L3S) { - dev->priv_flags &= ~IFF_L3MDEV_MASTER; + dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; ipvlan_unregister_nf_hook(dev_net(dev)); dev->l3mdev_ops = NULL; } @@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev, if (!data) return 0; + if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; if (data[IFLA_IPVLAN_MODE]) { u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); @@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, struct ipvl_dev *tmp = netdev_priv(phy_dev); phy_dev = tmp->phy_dev; + if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; } else if (!netif_is_ipvlan_port(phy_dev)) { /* Exit early if the underlying link is invalid or busy */ if (phy_dev->type != ARPHRD_ETHER || diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 18b41bc345ab..6e8807212aa3 
100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640, struct phy_txts *phy_txts) { struct skb_shared_hwtstamps shhwtstamps; + struct dp83640_skb_info *skb_info; struct sk_buff *skb; - u64 ns; u8 overflow; + u64 ns; /* We must already have the skb that triggered this. */ - +again: skb = skb_dequeue(&dp83640->tx_queue); - if (!skb) { pr_debug("have timestamp but tx_queue empty\n"); return; @@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640, } return; } + skb_info = (struct dp83640_skb_info *)skb->cb; + if (time_after(jiffies, skb_info->tmo)) { + kfree_skb(skb); + goto again; + } ns = phy2txts(phy_txts); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); @@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, static void dp83640_txtstamp(struct phy_device *phydev, struct sk_buff *skb, int type) { + struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; struct dp83640_private *dp83640 = phydev->priv; switch (dp83640->hwts_tx_en) { @@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, /* fall through */ case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; skb_queue_tail(&dp83640->tx_queue, skb); break; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2e12f982534f..abb7876a8776 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev) /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - /* Select page 18 */ err = marvell_set_page(phydev, 18); if (err < 0) @@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev) err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) return err; - - /* There appears to be a bug in the 88e1512 when used in - * SGMII to copper mode, where the AN advertisement register - * clears the pause bits each time a negotiation occurs. - * This means we can never be truely sure what was advertised, - * so disable Pause support. 
- */ - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->advertising); } return m88e1318_config_init(phydev); diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 82ab6ed3b74e..6bac602094bd 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -26,6 +26,8 @@ #include <linux/marvell_phy.h> #include <linux/phy.h> +#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0 + enum { MV_PCS_BASE_T = 0x0000, MV_PCS_BASE_R = 0x1000, @@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev) else reg = 0; + /* Make sure we clear unsupported 2.5G/5G advertising */ ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, - MDIO_AN_10GBT_CTRL_ADV10G, reg); + MDIO_AN_10GBT_CTRL_ADV10G | + MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg); if (ret < 0) return ret; if (ret > 0) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 66b9cfe692fc..7368616286ae 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) err = device_register(&bus->dev); if (err) { pr_err("mii_bus %s failed to register\n", bus->id); - put_device(&bus->dev); return -EINVAL; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 189cd2048c3a..c5675df5fc6f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -553,7 +553,7 @@ int phy_start_aneg(struct phy_device *phydev) if (err < 0) goto out_unlock; - if (__phy_is_started(phydev)) { + if (phy_is_started(phydev)) { if (phydev->autoneg == AUTONEG_ENABLE) { err = phy_check_link_status(phydev); } else { @@ -709,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev) cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); - if (__phy_is_started(phydev)) + if (phy_is_started(phydev)) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); } @@ -762,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) { struct phy_device *phydev = phy_dat; - if (!phy_is_started(phydev)) - return IRQ_NONE; /* It can't be ours. */ - if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) return IRQ_NONE; @@ -842,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts); */ void phy_stop(struct phy_device *phydev) { - mutex_lock(&phydev->lock); - - if (!__phy_is_started(phydev)) { + if (!phy_is_started(phydev)) { WARN(1, "called from state %s\n", phy_state_to_str(phydev->state)); - mutex_unlock(&phydev->lock); return; } + mutex_lock(&phydev->lock); + if (phy_interrupt_is_valid(phydev)) phy_disable_interrupts(phydev); @@ -989,8 +985,10 @@ void phy_state_machine(struct work_struct *work) * state machine would be pointless and possibly error prone when * called from phy_disconnect() synchronously. 
*/ + mutex_lock(&phydev->lock); if (phy_polling_mode(phydev) && phy_is_started(phydev)) phy_queue_state_machine(phydev, PHY_STATE_TIME); + mutex_unlock(&phydev->lock); } /** diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index e7becc7379d7..938803237d7f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl) queue_work(system_power_efficient_wq, &pl->resolve); } +static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) +{ + unsigned long state = pl->phylink_disable_state; + + set_bit(bit, &pl->phylink_disable_state); + if (state == 0) { + queue_work(system_power_efficient_wq, &pl->resolve); + flush_work(&pl->resolve); + } +} + static void phylink_fixed_poll(struct timer_list *t) { struct phylink *pl = container_of(t, struct phylink, link_poll); @@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl) if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) del_timer_sync(&pl->link_poll); - set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); - queue_work(system_power_efficient_wq, &pl->resolve); - flush_work(&pl->resolve); + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); } EXPORT_SYMBOL_GPL(phylink_stop); @@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream) ASSERT_RTNL(); - set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); - queue_work(system_power_efficient_wq, &pl->resolve); - flush_work(&pl->resolve); + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); } static void phylink_sfp_link_up(void *upstream) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index c6010fb1aa0f..cb4a23041a94 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = { .name = "RTL8366RB Gigabit Ethernet", .features = PHY_GBIT_FEATURES, .config_init = &rtl8366rb_config_init, + /* These interrupts are handled by the irq controller + * embedded inside the RTL8366RB, they get unmasked when the + * irq is requested and ACKed by reading the status register, + * which is done by the irqchip code. 
+ */ + .ack_interrupt = genphy_no_ack_interrupt, + .config_intr = genphy_no_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ad9db652874d..fef701bfad62 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus) return ret; } } + bus->socket_ops->attach(bus->sfp); if (bus->started) bus->socket_ops->start(bus->sfp); bus->netdev->sfp_bus = bus; @@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) if (bus->registered) { if (bus->started) bus->socket_ops->stop(bus->sfp); + bus->socket_ops->detach(bus->sfp); if (bus->phydev && ops && ops->disconnect_phy) ops->disconnect_phy(bus->upstream); } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index fd8bb998ae52..68c8fbf099f8 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -184,6 +184,7 @@ struct sfp { struct gpio_desc *gpio[GPIO_MAX]; + bool attached; unsigned int state; struct delayed_work poll; struct delayed_work timeout; @@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) */ switch (sfp->sm_mod_state) { default: - if (event == SFP_E_INSERT) { + if (event == SFP_E_INSERT && sfp->attached) { sfp_module_tx_disable(sfp); sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); } @@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) mutex_unlock(&sfp->sm_mutex); } +static void sfp_attach(struct sfp *sfp) +{ + sfp->attached = true; + if (sfp->state & SFP_F_PRESENT) + sfp_sm_event(sfp, SFP_E_INSERT); +} + +static void sfp_detach(struct sfp *sfp) +{ + sfp->attached = false; + sfp_sm_event(sfp, SFP_E_REMOVE); +} + static void sfp_start(struct sfp *sfp) { sfp_sm_event(sfp, SFP_E_DEV_UP); @@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, } static const struct sfp_socket_ops sfp_module_ops = { + .attach = sfp_attach, + .detach = sfp_detach, .start = sfp_start, .stop = sfp_stop, .module_info = sfp_module_info, @@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev) dev_info(sfp->dev, "Host maximum power %u.%uW\n", sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); - sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); - if (!sfp->sfp_bus) - return -ENOMEM; - /* Get the initial state, and always signal TX disable, * since the network interface will not be up. 
*/ @@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev) sfp->state |= SFP_F_RATE_SELECT; sfp_set_state(sfp, sfp->state); sfp_module_tx_disable(sfp); - rtnl_lock(); - if (sfp->state & SFP_F_PRESENT) - sfp_sm_event(sfp, SFP_E_INSERT); - rtnl_unlock(); for (i = 0; i < GPIO_MAX; i++) { if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) @@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev) dev_warn(sfp->dev, "No tx_disable pin: SFP modules will always be emitting.\n"); + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); + if (!sfp->sfp_bus) + return -ENOMEM; + return 0; } diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 31b0acf337e2..64f54b0bbd8c 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -7,6 +7,8 @@ struct sfp; struct sfp_socket_ops { + void (*attach)(struct sfp *sfp); + void (*detach)(struct sfp *sfp); void (*start)(struct sfp *sfp); void (*stop)(struct sfp *sfp); int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 74a8782313cf..bd6084e315de 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) u16 val = 0; int err; - err = priv->phy_drv->read_status(phydev); + if (priv->phy_drv->read_status) + err = priv->phy_drv->read_status(phydev); + else + err = genphy_read_status(phydev); if (err < 0) return err; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index afd9d25d1992..6ce3f666d142 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, } } -static bool __team_option_inst_tmp_find(const struct list_head *opts, - const struct team_option_inst *needle) -{ - struct team_option_inst *opt_inst; - - list_for_each_entry(opt_inst, opts, tmp_list) - if (opt_inst == needle) - return true; - return false; -} - static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) @@ -1267,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); __team_compute_features(team); - __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); + __team_port_change_port_added(port, !!netif_oper_up(port_dev)); __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); @@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) int err = 0; int i; struct nlattr *nl_option; - LIST_HEAD(opt_inst_list); rtnl_lock(); @@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; struct nlattr *attr; struct nlattr *attr_data; + LIST_HEAD(opt_inst_list); enum team_option_type opt_type; int opt_port_ifindex = 0; /* != 0 for per-port options */ u32 opt_array_index = 0; @@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) if (err) goto team_put; opt_inst->changed = true; - - /* dumb/evil user-space can send us duplicate opt, - * keep only the last one - */ - if (__team_option_inst_tmp_find(&opt_inst_list, - opt_inst)) - continue; - list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { err = -ENOENT; goto team_put; } 
- } - err = team_nl_send_event_options_get(team, &opt_inst_list); + err = team_nl_send_event_options_get(team, &opt_inst_list); + if (err) + break; + } team_put: team_nl_team_put(team); @@ -2932,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: - if (netif_carrier_ok(dev)) + if (netif_oper_up(dev)) team_port_change_check(port, true); break; case NETDEV_DOWN: diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 18656c4094b3..fed298c0cb39 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, if (rtnl_dereference(tun->xdp_prog)) sock_set_flag(&tfile->sk, SOCK_XDP); - tun_set_real_num_queues(tun); - /* device is allowed to go away first, so no need to hold extra * refcnt. */ @@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; + tun_set_real_num_queues(tun); out: return err; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 735ad838e2ba..18af2f8eee96 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 60dd1ec1665f..86c8c64fbb0f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -557,6 +557,7 @@ enum spd_duplex { /* MAC PASSTHRU */ #define AD_MASK 0xfee0 #define BND_MASK 0x0004 +#define BD_MASK 0x0001 #define EFUSE 0xcfdb #define PASS_THRU_MASK 0x1 @@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) return -ENODEV; } } else { - /* test for RTL8153-BND */ + /* test for RTL8153-BND and RTL8153-BD */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); - if ((ocp_data & BND_MASK) == 0) { + if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) { netif_dbg(tp, probe, tp->netdev, "Invalid variant for MAC pass through\n"); return -ENODEV; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8fadd8eaf601..4cfceb789eea 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644); #define VIRTIO_XDP_TX BIT(0) #define VIRTIO_XDP_REDIR BIT(1) +#define VIRTIO_XDP_FLAG BIT(0) + /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. 
As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -252,6 +254,21 @@ struct padded_vnet_hdr { char padding[4]; }; +static bool is_xdp_frame(void *ptr) +{ + return (unsigned long)ptr & VIRTIO_XDP_FLAG; +} + +static void *xdp_to_ptr(struct xdp_frame *ptr) +{ + return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); +} + +static struct xdp_frame *ptr_to_xdp(void *ptr) +{ + return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); +} + /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ @@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, sg_init_one(sq->sg, xdpf->data, xdpf->len); - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), + GFP_ATOMIC); if (unlikely(err)) return -ENOSPC; /* Caller handle free/refcnt */ @@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev, { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; - struct xdp_frame *xdpf_sent; struct bpf_prog *xdp_prog; struct send_queue *sq; unsigned int len; + int packets = 0; + int bytes = 0; int drops = 0; int kicks = 0; int ret, err; + void *ptr; int i; - sq = virtnet_xdp_sq(vi); - - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { - ret = -EINVAL; - drops = n; - goto out; - } - /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. */ xdp_prog = rcu_dereference(rq->xdp_prog); - if (!xdp_prog) { - ret = -ENXIO; + if (!xdp_prog) + return -ENXIO; + + sq = virtnet_xdp_sq(vi); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; drops = n; goto out; } /* Free up any pending old buffers before queueing new ones. 
*/ - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(is_xdp_frame(ptr))) { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += frame->len; + xdp_return_frame(frame); + } else { + struct sk_buff *skb = ptr; + + bytes += skb->len; + napi_consume_skb(skb, false); + } + packets++; + } for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; @@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, } out: u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; sq->stats.xdp_tx += n; sq->stats.xdp_tx_drops += drops; sq->stats.kicks += kicks; @@ -1332,18 +1363,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget, static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { - struct sk_buff *skb; unsigned int len; unsigned int packets = 0; unsigned int bytes = 0; + void *ptr; - while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { - pr_debug("Sent skb %p\n", skb); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(!is_xdp_frame(ptr))) { + struct sk_buff *skb = ptr; - bytes += skb->len; - packets++; + pr_debug("Sent skb %p\n", skb); + + bytes += skb->len; + napi_consume_skb(skb, in_napi); + } else { + struct xdp_frame *frame = ptr_to_xdp(ptr); - napi_consume_skb(skb, in_napi); + bytes += frame->len; + xdp_return_frame(frame); + } + packets++; } /* Avoid overhead when no packets have been processed @@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) u64_stats_update_end(&sq->stats.syncp); } +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) +{ + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) + return false; + else if (q < vi->curr_queue_pairs) + return true; + else + return false; +} + static void virtnet_poll_cleantx(struct receive_queue *rq) { struct virtnet_info *vi = rq->vq->vdev->priv; @@ -1365,7 +1414,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) struct send_queue *sq = &vi->sq[index]; struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); - if (!sq->napi.weight) + if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) return; if (__netif_tx_trylock(txq)) { @@ -1442,8 +1491,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) { struct send_queue *sq = container_of(napi, struct send_queue, napi); struct virtnet_info *vi = sq->vq->vdev->priv; - struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); + unsigned int index = vq2txq(sq->vq); + struct netdev_queue *txq; + if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { + /* We don't need to enable cb for XDP */ + napi_complete_done(napi, 0); + return 0; + } + + txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); @@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return -ENOMEM; } + old_prog = rtnl_dereference(vi->rq[0].xdp_prog); + if (!prog && !old_prog) + return 0; + if (prog) { prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); if (IS_ERR(prog)) @@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX. 
*/ - if (netif_running(dev)) - for (i = 0; i < vi->max_queue_pairs; i++) + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } + } + + if (!prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0) + virtnet_restore_guest_offloads(vi); + } + synchronize_net(); + } - netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) goto err; + netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); vi->xdp_queue_pairs = xdp_qp; - for (i = 0; i < vi->max_queue_pairs; i++) { - old_prog = rtnl_dereference(vi->rq[i].xdp_prog); - rcu_assign_pointer(vi->rq[i].xdp_prog, prog); - if (i == 0) { - if (!old_prog) + if (prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); - if (!prog) - virtnet_restore_guest_offloads(vi); } + } + + for (i = 0; i < vi->max_queue_pairs; i++) { if (old_prog) bpf_prog_put(old_prog); - if (netif_running(dev)) + if (netif_running(dev)) { virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } } return 0; err: - for (i = 0; i < vi->max_queue_pairs; i++) - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (!prog) { + virtnet_clear_guest_offloads(vi); + for (i = 0; i < vi->max_queue_pairs; i++) + rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); + } + + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } + } if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; @@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) put_page(vi->rq[i].alloc_frag.page); } -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) -{ - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) - return false; - else if (q < vi->curr_queue_pairs) - return true; - else - return false; -} - static void free_unused_bufs(struct virtnet_info *vi) { void *buf; @@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { - if (!is_xdp_raw_buffer_queue(vi, i)) + if (!is_xdp_frame(buf)) dev_kfree_skb(buf); else - put_page(virt_to_head_page(buf)); + xdp_return_frame(ptr_to_xdp(buf)); } } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 95909e262ba4..7c1430ed0244 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev) /* default to no qdisc; user can add if desired */ dev->priv_flags |= IFF_NO_QUEUE; + + dev->min_mtu = 0; + dev->max_mtu = 0; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5209ee9aac47..2aae11feff0c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; - struct net_device *dev = skb->dev; + struct net_device *dev; int len = skb->len; tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); @@ 
-2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, #endif } + rcu_read_lock(); + dev = skb->dev; + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + goto drop; + } + if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, - vni); + vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; @@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, rx_stats->rx_bytes += len; u64_stats_update_end(&rx_stats->syncp); } else { +drop: dev->stats.rx_dropped++; } + rcu_read_unlock(); } static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index c0b0f525c87c..27decf8ae840 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -1575,7 +1575,7 @@ try: dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); dpriv->tx_skbuff[cur] = NULL; ++dpriv->tx_dirty; } else { diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 66d889d54e58..a08f04c3f644 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) memset(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 0, skb->len); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); priv->tx_skbuff[priv->skb_dirtytx] = NULL; priv->skb_dirtytx = diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 399b501f3c3c..e8891f5fc83a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { { .id = WCN3990_HW_1_0_DEV_VERSION, .dev_id = 0, - .bus = ATH10K_BUS_PCI, + .bus = ATH10K_BUS_SNOC, .name = "wcn3990 hw1.0", .continuous_frag_desc = true, .tx_chain_mask = 0x7, diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 491ca3c8b43c..83d5bceea08f 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -1,6 +1,6 @@ config IWLWIFI tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " - depends on PCI && HAS_IOMEM + depends on PCI && HAS_IOMEM && CFG80211 select FW_LOADER ---help--- Select to build the driver supporting the: @@ -47,6 +47,7 @@ if IWLWIFI config IWLWIFI_LEDS bool depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI + depends on IWLMVM || IWLDVM select LEDS_TRIGGERS select MAC80211_LEDS default y diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 320edcac4699..6359053bd0c7 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) goto out_err; } - genlmsg_reply(skb, info); + res = genlmsg_reply(skb, info); break; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 497e762978cc..b2cabce1d74d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) 
mt76x02_add_rate_power_offset(t, delta); } -void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) +void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp) { struct mt76x0_chan_map { u8 chan; u8 offset; } chan_map[] = { - { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, - { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, - { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, - { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, - { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, - { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, - { 167, 17 }, { 171, 18 }, { 173, 19 }, + { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 }, + { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 }, + { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 }, + { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 }, + { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 }, + { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 }, + { 167, 34 }, { 171, 36 }, { 175, 38 }, }; struct ieee80211_channel *chan = dev->mt76.chandef.chan; u8 offset, addr; + int i, idx = 0; u16 data; - int i; if (mt76x0_tssi_enabled(dev)) { s8 target_power; @@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) else data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; - info[0] = target_power + mt76x0_get_delta(dev); - info[1] = 0; + *tp = target_power + mt76x0_get_delta(dev); return; } for (i = 0; i < ARRAY_SIZE(chan_map); i++) { - if (chan_map[i].chan <= chan->hw_value) { + if (chan->hw_value <= chan_map[i].chan) { + idx = (chan->hw_value == chan_map[i].chan); offset = chan_map[i].offset; break; } @@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) addr = MT_EE_TX_POWER_DELTA_BW80 + offset; } else { switch (chan->hw_value) { + case 42: + offset = 2; + break; case 58: offset = 8; break; case 106: offset = 14; break; - case 112: + case 122: offset = 20; break; case 155: @@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) } data = mt76x02_eeprom_get(dev, addr); - - info[0] = data; - if (!info[0] || info[0] > 0x3f) - info[0] = 5; - - info[1] = data >> 8; - if (!info[1] || info[1] > 0x3f) - info[1] = 5; + *tp = data >> (8 * idx); + if (*tp < 0 || *tp > 0x3f) + *tp = 5; } static int mt76x0_check_eeprom(struct mt76x02_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index ee9ade9f3c8b..42b259f90b6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h @@ -26,7 +26,7 @@ struct mt76x02_dev; int mt76x0_eeprom_init(struct mt76x02_dev *dev); void mt76x0_read_rx_gain(struct mt76x02_dev *dev); void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); -void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); +void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp); static inline s8 s6_to_s8(u32 val) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 1eb1a802ed20..b6166703ad76 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev) void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) { struct mt76_rate_power *t = &dev->mt76.rate_power; - u8 info[2]; + s8 info; mt76x0_get_tx_power_per_rate(dev); - mt76x0_get_power_info(dev, info); + mt76x0_get_power_info(dev, &info); - mt76x02_add_rate_power_offset(t, 
info[0]); + mt76x02_add_rate_power_offset(t, info); mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); - mt76x02_add_rate_power_offset(t, -info[0]); + mt76x02_add_rate_power_offset(t, -info); - mt76x02_phy_set_txpower(dev, info[0], info[1]); + mt76x02_phy_set_txpower(dev, info, info); } void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0e6b43bb4678..a5ea3ba495a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = { .get_txpower = mt76x02_get_txpower, }; -static int mt76x0u_register_device(struct mt76x02_dev *dev) +static int mt76x0u_init_hardware(struct mt76x02_dev *dev) { - struct ieee80211_hw *hw = dev->mt76.hw; int err; - err = mt76u_alloc_queues(&dev->mt76); - if (err < 0) - goto out_err; - - err = mt76u_mcu_init_rx(&dev->mt76); - if (err < 0) - goto out_err; - mt76x0_chip_onoff(dev, true, true); - if (!mt76x02_wait_for_mac(&dev->mt76)) { - err = -ETIMEDOUT; - goto out_err; - } + + if (!mt76x02_wait_for_mac(&dev->mt76)) + return -ETIMEDOUT; err = mt76x0u_mcu_init(dev); if (err < 0) - goto out_err; + return err; mt76x0_init_usb_dma(dev); err = mt76x0_init_hardware(dev); if (err < 0) - goto out_err; + return err; mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); + return 0; +} + +static int mt76x0u_register_device(struct mt76x02_dev *dev) +{ + struct ieee80211_hw *hw = dev->mt76.hw; + int err; + + err = mt76u_alloc_queues(&dev->mt76); + if (err < 0) + goto out_err; + + err = mt76u_mcu_init_rx(&dev->mt76); + if (err < 0) + goto out_err; + + err = mt76x0u_init_hardware(dev); + if (err < 0) + goto out_err; + err = mt76x0_register_device(dev); if (err < 0) goto out_err; @@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, mt76u_stop_queues(&dev->mt76); mt76x0u_mac_stop(dev); + clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); + mt76x0_chip_onoff(dev, false, false); usb_kill_urb(usb->mcu.res.urb); return 0; @@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) tasklet_enable(&usb->rx_tasklet); tasklet_enable(&usb->tx_tasklet); - ret = mt76x0_init_hardware(dev); + ret = mt76x0u_init_hardware(dev); if (ret) goto err; diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index bd10165d7eec..4d4b07701149 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) } sdio_claim_host(func); + /* + * To guarantee that the SDIO card is power cycled, as required to make + * the FW programming to succeed, let's do a brute force HW reset. 
+ */ + mmc_hw_reset(card->host); + sdio_enable_func(func); sdio_release_host(func); @@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; - int error; sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); /* Let runtime PM know the card is powered off */ - error = pm_runtime_put(&card->dev); - if (error < 0 && error != -EBUSY) { - dev_err(&card->dev, "%s failed: %i\n", __func__, error); - - return error; - } - + pm_runtime_put(&card->dev); return 0; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 150e49723c15..6a9dd68c0f4f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, * effects say only one namespace is affected. */ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { + mutex_lock(&ctrl->scan_lock); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); } @@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) */ if (effects & NVME_CMD_EFFECTS_LBCC) nvme_update_formats(ctrl); - if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { nvme_unfreeze(ctrl); + mutex_unlock(&ctrl->scan_lock); + } if (effects & NVME_CMD_EFFECTS_CCC) nvme_init_identify(ctrl); if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) @@ -3401,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work) if (nvme_identify_ctrl(ctrl, &id)) return; + mutex_lock(&ctrl->scan_lock); nn = le32_to_cpu(id->nn); if (ctrl->vs >= NVME_VS(1, 1, 0) && !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { @@ -3409,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work) } nvme_scan_ns_sequential(ctrl, nn); out_free_id: + mutex_unlock(&ctrl->scan_lock); kfree(id); down_write(&ctrl->namespaces_rwsem); list_sort(NULL, &ctrl->namespaces, ns_cmp); @@ -3652,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->state = NVME_CTRL_NEW; spin_lock_init(&ctrl->lock); + mutex_init(&ctrl->scan_lock); INIT_LIST_HEAD(&ctrl->namespaces); init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ab961bdeea89..c4a1bb41abf0 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -154,6 +154,7 @@ struct nvme_ctrl { enum nvme_ctrl_state state; bool identified; spinlock_t lock; + struct mutex scan_lock; const struct nvme_ctrl_ops *ops; struct request_queue *admin_q; struct request_queue *connect_q; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9bc585415d9b..7fee665ec45e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2557,27 +2557,18 @@ static void nvme_reset_work(struct work_struct *work) if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) nvme_dev_disable(dev, false); - /* - * Introduce CONNECTING state from nvme-fc/rdma transports to mark the - * initializing procedure here. 
- */ - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { - dev_warn(dev->ctrl.device, - "failed to mark controller CONNECTING\n"); - goto out; - } - + mutex_lock(&dev->shutdown_lock); result = nvme_pci_enable(dev); if (result) - goto out; + goto out_unlock; result = nvme_pci_configure_admin_queue(dev); if (result) - goto out; + goto out_unlock; result = nvme_alloc_admin_tags(dev); if (result) - goto out; + goto out_unlock; /* * Limit the max command size to prevent iod->sg allocations going @@ -2585,6 +2576,17 @@ static void nvme_reset_work(struct work_struct *work) */ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; dev->ctrl.max_segments = NVME_MAX_SEGS; + mutex_unlock(&dev->shutdown_lock); + + /* + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the + * initializing procedure here. + */ + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(dev->ctrl.device, + "failed to mark controller CONNECTING\n"); + goto out; + } result = nvme_init_identify(&dev->ctrl); if (result) @@ -2649,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work) nvme_start_ctrl(&dev->ctrl); return; + out_unlock: + mutex_unlock(&dev->shutdown_lock); out: nvme_remove_dead_ctrl(dev, result); } diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 52e47dac028f..80f843030e36 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev) imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); if (IS_ERR(imx6_pcie->pd_pcie)) return PTR_ERR(imx6_pcie->pd_pcie); + /* Do nothing when power domain missing */ + if (!imx6_pcie->pd_pcie) + return 0; link = device_link_add(dev, imx6_pcie->pd_pcie, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | @@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev) if (IS_ERR(imx6_pcie->pd_pcie_phy)) return PTR_ERR(imx6_pcie->pd_pcie_phy); - device_link_add(dev, imx6_pcie->pd_pcie_phy, + link = device_link_add(dev, imx6_pcie->pd_pcie_phy, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); - if (IS_ERR(link)) { - dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); - return PTR_ERR(link); + if (!link) { + dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); + return -EINVAL; } return 0; diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index b171b6bc15c8..0c389a30ef5d 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -22,7 +22,6 @@ #include <linux/resource.h> #include <linux/of_pci.h> #include <linux/of_irq.h> -#include <linux/gpio/consumer.h> #include "pcie-designware.h" @@ -30,7 +29,6 @@ struct armada8k_pcie { struct dw_pcie *pci; struct clk *clk; struct clk *clk_reg; - struct gpio_desc *reset_gpio; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 @@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp) struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct armada8k_pcie *pcie = to_armada8k_pcie(pci); - if (pcie->reset_gpio) { - /* assert and then deassert the reset signal */ - gpiod_set_value_cansleep(pcie->reset_gpio, 1); - msleep(100); - gpiod_set_value_cansleep(pcie->reset_gpio, 0); - } dw_pcie_setup_rc(pp); armada8k_pcie_establish_link(pcie); @@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) goto fail_clkreg; } - /* Get reset gpio signal and hold asserted (logically 
high) */ - pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", - GPIOD_OUT_HIGH); - if (IS_ERR(pcie->reset_gpio)) { - ret = PTR_ERR(pcie->reset_gpio); - goto fail_clkreg; - } - platform_set_drvdata(pdev, pcie); ret = armada8k_add_pcie_port(pcie, pdev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b0a413f3f7ca..e2a879e93d86 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev) break; } } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, - quirk_synopsys_haps); +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_XHCI, 0, + quirk_synopsys_haps); /* * Let's make the southbridge information explicit instead of having to diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 05044e323ea5..03ec7a5d9d0b 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, {} diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 1817786ab6aa..a005cbccb4f7 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -45,12 +45,14 @@ config PINCTRL_MT2701 config PINCTRL_MT7623 bool "Mediatek MT7623 pin control with generic binding" depends on MACH_MT7623 || COMPILE_TEST + depends on OF default MACH_MT7623 select PINCTRL_MTK_MOORE config PINCTRL_MT7629 bool "Mediatek MT7629 pin control" depends on MACH_MT7629 || COMPILE_TEST + depends on OF default MACH_MT7629 select PINCTRL_MTK_MOORE @@ -92,6 +94,7 @@ config PINCTRL_MT6797 config PINCTRL_MT7622 bool "MediaTek MT7622 pin control" + depends on OF depends on ARM64 || COMPILE_TEST default ARM64 && ARCH_MEDIATEK select PINCTRL_MTK_MOORE diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index c69ca95b1ad5..0f140a802137 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = { static const char * const sdxc_a_groups[] = { "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", - "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" + "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a" }; static const char * const pcm_a_groups[] = { diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index b03481ef99a1..98905d4a79ca 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ 
b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, break; case MCP_TYPE_S18: + one_regmap_config = + devm_kmemdup(dev, &mcp23x17_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + if (!one_regmap_config) + return -ENOMEM; mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x17_regmap); + one_regmap_config); mcp->reg_shift = 1; mcp->chip.ngpio = 16; mcp->chip.label = "mcp23s18"; diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c index 7aae52a09ff0..4ffd56ff809e 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs404.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c @@ -79,7 +79,7 @@ enum { .intr_cfg_reg = 0, \ .intr_status_reg = 0, \ .intr_target_reg = 0, \ - .tile = NORTH, \ + .tile = SOUTH, \ .mux_bit = -1, \ .pull_bit = pull, \ .drv_bit = drv, \ diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c index aa8b58125568..ef4268cc6227 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c @@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 }; static const struct sunxi_pinctrl_desc h6_pinctrl_data = { .pins = h6_pins, .npins = ARRAY_SIZE(h6_pins), - .irq_banks = 3, + .irq_banks = 4, .irq_bank_map = h6_irq_bank_map, .irq_read_needs_mux = true, }; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 5d9184d18c16..0e7fa69e93df 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); unsigned short bank = offset / PINS_PER_BANK; - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; - struct regulator *reg; + unsigned short bank_offset = bank - pctl->desc->pin_base / + PINS_PER_BANK; + struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; + struct regulator *reg = s_reg->regulator; + char supply[16]; int ret; - reg = s_reg->regulator; - if (!reg) { - char supply[16]; - - snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); - reg = regulator_get(pctl->dev, supply); - if (IS_ERR(reg)) { - dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", - 'A' + bank); - return PTR_ERR(reg); - } - - s_reg->regulator = reg; - refcount_set(&s_reg->refcount, 1); - } else { + if (reg) { refcount_inc(&s_reg->refcount); + return 0; + } + + snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); + reg = regulator_get(pctl->dev, supply); + if (IS_ERR(reg)) { + dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", + 'A' + bank); + return PTR_ERR(reg); } ret = regulator_enable(reg); @@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) goto out; } + s_reg->regulator = reg; + refcount_set(&s_reg->refcount, 1); + return 0; out: - if (refcount_dec_and_test(&s_reg->refcount)) { - regulator_put(s_reg->regulator); - s_reg->regulator = NULL; - } + regulator_put(s_reg->regulator); return ret; } @@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); unsigned short bank = offset / PINS_PER_BANK; - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; + unsigned short bank_offset = bank - pctl->desc->pin_base / + PINS_PER_BANK; + struct 
sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; if (!refcount_dec_and_test(&s_reg->refcount)) return 0; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index e340d2a24b44..034c0317c8d6 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -136,7 +136,7 @@ struct sunxi_pinctrl { struct gpio_chip *chip; const struct sunxi_pinctrl_desc *desc; struct device *dev; - struct sunxi_pinctrl_regulator regulators[12]; + struct sunxi_pinctrl_regulator regulators[9]; struct irq_domain *domain; struct sunxi_pinctrl_function *functions; unsigned nfunctions; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 5e2109c54c7c..b5e9db85e881 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -905,6 +905,7 @@ config TOSHIBA_WMI config ACPI_CMPC tristate "CMPC Laptop Extras" depends on ACPI && INPUT + depends on BACKLIGHT_LCD_SUPPORT depends on RFKILL || RFKILL=n select BACKLIGHT_CLASS_DEVICE help @@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL config SAMSUNG_Q10 tristate "Samsung Q10 Extras" depends on ACPI + depends on BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE ---help--- This driver provides support for backlight control on Samsung Q10 diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 4e7b55a14b1a..6e294b4d3635 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) usrparm.psf_data &= 0x7fffffffULL; usrparm.rssd_result &= 0x7fffffffULL; } + /* at least 2 bytes are accessed and should be allocated */ + if (usrparm.psf_data_len < 2) { + DBF_DEV_EVENT(DBF_WARNING, device, + "Symmetrix ioctl invalid data length %d", + usrparm.psf_data_len); + rc = -EINVAL; + goto out; + } /* alloc I/O data area */ psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 48ea0004a56d..5a699746c357 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr) static inline int ap_test_config_card_id(unsigned int id) { if (!ap_configuration) /* QCI not supported */ - return 1; + /* only ids 0...3F may be probed */ + return id < 0x40 ? 
1 : 0; return ap_test_config(ap_configuration->apm, id); } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0ee026947f20..122059ecad84 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -22,6 +22,7 @@ #include <linux/hashtable.h> #include <linux/ip.h> #include <linux/refcount.h> +#include <linux/workqueue.h> #include <net/ipv6.h> #include <net/if_inet6.h> @@ -789,6 +790,7 @@ struct qeth_card { struct qeth_seqno seqno; struct qeth_card_options options; + struct workqueue_struct *event_wq; wait_queue_head_t wait_q; spinlock_t mclock; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[]; extern const struct attribute_group qeth_device_attr_group; extern const struct attribute_group qeth_device_blkt_group; extern const struct device_type qeth_generic_devtype; -extern struct workqueue_struct *qeth_wq; int qeth_card_hw_is_reachable(struct qeth_card *); const char *qeth_get_cardname_short(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e63e03143ca7..89f912213e62 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); -struct workqueue_struct *qeth_wq; -EXPORT_SYMBOL_GPL(qeth_wq); +static struct workqueue_struct *qeth_wq; int qeth_card_hw_is_reachable(struct qeth_card *card) { @@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", rc, CARD_DEVID(card)); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); @@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, rc = qeth_get_problem(card, cdev, irb); if (rc) { card->read_or_write_problem = 1; + if (iob) + qeth_release_buffer(iob->channel, iob); qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) CARD_RDEV(card) = gdev->cdev[0]; CARD_WDEV(card) = gdev->cdev[1]; CARD_DDEV(card) = gdev->cdev[2]; + + card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); + if (!card->event_wq) + goto out_wq; if (qeth_setup_channel(&card->read, true)) goto out_ip; if (qeth_setup_channel(&card->write, true)) @@ -1481,6 +1487,8 @@ out_data: out_channel: qeth_clean_channel(&card->read); out_ip: + destroy_workqueue(card->event_wq); +out_wq: dev_set_drvdata(&gdev->dev, NULL); kfree(card); out: @@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card, QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card, rc); QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply = qeth_alloc_reply(card); if (!reply) { + 
qeth_release_buffer(channel, iob); return -ENOMEM; } reply->callback = reply_cb; @@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) return 0; } -static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) +static void qeth_free_output_queue(struct qeth_qdio_out_q *q) { if (!q) return; + qeth_clear_outq_buffers(q, 1); qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); kfree(q); } @@ -2467,10 +2479,8 @@ out_freeoutqbufs: card->qdio.out_qs[i]->bufs[j] = NULL; } out_freeoutq: - while (i > 0) { - qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); - } + while (i > 0) + qeth_free_output_queue(card->qdio.out_qs[--i]); kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; out_freepool: @@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) qeth_free_buffer_pool(card); /* free outbound qdio_qs */ if (card->qdio.out_qs) { - for (i = 0; i < card->qdio.no_out_queues; ++i) { - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); - qeth_free_qdio_out_buf(card->qdio.out_qs[i]); - } + for (i = 0; i < card->qdio.no_out_queues; i++) + qeth_free_output_queue(card->qdio.out_qs[i]); kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; } @@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); qeth_clean_channel(&card->data); + destroy_workqueue(card->event_wq); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); dev_set_drvdata(&card->gdev->dev, NULL); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index f108d4b44605..a43de2f9bcac 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } + + flush_workqueue(card->event_wq); } static int qeth_l2_process_inbound_buffer(struct qeth_card *card, @@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); + + cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); } @@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, data->card = card; memcpy(&data->qports, qports, sizeof(struct qeth_sbp_state_change) + extrasize); - queue_work(qeth_wq, &data->worker); + queue_work(card->event_wq, &data->worker); } struct qeth_bridge_host_data { @@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card, data->card = card; memcpy(&data->hostevs, hostevs, sizeof(struct qeth_ipacmd_addr_change) + extrasize); - queue_work(qeth_wq, &data->worker); + queue_work(card->event_wq, &data->worker); } /* SETBRIDGEPORT support; sending commands */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 42a7cdc59b76..df34bff4ac31 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } + + flush_workqueue(card->event_wq); } /* @@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); + cancel_work_sync(&card->close_dev_work); if 
(qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); qeth_l3_clear_ip_htable(card, 0); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9cf30d124b9e..e390f8c6d5f3 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto failed; /* report size limit per scatter-gather segment */ - adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 00acc7144bbc..f4f6a07c5222 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = { .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, /* GCD, adjusted later */ + /* report size limit per scatter-gather segment */ + .max_segment_size = ZFCP_QDIO_SBALE_LEN, .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, .shost_attrs = zfcp_sysfs_shost_attrs, .sdev_attrs = zfcp_sysfs_sdev_attrs, diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 128d658d472a..16957d7ac414 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, if(tpnt->sdev_attrs == NULL) tpnt->sdev_attrs = NCR_700_dev_attrs; - memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, + memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); if(memory == NULL) { printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index f83f79b07b50..07efcb9b5b94 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", asd_dev_rev[asd_ha->revision_id]); } -static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); +static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); static ssize_t asd_show_dev_bios_build(struct device *dev, struct device_attribute *attr,char *buf) @@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) { int err; - err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); if (err) return err; @@ -499,13 +499,13 @@ err_update_bios: err_biosb: device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); err_rev: - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); return err; } static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) { - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 350257c13a5b..bc9f2a2365f4 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba 
*hba) return NULL; } + cmgr->hba = hba; cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), GFP_KERNEL); if (!cmgr->free_list) { @@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) goto mem_err; } - cmgr->hba = hba; cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); for (i = 0; i < arr_sz; i++) { @@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ mem_size = num_ios * sizeof(struct io_bdt *); - cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); if (!cmgr->io_bdt_pool) { printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); goto mem_err; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index bfa13e3b191c..c8bad2c093b8 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev, host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; cfg = shost_priv(host); + cfg->state = STATE_PROBING; cfg->host = host; rc = alloc_mem(cfg); if (rc) { @@ -3775,6 +3776,7 @@ out: return rc; out_remove: + cfg->state = STATE_PROBED; cxlflash_remove(pdev); goto out; } diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index be83590ed955..ff943f477d6f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_payload_op(fp) != ELS_LS_ACC) { FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } flp = fc_frame_payload_get(fp, sizeof(*flp)); if (!flp) { FC_LPORT_DBG(lport, "FLOGI bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " "lport->mfs:%hu\n", mfs, lport->mfs); fc_lport_error(lport, fp); - goto err; + goto out; } if (mfs <= lport->mfs) { diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 9192a1d9dec6..dfba4921b265 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref) struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); - WARN_ON(!list_empty(&rdata->peers)); kfree_rcu(rdata, rcu); } EXPORT_SYMBOL(fc_rport_destroy); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b8d325ce8754..120fc520f27a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) return -ENODATA; + spin_lock_bh(&conn->session->back_lock); + if (conn->task == NULL) { + spin_unlock_bh(&conn->session->back_lock); + return -ENODATA; + } __iscsi_get_task(task); + spin_unlock_bh(&conn->session->back_lock); spin_unlock_bh(&conn->session->frwd_lock); rc = conn->session->tt->xmit_task(task); spin_lock_bh(&conn->session->frwd_lock); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 17eb4185f29d..f21c93bbb35c 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev( rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; + 
rphy->identify.phy_identifier = phy_id; child->rphy = rphy; get_device(&rphy->dev); @@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev( child->rphy = rphy; get_device(&rphy->dev); + rphy->identify.phy_identifier = phy_id; sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aeeb0144bd55..8d1acc802a67 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, /* Issue Marker IOCB */ qla2x00_marker(vha, vha->hw->req_q_map[0], - vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, + vha->hw->rsp_q_map[0], fcport->loop_id, lun, flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); } done_free_sp: sp->free(sp); - sp->fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 661512bec3ac..e27f4df24021 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -62,7 +62,7 @@ /* make sure inq_product_rev string corresponds to this version */ #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ -static const char *sdebug_version_date = "20180128"; +static const char *sdebug_version_date = "20190125"; #define MY_NAME "scsi_debug" @@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void) (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); } -static void *fake_store(unsigned long long lba) +static void *lba2fake_store(unsigned long long lba) { lba = do_div(lba, sdebug_store_sectors); @@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba, return ret; } -/* If fake_store(lba,num) compares equal to arr(num), then copy top half of - * arr into fake_store(lba,num) and return true. If comparison fails then +/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of + * arr into lba2fake_store(lba,num) and return true. If comparison fails then * return false. 
*/ static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) { @@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, if (sdt->app_tag == cpu_to_be16(0xffff)) continue; - ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); + ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba); if (ret) { dif_errors++; return ret; @@ -3261,10 +3261,12 @@ err_out: static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, bool unmap, bool ndob) { + int ret; unsigned long iflags; unsigned long long i; - int ret; - u64 lba_off; + u32 lb_size = sdebug_sector_size; + u64 block, lbaa; + u8 *fs1p; ret = check_device_access_params(scp, lba, num); if (ret) @@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, unmap_region(lba, num); goto out; } - - lba_off = lba * sdebug_sector_size; + lbaa = lba; + block = do_div(lbaa, sdebug_store_sectors); /* if ndob then zero 1 logical block, else fetch 1 logical block */ + fs1p = fake_storep + (block * lb_size); if (ndob) { - memset(fake_storep + lba_off, 0, sdebug_sector_size); + memset(fs1p, 0, lb_size); ret = 0; } else - ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, - sdebug_sector_size); + ret = fetch_to_dev_buffer(scp, fs1p, lb_size); if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return DID_ERROR << 16; - } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) + } else if (sdebug_verbose && !ndob && (ret < lb_size)) sdev_printk(KERN_INFO, scp->device, "%s: %s: lb size=%u, IO sent=%d bytes\n", - my_name, "write same", - sdebug_sector_size, ret); + my_name, "write same", lb_size, ret); /* Copy first sector to remaining blocks */ - for (i = 1 ; i < num ; i++) - memcpy(fake_storep + ((lba + i) * sdebug_sector_size), - fake_storep + lba_off, - sdebug_sector_size); - + for (i = 1 ; i < num ; i++) { + lbaa = lba + i; + block = do_div(lbaa, sdebug_store_sectors); + memmove(fake_storep + (block * lb_size), fs1p, lb_size); + } if (scsi_debug_lbp()) map_region(lba, num); out: diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d65ac584eba..f8d51c3d5582 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) set_host_byte(cmd, DID_OK); return BLK_STS_TARGET; case DID_NEXUS_FAILURE: + set_host_byte(cmd, DID_OK); return BLK_STS_NEXUS; case DID_ALLOC_FAILURE: set_host_byte(cmd, DID_OK); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b2da8a00ec33..5464d467e23e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2951,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) if (rot == 1) { blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - } else { - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { @@ -3090,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); + /* + * set the default to rotational. All non-rotational devices + * support the block characteristics VPD page, which will + * cause this to be updated correctly and any device which + * doesn't support it should be treated as rotational. 
+ */ + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 83365b29a4d8..a340af797a85 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, return -EOPNOTSUPP; /* - * Get a reply buffer for the number of requested zones plus a header. - * For ATA, buffers must be aligned to 512B. + * Get a reply buffer for the number of requested zones plus a header, + * without exceeding the device maximum command size. For ATA disks, + * buffers must be aligned to 512B. */ - buflen = roundup((nrz + 1) * 64, 512); + buflen = min(queue_max_hw_sectors(disk->queue) << 9, + roundup((nrz + 1) * 64, 512)); buf = kmalloc(buflen, gfp_mask); if (!buf) return -ENOMEM; @@ -462,12 +464,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) sdkp->device->use_10_for_rw = 0; /* - * If something changed, revalidate the disk zone bitmaps once we have - * the capacity, that is on the second revalidate execution during disk - * scan and always during normal revalidate. + * Revalidate the disk zone bitmaps once the block device capacity is + * set on the second revalidate execution during disk scan and if + * something changed when executing a normal revalidate. */ - if (sdkp->first_scan) + if (sdkp->first_scan) { + sdkp->zone_blocks = zone_blocks; + sdkp->nr_zones = nr_zones; return 0; + } + if (sdkp->zone_blocks != zone_blocks || sdkp->nr_zones != nr_zones || disk->queue->nr_zones != nr_zones) { diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 52c153cd795a..636f83f781f5 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work); static irqreturn_t portal_isr(int irq, void *ptr) { struct qman_portal *p = ptr; - - u32 clear = QM_DQAVAIL_MASK | p->irq_sources; u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + u32 clear = 0; if (unlikely(!is)) return IRQ_NONE; /* DQRR-handling if it's interrupt-driven */ - if (is & QM_PIRQ_DQRI) + if (is & QM_PIRQ_DQRI) { __poll_portal_fast(p, QMAN_POLL_LIMIT); + clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; + } /* Handling of anything else that's interrupt-driven */ - clear |= __poll_portal_slow(p, is); + clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; qm_out(&p->p, QM_REG_ISR, clear); return IRQ_HANDLED; } diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 2848fa71a33d..d6248eecf123 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) return -ENODEV; priv->last_link = 0; - phy_start_aneg(phydev); + phy_start(phydev); return 0; no_phy: diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index c92bbd05516e..005de0024dd4 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c @@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch) return; } - speakup_tty->ops->send_xchar(speakup_tty, ch); + if (speakup_tty->ops->send_xchar) + speakup_tty->ops->send_xchar(speakup_tty, ch); mutex_unlock(&speakup_tty_mutex); } @@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) return; } - 
speakup_tty->ops->tiocmset(speakup_tty, set, clear); + if (speakup_tty->ops->tiocmset) + speakup_tty->ops->tiocmset(speakup_tty, set, clear); mutex_unlock(&speakup_tty_mutex); } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 72016d0dfca5..8e7fffbb8802 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item, return count; } +/* always zero, but attr needs to remain RW to avoid userspace breakage */ +static ssize_t pi_prot_format_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "0\n"); +} + static ssize_t pi_prot_format_store(struct config_item *item, const char *page, size_t count) { @@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc); CONFIGFS_ATTR(, emulate_pr); CONFIGFS_ATTR(, pi_prot_type); CONFIGFS_ATTR_RO(, hw_pi_prot_type); -CONFIGFS_ATTR_WO(, pi_prot_format); +CONFIGFS_ATTR(, pi_prot_format); CONFIGFS_ATTR(, pi_prot_verify); CONFIGFS_ATTR(, enforce_pr_isids); CONFIGFS_ATTR(, is_nonrot); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index dfd23245f778..6fff16113628 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) cdev = __cpufreq_cooling_register(np, policy, capacitance); if (IS_ERR(cdev)) { - pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", + pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n", policy->cpu, PTR_ERR(cdev)); cdev = NULL; } diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 4bfdb4a1e47d..2df059cc07e2 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) ret = of_property_read_u32(np, "polling-delay-passive", &prop); if (ret < 0) { - pr_err("missing polling-delay-passive property\n"); + pr_err("%pOFn: missing polling-delay-passive property\n", np); goto free_tz; } tz->passive_delay = prop; ret = of_property_read_u32(np, "polling-delay", &prop); if (ret < 0) { - pr_err("missing polling-delay property\n"); + pr_err("%pOFn: missing polling-delay property\n", np); goto free_tz; } tz->polling_delay = prop; diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index e2c407656fa6..c1fdbc0b6840 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, if (dmacnt == 2) { data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), GFP_KERNEL); + if (!data->dma) + return -ENOMEM; + data->dma->fn = mtk8250_dma_filter; data->dma->rx_size = MTK_UART_RX_SIZE; data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index f80a300b5d68..48bd694a5fa1 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3420,6 +3420,11 @@ static int serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) { int num_iomem, num_port, first_port = -1, i; + int rc; + + rc = serial_pci_is_class_communication(dev); + if (rc) + return rc; /* * Should we try to make guesses for multiport serial devices later? 
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) board = &pci_boards[ent->driver_data]; - rc = serial_pci_is_class_communication(dev); - if (rc) - return rc; - rc = serial_pci_is_blacklisted(dev); if (rc) return rc; diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c index e1a551aae336..ce81523c3113 100644 --- a/drivers/tty/serial/earlycon-riscv-sbi.c +++ b/drivers/tty/serial/earlycon-riscv-sbi.c @@ -10,13 +10,16 @@ #include <linux/serial_core.h> #include <asm/sbi.h> -static void sbi_console_write(struct console *con, - const char *s, unsigned int n) +static void sbi_putc(struct uart_port *port, int c) { - int i; + sbi_console_putchar(c); +} - for (i = 0; i < n; ++i) - sbi_console_putchar(s[i]); +static void sbi_console_write(struct console *con, + const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + uart_console_write(&dev->port, s, n, sbi_putc); } static int __init early_sbi_setup(struct earlycon_device *device, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 5c01bb6d1c24..556f50aa1b58 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty) struct uart_port *port; unsigned long flags; + if (!state) + return; + port = uart_port_lock(state, flags); __uart_start(tty); uart_port_unlock(port, flags); @@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty) upstat_t mask = UPSTAT_SYNC_FIFO; struct uart_port *port; + if (!state) + return; + port = uart_port_ref(state); if (!port) return; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 8df0fd824520..64bbeb7d7e0c 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1921,7 +1921,7 @@ out_nomem: static void sci_free_irq(struct sci_port *port) { - int i; + int i, j; /* * Intentionally in reverse order so we iterate over the muxed @@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port) if (unlikely(irq < 0)) continue; + /* Check if already freed (irq was muxed) */ + for (j = 0; j < i; j++) + if (port->irqs[j] == irq) + j = i + 1; + if (j > i) + continue; + free_irq(port->irqs[i], port); kfree(port->irqstr[i]); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index cb7fcd7c0ad8..c1e9ea621f41 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev) for (i = 0; i < exynos->num_clks; i++) { ret = clk_prepare_enable(exynos->clks[i]); if (ret) { - while (--i > 0) + while (i-- > 0) clk_disable_unprepare(exynos->clks[i]); return ret; } @@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev) for (i = 0; i < exynos->num_clks; i++) { ret = clk_prepare_enable(exynos->clks[i]); if (ret) { - while (--i > 0) + while (i-- > 0) clk_disable_unprepare(exynos->clks[i]); return ret; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index bed2ff42780b..6c9b76bcc2e1 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1119,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; - if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { + if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; diff 
--git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 660878a19505..b77f3126580e 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev) #if defined(PLX_PCI_RDK2) /* see if PCI int for us by checking irqstat */ intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); - if (!intcsr & (1 << NET2272_PCI_IRQ)) { + if (!(intcsr & (1 << NET2272_PCI_IRQ))) { spin_unlock(&dev->lock); return IRQ_NONE; } diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index eae8b1b1b45b..ffe462a657b1 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum) } if (request) { - u8 is_dma = 0; - bool short_packet = false; trace_musb_req_tx(req); if (dma && (csr & MUSB_TXCSR_DMAENAB)) { - is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); @@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) */ if ((request->zero && request->length) && (request->length % musb_ep->packet_sz == 0) - && (request->actual == request->length)) - short_packet = true; + && (request->actual == request->length)) { - if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) && - (is_dma && (!dma->desired_mode || - (request->actual & - (musb_ep->packet_sz - 1))))) - short_packet = true; - - if (short_packet) { /* * On DMA completion, FIFO may not be * available yet... diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index a688f7f87829..5fc6825745f2 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) channel->status = MUSB_DMA_STATUS_FREE; /* completed */ - if ((devctl & MUSB_DEVCTL_HM) - && (musb_channel->transmit) - && ((channel->desired_mode == 0) - || (channel->actual_len & - (musb_channel->max_packet_sz - 1))) - ) { + if (musb_channel->transmit && + (!channel->desired_mode || + (channel->actual_len % + musb_channel->max_packet_sz))) { u8 epnum = musb_channel->epnum; int offset = musb->io.ep_offset(epnum, MUSB_TXCSR); @@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) */ musb_ep_select(mbase, epnum); txcsr = musb_readw(mbase, offset); - txcsr &= ~(MUSB_TXCSR_DMAENAB + if (channel->desired_mode == 1) { + txcsr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET); - musb_writew(mbase, offset, txcsr); - /* Send out the packet */ - txcsr &= ~MUSB_TXCSR_DMAMODE; + musb_writew(mbase, offset, txcsr); + /* Send out the packet */ + txcsr &= ~MUSB_TXCSR_DMAMODE; + txcsr |= MUSB_TXCSR_DMAENAB; + } txcsr |= MUSB_TXCSR_TXPKTRDY; musb_writew(mbase, offset, txcsr); } diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index d7312eed6088..91ea3083e7ad 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -21,7 +21,7 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" - depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM + depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' select USB_PHY help diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 27bdb7222527..f5f0568d8533 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -61,9 +61,6 @@ static int 
am335x_phy_probe(struct platform_device *pdev) if (ret) return ret; - ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy); - if (ret) - return ret; am_phy->usb_phy_gen.phy.init = am335x_init; am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; @@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev) device_set_wakeup_enable(dev, false); phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); - return 0; + return usb_add_phy_dev(&am_phy->usb_phy_gen.phy); } static int am335x_phy_remove(struct platform_device *pdev) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 4bc29b586698..f1c39a3c7534 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) pdo_pps_apdo_max_voltage(snk)); port->pps_data.max_curr = min_pps_apdo_current(src, snk); port->pps_data.out_volt = min(port->pps_data.max_volt, - port->pps_data.out_volt); + max(port->pps_data.min_volt, + port->pps_data.out_volt)); port->pps_data.op_curr = min(port->pps_data.max_curr, port->pps_data.op_curr); } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index bca86bf7189f..df51a35cf537 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1337,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f) n->vqs[i].rx_ring = NULL; vhost_net_buf_init(&n->vqs[i].rxq); } - vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); + vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, + UIO_MAXIOV + VHOST_NET_BATCH); vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 344684f3e2e4..23593cb23dd0 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1627,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) vqs[i] = &vs->vqs[i].vq; vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; } - vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); + vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV); vhost_scsi_init_inflight(vs, NULL); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 15a216cdd507..a2e5dc7716e2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) vq->indirect = kmalloc_array(UIO_MAXIOV, sizeof(*vq->indirect), GFP_KERNEL); - vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log), + vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), GFP_KERNEL); - vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads), + vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), GFP_KERNEL); if (!vq->indirect || !vq->log || !vq->heads) goto err_nomem; @@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev) } void vhost_dev_init(struct vhost_dev *dev, - struct vhost_virtqueue **vqs, int nvqs) + struct vhost_virtqueue **vqs, int nvqs, int iov_limit) { struct vhost_virtqueue *vq; int i; @@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev, dev->iotlb = NULL; dev->mm = NULL; dev->worker = NULL; + dev->iov_limit = iov_limit; init_llist_head(&dev->work_list); init_waitqueue_head(&dev->wait); INIT_LIST_HEAD(&dev->read_list); @@ -1787,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, len, iov, 64, VHOST_ACCESS_WO); - if (ret) + if (ret < 0) return ret; for (i = 
0; i < ret; i++) { diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 1b675dad5e05..9490e7ddb340 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -170,9 +170,11 @@ struct vhost_dev { struct list_head read_list; struct list_head pending_list; wait_queue_head_t wait; + int iov_limit; }; -void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); +void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, + int nvqs, int iov_limit); long vhost_dev_set_owner(struct vhost_dev *dev); bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 3fbc068eaa9b..bb5fc0e9fbc2 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; - vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs)); + vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV); file->private_data = vsock; spin_lock_init(&vsock->send_pkt_list_lock); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cd7e755484e3..a0b07c331255 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -152,7 +152,12 @@ struct vring_virtqueue { /* Available for packed ring */ struct { /* Actual memory layout for this queue. */ - struct vring_packed vring; + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; /* Driver ring wrap counter. */ bool avail_wrap_counter; @@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed( !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) + vq->weak_barriers = false; + vq->packed.ring_dma_addr = ring_dma_addr; vq->packed.driver_event_dma_addr = driver_event_dma_addr; vq->packed.device_event_dma_addr = device_event_dma_addr; @@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) + vq->weak_barriers = false; + vq->split.queue_dma_addr = 0; vq->split.queue_size_in_bytes = 0; @@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_F_RING_PACKED: break; + case VIRTIO_F_ORDER_PLATFORM: + break; default: /* We don't understand this bit. 
*/ __virtio_clear_bit(vdev, i); @@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) if (unlikely(!req->ki_filp)) return -EBADF; req->ki_complete = aio_complete_rw; + req->private = NULL; req->ki_pos = iocb->aio_offset; req->ki_flags = iocb_flags(req->ki_filp); if (iocb->aio_flags & IOCB_FLAG_RESFD) diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c index d441244b79df..28d9c2b1b3bb 100644 --- a/fs/autofs/expire.c +++ b/fs/autofs/expire.c @@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb, pkt.len = dentry->d_name.len; memcpy(pkt.name, dentry->d_name.name, pkt.len); pkt.name[pkt.len] = '\0'; - dput(dentry); if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) ret = -EFAULT; @@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb, complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); + dput(dentry); + return ret; } diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index 0e8ea2d9a2bb..078992eee299 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -266,8 +266,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) } root_inode = autofs_get_inode(s, S_IFDIR | 0755); root = d_make_root(root_inode); - if (!root) + if (!root) { + ret = -ENOMEM; goto fail_ino; + } pipe = NULL; root->d_fsdata = ino; diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index d0078cbb718b..e996174cbfc0 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -14,13 +14,30 @@ #include <linux/err.h> #include <linux/fs.h> +static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } +static inline char *next_non_spacetab(char *first, const char *last) +{ + for (; first <= last; first++) + if (!spacetab(*first)) + return first; + return NULL; +} +static inline char *next_terminator(char *first, const char *last) +{ + for (; first <= last; first++) + if (spacetab(*first) || !*first) + return first; + return NULL; +} + static int load_script(struct linux_binprm *bprm) { const char *i_arg, *i_name; - char *cp; + char *cp, *buf_end; struct file *file; int retval; + /* Not ours to exec if we don't start with "#!". */ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) return -ENOEXEC; @@ -33,23 +50,41 @@ static int load_script(struct linux_binprm *bprm) if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) return -ENOENT; - /* - * This section does the #! interpretation. - * Sorta complicated, but hopefully it will work. -TYT - */ - + /* Release since we are not mapping a binary into memory. */ allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; - for (cp = bprm->buf+2;; cp++) { - if (cp >= bprm->buf + BINPRM_BUF_SIZE) + /* + * This section handles parsing the #! line into separate + * interpreter path and argument strings. We must be careful + * because bprm->buf is not yet guaranteed to be NUL-terminated + * (though the buffer will have trailing NUL padding when the + * file size was smaller than the buffer size). + * + * We do not want to exec a truncated interpreter path, so either + * we find a newline (which indicates nothing is truncated), or + * we find a space/tab/NUL after the interpreter path (which + * itself may be preceded by spaces/tabs). Truncating the + * arguments is fine: the interpreter can re-read the script to + * parse them on its own. 
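The truncation check described in the load_script() comment above can be modelled in plain userspace C. The sketch below mirrors the next_non_spacetab()/next_terminator() helpers from the hunk; BUF_SIZE is an assumed stand-in for BINPRM_BUF_SIZE and the test strings are invented:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 128    /* assumed stand-in for BINPRM_BUF_SIZE */

    static bool spacetab(char c) { return c == ' ' || c == '\t'; }

    static char *next_non_spacetab(char *first, const char *last)
    {
        for (; first <= last; first++)
            if (!spacetab(*first))
                return first;
        return NULL;
    }

    static char *next_terminator(char *first, const char *last)
    {
        for (; first <= last; first++)
            if (spacetab(*first) || !*first)
                return first;
        return NULL;
    }

    /* Return true if a "#!" line in buf is safe to interpret: it either
     * contains a newline, or the interpreter path is provably complete. */
    static bool shebang_usable(char *buf)
    {
        char *buf_end = buf + BUF_SIZE - 1;
        char *cp;

        if (buf[0] != '#' || buf[1] != '!')
            return false;
        if (memchr(buf, '\n', BUF_SIZE))
            return true;
        cp = next_non_spacetab(buf + 2, buf_end);
        if (!cp)
            return false;   /* nothing but spaces/tabs after "#!" */
        return next_terminator(cp, buf_end) != NULL;
    }

    int main(void)
    {
        char ok[BUF_SIZE] = "#!/bin/sh\n";
        char truncated[BUF_SIZE];

        memset(truncated, 'a', BUF_SIZE);
        memcpy(truncated, "#!/very/long/path/", 18);
        printf("%d %d\n", shebang_usable(ok), shebang_usable(truncated));
        return 0;
    }

A buffer that ends mid-path with no newline and no later space, tab or NUL is rejected, which is exactly the case the kernel must refuse to exec.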
+ */ + buf_end = bprm->buf + sizeof(bprm->buf) - 1; + cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); + if (!cp) { + cp = next_non_spacetab(bprm->buf + 2, buf_end); + if (!cp) + return -ENOEXEC; /* Entire buf is spaces/tabs */ + /* + * If there is no later space/tab/NUL we must assume the + * interpreter path is truncated. + */ + if (!next_terminator(cp, buf_end)) return -ENOEXEC; - if (!*cp || (*cp == '\n')) - break; + cp = buf_end; } + /* NUL-terminate the buffer and any trailing spaces/tabs. */ *cp = '\0'; - while (cp > bprm->buf) { cp--; if ((*cp == ' ') || (*cp == '\t')) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f64aad613727..5a6c39b44c84 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -968,6 +968,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, return 0; } +static struct extent_buffer *alloc_tree_block_no_bg_flush( + struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 parent_start, + const struct btrfs_disk_key *disk_key, + int level, + u64 hint, + u64 empty_size) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct extent_buffer *ret; + + /* + * If we are COWing a node/leaf from the extent, chunk, device or free + * space trees, make sure that we do not finish block group creation of + * pending block groups. We do this to avoid a deadlock. + * COWing can result in allocation of a new chunk, and flushing pending + * block groups (btrfs_create_pending_block_groups()) can be triggered + * when finishing allocation of a new chunk. Creation of a pending block + * group modifies the extent, chunk, device and free space trees, + * therefore we could deadlock with ourselves since we are holding a + * lock on an extent buffer that btrfs_create_pending_block_groups() may + * try to COW later. + * For similar reasons, we also need to delay flushing pending block + * groups when splitting a leaf or node, from one of those trees, since + * we are holding a write lock on it and its parent or when inserting a + * new root node for one of those trees. + */ + if (root == fs_info->extent_root || + root == fs_info->chunk_root || + root == fs_info->dev_root || + root == fs_info->free_space_root) + trans->can_flush_pending_bgs = false; + + ret = btrfs_alloc_tree_block(trans, root, parent_start, + root->root_key.objectid, disk_key, level, + hint, empty_size); + trans->can_flush_pending_bgs = true; + + return ret; +} + /* * does the dirty work in cow of a single block. The parent block (if * supplied) is updated to point to the new cow copy. The new buffer is marked @@ -1015,28 +1057,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) parent_start = parent->start; - /* - * If we are COWing a node/leaf from the extent, chunk, device or free - * space trees, make sure that we do not finish block group creation of - * pending block groups. We do this to avoid a deadlock. - * COWing can result in allocation of a new chunk, and flushing pending - * block groups (btrfs_create_pending_block_groups()) can be triggered - * when finishing allocation of a new chunk. Creation of a pending block - * group modifies the extent, chunk, device and free space trees, - * therefore we could deadlock with ourselves since we are holding a - * lock on an extent buffer that btrfs_create_pending_block_groups() may - * try to COW later. 
- */ - if (root == fs_info->extent_root || - root == fs_info->chunk_root || - root == fs_info->dev_root || - root == fs_info->free_space_root) - trans->can_flush_pending_bgs = false; - - cow = btrfs_alloc_tree_block(trans, root, parent_start, - root->root_key.objectid, &disk_key, level, - search_start, empty_size); - trans->can_flush_pending_bgs = true; + cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key, + level, search_start, empty_size); if (IS_ERR(cow)) return PTR_ERR(cow); @@ -3345,8 +3367,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, else btrfs_node_key(lower, &lower_key, 0); - c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, - &lower_key, level, root->node->start, 0); + c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level, + root->node->start, 0); if (IS_ERR(c)) return PTR_ERR(c); @@ -3475,8 +3497,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans, mid = (c_nritems + 1) / 2; btrfs_node_key(c, &disk_key, mid); - split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, - &disk_key, level, c->start, 0); + split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level, + c->start, 0); if (IS_ERR(split)) return PTR_ERR(split); @@ -4260,8 +4282,8 @@ again: else btrfs_item_key(l, &disk_key, mid); - right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, - &disk_key, 0, l->start, 0); + right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0, + l->start, 0); if (IS_ERR(right)) return PTR_ERR(right); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index c5586ffd1426..0a3f122dd61f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1621,6 +1621,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, flags | SB_RDONLY, device_name, data); if (IS_ERR(mnt_root)) { root = ERR_CAST(mnt_root); + kfree(subvol_name); goto out; } @@ -1630,12 +1631,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, if (error < 0) { root = ERR_PTR(error); mntput(mnt_root); + kfree(subvol_name); goto out; } } } if (IS_ERR(mnt_root)) { root = ERR_CAST(mnt_root); + kfree(subvol_name); goto out; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 127fa1535f58..4ec2b660d014 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -850,14 +850,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_chunk_metadata(trans); - if (lock && should_end_transaction(trans) && - READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) { - spin_lock(&info->trans_lock); - if (cur_trans->state == TRANS_STATE_RUNNING) - cur_trans->state = TRANS_STATE_BLOCKED; - spin_unlock(&info->trans_lock); - } - if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { if (throttle) return btrfs_commit_transaction(trans); @@ -1879,6 +1871,21 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err) kmem_cache_free(btrfs_trans_handle_cachep, trans); } +/* + * Release reserved delayed ref space of all pending block groups of the + * transaction and remove them from the list + */ +static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_block_group_cache *block_group, *tmp; + + list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { + btrfs_delayed_refs_rsv_release(fs_info, 1); + list_del_init(&block_group->bg_list); + } +} + static inline int 
btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) { /* @@ -2270,6 +2277,7 @@ scrub_continue: btrfs_scrub_continue(fs_info); cleanup_transaction: btrfs_trans_release_metadata(trans); + btrfs_cleanup_pending_block_groups(trans); btrfs_trans_release_chunk_metadata(trans); trans->block_rsv = NULL; btrfs_warn(fs_info, "Skipping commit of aborted transaction."); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3e4f8f88353e..15561926ab32 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -957,11 +957,11 @@ static noinline struct btrfs_device *device_list_add(const char *path, else fs_devices = alloc_fs_devices(disk_super->fsid, NULL); - fs_devices->fsid_change = fsid_change_in_progress; - if (IS_ERR(fs_devices)) return ERR_CAST(fs_devices); + fs_devices->fsid_change = fsid_change_in_progress; + mutex_lock(&fs_devices->device_list_mutex); list_add(&fs_devices->fs_list, &fs_uuids); diff --git a/fs/buffer.c b/fs/buffer.c index 52d024bfdbc1..48318fb74938 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) struct buffer_head *head; struct page *page; int all_mapped = 1; + static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1); index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); @@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) * file io on the block device and getblk. It gets dealt with * elsewhere, don't buffer_error if we had some unmapped buffers */ - if (all_mapped) { - printk("__find_get_block_slow() failed. " - "block=%llu, b_blocknr=%llu\n", - (unsigned long long)block, - (unsigned long long)bh->b_blocknr); - printk("b_state=0x%08lx, b_size=%zu\n", - bh->b_state, bh->b_size); - printk("device %pg blocksize: %d\n", bdev, - 1 << bd_inode->i_blkbits); + ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE); + if (all_mapped && __ratelimit(&last_warned)) { + printk("__find_get_block_slow() failed. 
block=%llu, " + "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, " + "device %pg blocksize: %d\n", + (unsigned long long)block, + (unsigned long long)bh->b_blocknr, + bh->b_state, bh->b_size, bdev, + 1 << bd_inode->i_blkbits); } out_unlock: spin_unlock(&bd_mapping->private_lock); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 041c27ea8de1..f74193da0e09 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, capsnap->size); spin_lock(&mdsc->snap_flush_lock); - list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); + if (list_empty(&ci->i_snap_flush_item)) + list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); spin_unlock(&mdsc->snap_flush_lock); return 1; /* caller may want to ceph_flush_snaps */ } diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index d1f9c2f3f575..7652551a1fc4 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.16" +#define CIFS_VERSION "2.17" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 2c7689f3998d..659ce1b92c44 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2696,6 +2696,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, rc = cifs_write_allocate_pages(wdata->pages, nr_pages); if (rc) { + kvfree(wdata->pages); kfree(wdata); add_credits_and_wake_if(server, credits, 0); break; @@ -2707,6 +2708,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, if (rc) { for (i = 0; i < nr_pages; i++) put_page(wdata->pages[i]); + kvfree(wdata->pages); kfree(wdata); add_credits_and_wake_if(server, credits, 0); break; @@ -3386,8 +3388,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, } rc = cifs_read_allocate_pages(rdata, npages); - if (rc) - goto error; + if (rc) { + kvfree(rdata->pages); + kfree(rdata); + add_credits_and_wake_if(server, credits, 0); + break; + } rdata->tailsz = PAGE_SIZE; } @@ -3407,7 +3413,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, if (!rdata->cfile->invalidHandle || !(rc = cifs_reopen_file(rdata->cfile, true))) rc = server->ops->async_readv(rdata); -error: if (rc) { add_credits_and_wake_if(server, rdata->credits, 0); kref_put(&rdata->refcount, diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 153238fc4fa9..6f96e2292856 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -866,7 +866,9 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, FILE_READ_EA, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, - SMB2_MAX_EA_BUF, + CIFSMaxBufSize - + MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE, &rsp_iov, &buftype, cifs_sb); if (rc) { /* diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 2ff209ec4fab..77b3aaa39b35 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -3241,8 +3241,17 @@ smb2_readv_callback(struct mid_q_entry *mid) rdata->mr = NULL; } #endif - if (rdata->result) + if (rdata->result && rdata->result != -ENODATA) { cifs_stats_fail_inc(tcon, SMB2_READ_HE); + trace_smb3_read_err(0 /* xid */, + rdata->cfile->fid.persistent_fid, + tcon->tid, tcon->ses->Suid, rdata->offset, + rdata->bytes, rdata->result); + } else + trace_smb3_read_done(0 /* xid */, + rdata->cfile->fid.persistent_fid, + tcon->tid, tcon->ses->Suid, + rdata->offset, rdata->got_bytes); 
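The __find_get_block_slow() hunk above throttles its diagnostic with DEFINE_RATELIMIT_STATE(last_warned, HZ, 1), roughly one message burst per second. The snippet below is not the kernel API, only a loose userspace analogue of the idea, with invented names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Allow at most one message per 'interval' seconds. */
    struct simple_ratelimit {
        time_t interval;
        time_t last;
    };

    static bool ratelimit_allow(struct simple_ratelimit *rl)
    {
        time_t now = time(NULL);

        if (now - rl->last >= rl->interval) {
            rl->last = now;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct simple_ratelimit rl = { .interval = 1, .last = 0 };
        int printed = 0;

        for (int i = 0; i < 1000000; i++)
            if (ratelimit_allow(&rl))
                printed++;
        /* In a tight loop only one message per second gets through. */
        printf("printed %d warning(s)\n", printed);
        return 0;
    }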
queue_work(cifsiod_wq, &rdata->work); DeleteMidQEntry(mid); @@ -3317,13 +3326,11 @@ smb2_async_readv(struct cifs_readdata *rdata) if (rc) { kref_put(&rdata->refcount, cifs_readdata_release); cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); - trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid, - io_parms.tcon->tid, io_parms.tcon->ses->Suid, - io_parms.offset, io_parms.length); - } else - trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid, - io_parms.tcon->tid, io_parms.tcon->ses->Suid, - io_parms.offset, io_parms.length); + trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid, + io_parms.tcon->tid, + io_parms.tcon->ses->Suid, + io_parms.offset, io_parms.length, rc); + } cifs_small_buf_release(buf); return rc; @@ -3367,10 +3374,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, if (rc != -ENODATA) { cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); cifs_dbg(VFS, "Send error in read = %d\n", rc); + trace_smb3_read_err(xid, req->PersistentFileId, + io_parms->tcon->tid, ses->Suid, + io_parms->offset, io_parms->length, + rc); } - trace_smb3_read_err(rc, xid, req->PersistentFileId, - io_parms->tcon->tid, ses->Suid, - io_parms->offset, io_parms->length); free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc == -ENODATA ? 0 : rc; } else @@ -3459,8 +3467,17 @@ smb2_writev_callback(struct mid_q_entry *mid) wdata->mr = NULL; } #endif - if (wdata->result) + if (wdata->result) { cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); + trace_smb3_write_err(0 /* no xid */, + wdata->cfile->fid.persistent_fid, + tcon->tid, tcon->ses->Suid, wdata->offset, + wdata->bytes, wdata->result); + } else + trace_smb3_write_done(0 /* no xid */, + wdata->cfile->fid.persistent_fid, + tcon->tid, tcon->ses->Suid, + wdata->offset, wdata->bytes); queue_work(cifsiod_wq, &wdata->work); DeleteMidQEntry(mid); @@ -3602,10 +3619,7 @@ smb2_async_writev(struct cifs_writedata *wdata, wdata->bytes, rc); kref_put(&wdata->refcount, release); cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); - } else - trace_smb3_write_done(0 /* no xid */, req->PersistentFileId, - tcon->tid, tcon->ses->Suid, wdata->offset, - wdata->bytes); + } async_writev_out: cifs_small_buf_release(req); @@ -3831,8 +3845,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) { srch_inf->endOfSearch = true; rc = 0; - } - cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); + } else + cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); goto qdir_exit; } @@ -4427,8 +4441,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); - please_key_low = (__u64 *)req->LeaseKey; - please_key_high = (__u64 *)(req->LeaseKey+8); + please_key_low = (__u64 *)lease_key; + please_key_high = (__u64 *)(lease_key+8); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid, diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 7a2d0a2255e6..538e2299805f 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -84,8 +84,9 @@ #define NUMBER_OF_SMB2_COMMANDS 0x0013 -/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ -#define MAX_SMB2_HDR_SIZE 0x00b0 +/* 52 transform hdr + 64 hdr + 88 create rsp */ +#define SMB2_TRANSFORM_HEADER_SIZE 52 +#define MAX_SMB2_HDR_SIZE 204 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) @@ -648,6 +649,13 @@ struct smb2_create_req { __u8 
Buffer[0]; } __packed; +/* + * Maximum size of a SMB2_CREATE response is 64 (smb2 header) + + * 88 (fixed part of create response) + 520 (path) + 150 (contexts) + + * 2 bytes of padding. + */ +#define MAX_SMB2_CREATE_RESPONSE_SIZE 824 + struct smb2_create_rsp { struct smb2_sync_hdr sync_hdr; __le16 StructureSize; /* Must be 89 */ @@ -996,6 +1004,11 @@ struct smb2_close_req { __u64 VolatileFileId; /* opaque endianness */ } __packed; +/* + * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data) + */ +#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124 + struct smb2_close_rsp { struct smb2_sync_hdr sync_hdr; __le16 StructureSize; /* 60 */ @@ -1398,8 +1411,6 @@ struct smb2_file_link_info { /* encoding of request for level 11 */ char FileName[0]; /* Name to be assigned to new link */ } __packed; /* level 11 Set */ -#define SMB2_MAX_EA_BUF 65536 - struct smb2_file_full_ea_info { /* encoding of response for level 15 */ __le32 next_entry_offset; __u8 flags; diff --git a/fs/dcache.c b/fs/dcache.c index 2593153471cf..aac41adf4743 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -119,6 +119,7 @@ struct dentry_stat_t dentry_stat = { static DEFINE_PER_CPU(long, nr_dentry); static DEFINE_PER_CPU(long, nr_dentry_unused); +static DEFINE_PER_CPU(long, nr_dentry_negative); #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) @@ -152,11 +153,22 @@ static long get_nr_dentry_unused(void) return sum < 0 ? 0 : sum; } +static long get_nr_dentry_negative(void) +{ + int i; + long sum = 0; + + for_each_possible_cpu(i) + sum += per_cpu(nr_dentry_negative, i); + return sum < 0 ? 0 : sum; +} + int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { dentry_stat.nr_dentry = get_nr_dentry(); dentry_stat.nr_unused = get_nr_dentry_unused(); + dentry_stat.nr_negative = get_nr_dentry_negative(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #endif @@ -317,6 +329,8 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry) flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); WRITE_ONCE(dentry->d_flags, flags); dentry->d_inode = NULL; + if (dentry->d_flags & DCACHE_LRU_LIST) + this_cpu_inc(nr_dentry_negative); } static void dentry_free(struct dentry *dentry) @@ -371,6 +385,11 @@ static void dentry_unlink_inode(struct dentry * dentry) * The per-cpu "nr_dentry_unused" counters are updated with * the DCACHE_LRU_LIST bit. * + * The per-cpu "nr_dentry_negative" counters are only updated + * when deleted from or added to the per-superblock LRU list, not + * from/to the shrink list. That is to avoid an unneeded dec/inc + * pair when moving from LRU to shrink list in select_collect(). + * * These helper functions make sure we always follow the * rules. d_lock must be held by the caller. 
*/ @@ -380,6 +399,8 @@ static void d_lru_add(struct dentry *dentry) D_FLAG_VERIFY(dentry, 0); dentry->d_flags |= DCACHE_LRU_LIST; this_cpu_inc(nr_dentry_unused); + if (d_is_negative(dentry)) + this_cpu_inc(nr_dentry_negative); WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); } @@ -388,6 +409,8 @@ static void d_lru_del(struct dentry *dentry) D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags &= ~DCACHE_LRU_LIST; this_cpu_dec(nr_dentry_unused); + if (d_is_negative(dentry)) + this_cpu_dec(nr_dentry_negative); WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); } @@ -418,6 +441,8 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry) D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags &= ~DCACHE_LRU_LIST; this_cpu_dec(nr_dentry_unused); + if (d_is_negative(dentry)) + this_cpu_dec(nr_dentry_negative); list_lru_isolate(lru, &dentry->d_lru); } @@ -426,6 +451,8 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry, { D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags |= DCACHE_SHRINK_LIST; + if (d_is_negative(dentry)) + this_cpu_dec(nr_dentry_negative); list_lru_isolate_move(lru, &dentry->d_lru, list); } @@ -1188,15 +1215,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, */ void shrink_dcache_sb(struct super_block *sb) { - long freed; - do { LIST_HEAD(dispose); - freed = list_lru_walk(&sb->s_dentry_lru, + list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate_shrink, &dispose, 1024); - - this_cpu_sub(nr_dentry_unused, freed); shrink_dentry_list(&dispose); } while (list_lru_count(&sb->s_dentry_lru) > 0); } @@ -1820,6 +1843,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) WARN_ON(d_in_lookup(dentry)); spin_lock(&dentry->d_lock); + /* + * Decrement negative dentry count if it was in the LRU list. + */ + if (dentry->d_flags & DCACHE_LRU_LIST) + this_cpu_dec(nr_dentry_negative); hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); raw_write_seqcount_begin(&dentry->d_seq); __d_set_inode_and_type(dentry, inode, add_flags); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 13b01351dd1c..29c68c5d44d5 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -324,7 +324,7 @@ static struct dentry *failed_creating(struct dentry *dentry) inode_unlock(d_inode(dentry->d_parent)); dput(dentry); simple_release_fs(&debugfs_mount, &debugfs_mount_count); - return NULL; + return ERR_PTR(-ENOMEM); } static struct dentry *end_creating(struct dentry *dentry) @@ -347,7 +347,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, dentry = start_creating(name, parent); if (IS_ERR(dentry)) - return NULL; + return dentry; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) @@ -386,7 +386,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, - * you are responsible here.) If an error occurs, %NULL will be returned. + * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be + * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. @@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); * This function will return a pointer to a dentry if it succeeds. 
This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, - * you are responsible here.) If an error occurs, %NULL will be returned. + * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be + * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. @@ -495,7 +497,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size); * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, - * you are responsible here.) If an error occurs, %NULL will be returned. + * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be + * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. @@ -506,7 +509,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) struct inode *inode; if (IS_ERR(dentry)) - return NULL; + return dentry; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) @@ -545,7 +548,7 @@ struct dentry *debugfs_create_automount(const char *name, struct inode *inode; if (IS_ERR(dentry)) - return NULL; + return dentry; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) @@ -581,8 +584,8 @@ EXPORT_SYMBOL(debugfs_create_automount); * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the symbolic * link is to be removed (no automatic cleanup happens if your module is - * unloaded, you are responsible here.) If an error occurs, %NULL will be - * returned. + * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) + * will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. @@ -594,12 +597,12 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, struct inode *inode; char *link = kstrdup(target, GFP_KERNEL); if (!link) - return NULL; + return ERR_PTR(-ENOMEM); dentry = start_creating(name, parent); if (IS_ERR(dentry)) { kfree(link); - return NULL; + return dentry; } inode = debugfs_get_inode(dentry->d_sb); @@ -787,6 +790,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *dentry = NULL, *trap; struct name_snapshot old_name; + if (IS_ERR(old_dir)) + return old_dir; + if (IS_ERR(new_dir)) + return new_dir; + if (IS_ERR_OR_NULL(old_dentry)) + return old_dentry; + trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? */ if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) @@ -820,7 +830,9 @@ exit: if (dentry && !IS_ERR(dentry)) dput(dentry); unlock_rename(new_dir, old_dir); - return NULL; + if (IS_ERR(dentry)) + return dentry; + return ERR_PTR(-EINVAL); } EXPORT_SYMBOL_GPL(debugfs_rename); diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 82377017130f..d31b6c72b476 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { spin_lock(&inode->i_lock); + /* + * We must skip inodes in unusual state. We may also skip + * inodes without pages but we deliberately won't in case + * we need to reschedule to avoid softlockups. 
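The debugfs changes above switch the creation helpers from returning NULL on failure to returning ERR_PTR(-errno), so callers must test IS_ERR() rather than comparing against NULL. Below is a self-contained userspace restatement of that convention; create_node() is a hypothetical stand-in, not a debugfs function:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Userspace restatement of the kernel's ERR_PTR/IS_ERR convention. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline bool IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical creation helper that reports errors as ERR_PTR(-errno)
     * instead of NULL, mirroring the new debugfs behaviour. */
    static void *create_node(bool fail)
    {
        static int node;

        return fail ? ERR_PTR(-12 /* ENOMEM */) : &node;
    }

    int main(void)
    {
        void *d = create_node(true);

        /* Callers now test IS_ERR(), not a NULL return. */
        if (IS_ERR(d))
            printf("creation failed: %ld\n", PTR_ERR(d));
        return 0;
    }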
+ */ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || - (inode->i_mapping->nrpages == 0)) { + (inode->i_mapping->nrpages == 0 && !need_resched())) { spin_unlock(&inode->i_lock); continue; } @@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); + cond_resched(); invalidate_mapping_pages(inode->i_mapping, 0, -1); iput(toput_inode); toput_inode = inode; diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 712f00995390..5508baa11bb6 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) goto out; } - ret = file_write_and_wait_range(file, start, end); - if (ret) - return ret; - if (!journal) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL - }; - - ret = ext4_write_inode(inode, &wbc); + ret = __generic_file_fsync(file, start, end, datasync); if (!ret) ret = ext4_sync_parent(inode); if (test_opt(inode->i_sb, BARRIER)) @@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) goto out; } + ret = file_write_and_wait_range(file, start, end); + if (ret) + return ret; /* * data=writeback,ordered: * The caller's filemap_fdatawrite()/wait will sync the data. diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index a5e516a40e7a..809c0f2f9942 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, req->in.h.nodeid = outarg->nodeid; req->in.numargs = 2; req->in.argpages = 1; - req->page_descs[0].offset = offset; req->end = fuse_retrieve_end; index = outarg->offset >> PAGE_SHIFT; @@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, this_num = min_t(unsigned, num, PAGE_SIZE - offset); req->pages[req->num_pages] = page; + req->page_descs[req->num_pages].offset = offset; req->page_descs[req->num_pages].length = this_num; req->num_pages++; @@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, ret = fuse_dev_do_write(fud, &cs, len); + pipe_lock(pipe); for (idx = 0; idx < nbuf; idx++) pipe_buf_release(pipe, &bufs[idx]); + pipe_unlock(pipe); out: kvfree(bufs); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ffaffe18352a..a59c16bd90ac 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, spin_unlock(&fc->lock); dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(page, NR_WRITEBACK_TEMP); + dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP); wb_writeout_inc(&bdi->wb); fuse_writepage_free(fc, new_req); fuse_request_free(new_req); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 76baaa6be393..c2d4099429be 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns) get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); fc->user_ns = get_user_ns(user_ns); + fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; } EXPORT_SYMBOL_GPL(fuse_conn_init); @@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) fc->user_id = d.user_id; fc->group_id = d.group_id; fc->max_read = max_t(unsigned, 4096, d.max_read); - fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; /* Used by get_root_inode() */ sb->s_fs_info = fc; diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c 
index f15b4c57c4bd..78510ab91835 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -28,7 +28,6 @@ #include "util.h" #include "trans.h" #include "dir.h" -#include "lops.h" struct workqueue_struct *gfs2_freeze_wq; diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 5bfaf381921a..b8830fda51e8 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -733,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, lh->lh_crc = cpu_to_be32(crc); gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); - gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags); + gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, op_flags); log_flush_wait(sdp); } @@ -810,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) gfs2_ordered_write(sdp); lops_before_commit(sdp, tr); - gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE); + gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, 0); if (sdp->sd_log_head != sdp->sd_log_flush_head) { log_flush_wait(sdp); diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 94dcab655bc0..2295042bc625 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -17,9 +17,7 @@ #include <linux/bio.h> #include <linux/fs.h> #include <linux/list_sort.h> -#include <linux/blkdev.h> -#include "bmap.h" #include "dir.h" #include "gfs2.h" #include "incore.h" @@ -195,6 +193,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, /** * gfs2_end_log_write - end of i/o to the log * @bio: The bio + * @error: Status of i/o request * * Each bio_vec contains either data from the pagecache or data * relating to the log itself. Here we iterate over the bio_vec @@ -231,19 +230,20 @@ static void gfs2_end_log_write(struct bio *bio) /** * gfs2_log_submit_bio - Submit any pending log bio * @biop: Address of the bio pointer - * @opf: REQ_OP | op_flags + * @op: REQ_OP + * @op_flags: req_flag_bits * * Submit any pending part-built or full bio to the block device. If * there is no pending bio, then this is a no-op. */ -void gfs2_log_submit_bio(struct bio **biop, int opf) +void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags) { struct bio *bio = *biop; if (bio) { struct gfs2_sbd *sdp = bio->bi_private; atomic_inc(&sdp->sd_log_in_flight); - bio->bi_opf = opf; + bio_set_op_attrs(bio, op, op_flags); submit_bio(bio); *biop = NULL; } @@ -304,7 +304,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, nblk >>= sdp->sd_fsb2bb_shift; if (blkno == nblk && !flush) return bio; - gfs2_log_submit_bio(biop, op); + gfs2_log_submit_bio(biop, op, 0); } *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); @@ -375,184 +375,6 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) gfs2_log_bmap(sdp)); } -/** - * gfs2_end_log_read - end I/O callback for reads from the log - * @bio: The bio - * - * Simply unlock the pages in the bio. The main thread will wait on them and - * process them in order as necessary. - */ - -static void gfs2_end_log_read(struct bio *bio) -{ - struct page *page; - struct bio_vec *bvec; - int i; - - bio_for_each_segment_all(bvec, bio, i) { - page = bvec->bv_page; - if (bio->bi_status) { - int err = blk_status_to_errno(bio->bi_status); - - SetPageError(page); - mapping_set_error(page->mapping, err); - } - unlock_page(page); - } - - bio_put(bio); -} - -/** - * gfs2_jhead_pg_srch - Look for the journal head in a given page. - * @jd: The journal descriptor - * @page: The page to look in - * - * Returns: 1 if found, 0 otherwise. 
- */ - -static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, - struct gfs2_log_header_host *head, - struct page *page) -{ - struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); - struct gfs2_log_header_host uninitialized_var(lh); - void *kaddr = kmap_atomic(page); - unsigned int offset; - bool ret = false; - - for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { - if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { - if (lh.lh_sequence > head->lh_sequence) - *head = lh; - else { - ret = true; - break; - } - } - } - kunmap_atomic(kaddr); - return ret; -} - -/** - * gfs2_jhead_process_page - Search/cleanup a page - * @jd: The journal descriptor - * @index: Index of the page to look into - * @done: If set, perform only cleanup, else search and set if found. - * - * Find the page with 'index' in the journal's mapping. Search the page for - * the journal head if requested (cleanup == false). Release refs on the - * page so the page cache can reclaim it (put_page() twice). We grabbed a - * reference on this page two times, first when we did a find_or_create_page() - * to obtain the page to add it to the bio and second when we do a - * find_get_page() here to get the page to wait on while I/O on it is being - * completed. - * This function is also used to free up a page we might've grabbed but not - * used. Maybe we added it to a bio, but not submitted it for I/O. Or we - * submitted the I/O, but we already found the jhead so we only need to drop - * our references to the page. - */ - -static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, - struct gfs2_log_header_host *head, - bool *done) -{ - struct page *page; - - page = find_get_page(jd->jd_inode->i_mapping, index); - wait_on_page_locked(page); - - if (PageError(page)) - *done = true; - - if (!*done) - *done = gfs2_jhead_pg_srch(jd, head, page); - - put_page(page); /* Once for find_get_page */ - put_page(page); /* Once more for find_or_create_page */ -} - -/** - * gfs2_find_jhead - find the head of a log - * @jd: The journal descriptor - * @head: The log descriptor for the head of the log is returned here - * - * Do a search of a journal by reading it in large chunks using bios and find - * the valid log entry with the highest sequence number. (i.e. 
the log head) - * - * Returns: 0 on success, errno otherwise - */ - -int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) -{ - struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); - struct address_space *mapping = jd->jd_inode->i_mapping; - struct gfs2_journal_extent *je; - u32 block, read_idx = 0, submit_idx = 0, index = 0; - int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; - int blocks_per_page = 1 << shift, sz, ret = 0; - struct bio *bio = NULL; - struct page *page; - bool done = false; - errseq_t since; - - memset(head, 0, sizeof(*head)); - if (list_empty(&jd->extent_list)) - gfs2_map_journal_extents(sdp, jd); - - since = filemap_sample_wb_err(mapping); - list_for_each_entry(je, &jd->extent_list, list) { - for (block = 0; block < je->blocks; block += blocks_per_page) { - index = (je->lblock + block) >> shift; - - page = find_or_create_page(mapping, index, GFP_NOFS); - if (!page) { - ret = -ENOMEM; - done = true; - goto out; - } - - if (bio) { - sz = bio_add_page(bio, page, PAGE_SIZE, 0); - if (sz == PAGE_SIZE) - goto page_added; - submit_idx = index; - submit_bio(bio); - bio = NULL; - } - - bio = gfs2_log_alloc_bio(sdp, - je->dblock + (index << shift), - gfs2_end_log_read); - bio->bi_opf = REQ_OP_READ; - sz = bio_add_page(bio, page, PAGE_SIZE, 0); - gfs2_assert_warn(sdp, sz == PAGE_SIZE); - -page_added: - if (submit_idx <= read_idx + BIO_MAX_PAGES) { - /* Keep at least one bio in flight */ - continue; - } - - gfs2_jhead_process_page(jd, read_idx++, head, &done); - if (done) - goto out; /* found */ - } - } - -out: - if (bio) - submit_bio(bio); - while (read_idx <= index) - gfs2_jhead_process_page(jd, read_idx++, head, &done); - - if (!ret) - ret = filemap_check_wb_err(mapping, since); - - return ret; -} - static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, u32 ld_length, u32 ld_data1) { diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 331160fc568b..711c4d89c063 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -30,10 +30,8 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp); extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, unsigned size, unsigned offset, u64 blkno); extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); -extern void gfs2_log_submit_bio(struct bio **biop, int opf); +extern void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags); extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); -extern int gfs2_find_jhead(struct gfs2_jdesc *jd, - struct gfs2_log_header_host *head); static inline unsigned int buf_limit(struct gfs2_sbd *sdp) { diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 1179763f6370..b041cb8ae383 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -41,7 +41,6 @@ #include "dir.h" #include "meta_io.h" #include "trace_gfs2.h" -#include "lops.h" #define DO 0 #define UNDO 1 diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 7389e445a7a7..2dac43065382 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -182,6 +182,129 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, } /** + * find_good_lh - find a good log header + * @jd: the journal + * @blk: the segment to start searching from + * @lh: the log header to fill in + * @forward: if true search forward in the log, else search backward + * + * Call get_log_header() to get a log header for a segment, but if the + * segment is bad, either scan forward or backward until we find a good one. 
+ * + * Returns: errno + */ + +static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, + struct gfs2_log_header_host *head) +{ + unsigned int orig_blk = *blk; + int error; + + for (;;) { + error = get_log_header(jd, *blk, head); + if (error <= 0) + return error; + + if (++*blk == jd->jd_blocks) + *blk = 0; + + if (*blk == orig_blk) { + gfs2_consist_inode(GFS2_I(jd->jd_inode)); + return -EIO; + } + } +} + +/** + * jhead_scan - make sure we've found the head of the log + * @jd: the journal + * @head: this is filled in with the log descriptor of the head + * + * At this point, seg and lh should be either the head of the log or just + * before. Scan forward until we find the head. + * + * Returns: errno + */ + +static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) +{ + unsigned int blk = head->lh_blkno; + struct gfs2_log_header_host lh; + int error; + + for (;;) { + if (++blk == jd->jd_blocks) + blk = 0; + + error = get_log_header(jd, blk, &lh); + if (error < 0) + return error; + if (error == 1) + continue; + + if (lh.lh_sequence == head->lh_sequence) { + gfs2_consist_inode(GFS2_I(jd->jd_inode)); + return -EIO; + } + if (lh.lh_sequence < head->lh_sequence) + break; + + *head = lh; + } + + return 0; +} + +/** + * gfs2_find_jhead - find the head of a log + * @jd: the journal + * @head: the log descriptor for the head of the log is returned here + * + * Do a binary search of a journal and find the valid log entry with the + * highest sequence number. (i.e. the log head) + * + * Returns: errno + */ + +int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) +{ + struct gfs2_log_header_host lh_1, lh_m; + u32 blk_1, blk_2, blk_m; + int error; + + blk_1 = 0; + blk_2 = jd->jd_blocks - 1; + + for (;;) { + blk_m = (blk_1 + blk_2) / 2; + + error = find_good_lh(jd, &blk_1, &lh_1); + if (error) + return error; + + error = find_good_lh(jd, &blk_m, &lh_m); + if (error) + return error; + + if (blk_1 == blk_m || blk_m == blk_2) + break; + + if (lh_1.lh_sequence <= lh_m.lh_sequence) + blk_1 = blk_m; + else + blk_2 = blk_m; + } + + error = jhead_scan(jd, &lh_1); + if (error) + return error; + + *head = lh_1; + + return error; +} + +/** * foreach_descriptor - go through the active part of the log * @jd: the journal * @start: the first log header in the active region diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 99575ab81202..11d81248be85 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h @@ -27,6 +27,8 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where) extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); +extern int gfs2_find_jhead(struct gfs2_jdesc *jd, + struct gfs2_log_header_host *head); extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); extern void gfs2_recover_func(struct work_struct *work); extern int __get_log_header(struct gfs2_sbd *sdp, diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 831d7cb5a49c..17a8d3b43990 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, goto next_iter; } if (ret == -E2BIG) { - n += rbm->bii - initial_bii; rbm->bii = 0; rbm->offset = 0; + n += (rbm->bii - initial_bii); goto res_covered_end_of_rgrp; } return ret; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index d4b11c903971..ca71163ff7cf 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -45,7 +45,6 @@ #include "util.h" 
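The gfs2_find_jhead()/jhead_scan() pair restored above performs a binary search over a circular journal whose sequence numbers increase up to the head and then wrap. Here is a simplified, hedged userspace model of that search, assuming every block holds a valid and distinct sequence number (no bad log headers):

    #include <stdio.h>

    /*
     * Toy model of gfs2_find_jhead(): sequence numbers are laid out
     * circularly, strictly increasing until the head, after which older
     * (smaller) entries follow. Return the index of the largest sequence.
     */
    static unsigned int find_head(const unsigned long *seq, unsigned int n)
    {
        unsigned int blk_1 = 0, blk_2 = n - 1, blk_m;

        for (;;) {
            blk_m = (blk_1 + blk_2) / 2;
            if (blk_1 == blk_m || blk_m == blk_2)
                break;
            if (seq[blk_1] <= seq[blk_m])
                blk_1 = blk_m;  /* head is at or after blk_m */
            else
                blk_2 = blk_m;  /* head is before blk_m */
        }

        /* Short forward scan, analogous to jhead_scan(). */
        for (unsigned int i = 0; i < n; i++) {
            unsigned int next = (blk_1 + 1) % n;

            if (seq[next] <= seq[blk_1])
                break;
            blk_1 = next;
        }
        return blk_1;
    }

    int main(void)
    {
        unsigned long seq[] = { 5, 6, 7, 8, 1, 2, 3 };

        printf("head at block %u\n", find_head(seq, 7)); /* prints 3 */
        return 0;
    }

In the real code each probe goes through find_good_lh(), which skips damaged headers before the sequence comparison.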
#include "sys.h" #include "xattr.h" -#include "lops.h" #define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) diff --git a/fs/inode.c b/fs/inode.c index 0cd47fe0dbe5..73432e64f874 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item, return LRU_REMOVED; } - /* - * Recently referenced inodes and inodes with many attached pages - * get one more pass. - */ - if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) { + /* recently referenced inodes get one more pass */ + if (inode->i_state & I_REFERENCED) { inode->i_state &= ~I_REFERENCED; spin_unlock(&inode->i_lock); return LRU_ROTATE; diff --git a/fs/iomap.c b/fs/iomap.c index a3088fae567b..897c60215dd1 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page) atomic_set(&iop->read_count, 0); atomic_set(&iop->write_count, 0); bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); + + /* + * migrate_page_move_mapping() assumes that pages with private data have + * their count elevated by 1. + */ + get_page(page); set_page_private(page, (unsigned long)iop); SetPagePrivate(page); return iop; @@ -132,6 +138,7 @@ iomap_page_release(struct page *page) WARN_ON_ONCE(atomic_read(&iop->write_count)); ClearPagePrivate(page); set_page_private(page, 0); + put_page(page); kfree(iop); } @@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage, if (page_has_private(page)) { ClearPagePrivate(page); + get_page(newpage); set_page_private(newpage, page_private(page)); set_page_private(page, 0); + put_page(page); SetPagePrivate(newpage); } @@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, loff_t pos = iocb->ki_pos, start = pos; loff_t end = iocb->ki_pos + count - 1, ret = 0; unsigned int flags = IOMAP_DIRECT; + bool wait_for_completion = is_sync_kiocb(iocb); struct blk_plug plug; struct iomap_dio *dio; @@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, dio->end_io = end_io; dio->error = 0; dio->flags = 0; - dio->wait_for_completion = is_sync_kiocb(iocb); dio->submit.iter = iter; dio->submit.waiter = current; @@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, dio_warn_stale_pagecache(iocb->ki_filp); ret = 0; - if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && + if (iov_iter_rw(iter) == WRITE && !wait_for_completion && !inode->i_sb->s_dio_done_wq) { ret = sb_init_dio_done_wq(inode->i_sb); if (ret < 0) @@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (ret <= 0) { /* magic error code to fall back to buffered I/O */ if (ret == -ENOTBLK) { - dio->wait_for_completion = true; + wait_for_completion = true; ret = 0; } break; @@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (dio->flags & IOMAP_DIO_WRITE_FUA) dio->flags &= ~IOMAP_DIO_NEED_SYNC; + /* + * We are about to drop our additional submission reference, which + * might be the last reference to the dio. There are three three + * different ways we can progress here: + * + * (a) If this is the last reference we will always complete and free + * the dio ourselves. + * (b) If this is not the last reference, and we serve an asynchronous + * iocb, we must never touch the dio after the decrement, the + * I/O completion handler will complete and free it. 
+ * (c) If this is not the last reference, but we serve a synchronous + * iocb, the I/O completion handler will wake us up on the drop + * of the final reference, and we will complete and free it here + * after we got woken by the I/O completion handler. + */ + dio->wait_for_completion = wait_for_completion; if (!atomic_dec_and_test(&dio->ref)) { - if (!dio->wait_for_completion) + if (!wait_for_completion) return -EIOCBQUEUED; for (;;) { @@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, __set_current_state(TASK_RUNNING); } - ret = iomap_dio_complete(dio); - - return ret; + return iomap_dio_complete(dio); out_free_dio: kfree(dio); diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 3f23b6840547..bf34ddaa2ad7 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -44,6 +44,7 @@ #include <linux/keyctl.h> #include <linux/key-type.h> #include <keys/user-type.h> +#include <keys/request_key_auth-type.h> #include <linux/module.h> #include "internal.h" @@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy; struct idmap_legacy_upcalldata { struct rpc_pipe_msg pipe_msg; struct idmap_msg idmap_msg; - struct key_construction *key_cons; + struct key *authkey; struct idmap *idmap; }; @@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = { { Opt_find_err, NULL } }; -static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); +static int nfs_idmap_legacy_upcall(struct key *, void *); static ssize_t idmap_pipe_downcall(struct file *, const char __user *, size_t); static void idmap_release_pipe(struct inode *); @@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap, static void nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) { - struct key_construction *cons = idmap->idmap_upcall_data->key_cons; + struct key *authkey = idmap->idmap_upcall_data->authkey; kfree(idmap->idmap_upcall_data); idmap->idmap_upcall_data = NULL; - complete_request_key(cons, ret); + complete_request_key(authkey, ret); + key_put(authkey); } static void @@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret) nfs_idmap_complete_pipe_upcall_locked(idmap, ret); } -static int nfs_idmap_legacy_upcall(struct key_construction *cons, - const char *op, - void *aux) +static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) { struct idmap_legacy_upcalldata *data; + struct request_key_auth *rka = get_request_key_auth(authkey); struct rpc_pipe_msg *msg; struct idmap_msg *im; struct idmap *idmap = (struct idmap *)aux; - struct key *key = cons->key; + struct key *key = rka->target_key; int ret = -ENOKEY; if (!aux) @@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, msg = &data->pipe_msg; im = &data->idmap_msg; data->idmap = idmap; - data->key_cons = cons; + data->authkey = key_get(authkey); ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); if (ret < 0) @@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, out2: kfree(data); out1: - complete_request_key(cons, ret); + complete_request_key(authkey, ret); return ret; } @@ -651,9 +652,10 @@ out: static ssize_t idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) { + struct request_key_auth *rka; struct rpc_inode *rpci = RPC_I(file_inode(filp)); struct idmap *idmap = (struct idmap *)rpci->private; - struct key_construction *cons; + struct key *authkey; struct idmap_msg im; size_t namelen_in; int ret = -ENOKEY; @@ -665,7 +667,8 @@ 
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) if (idmap->idmap_upcall_data == NULL) goto out_noupcall; - cons = idmap->idmap_upcall_data->key_cons; + authkey = idmap->idmap_upcall_data->authkey; + rka = get_request_key_auth(authkey); if (mlen != sizeof(im)) { ret = -ENOSPC; @@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) ret = nfs_idmap_read_and_verify_message(&im, &idmap->idmap_upcall_data->idmap_msg, - cons->key, cons->authkey); + rka->target_key, authkey); if (ret >= 0) { - key_set_timeout(cons->key, nfs_idmap_cache_timeout); + key_set_timeout(rka->target_key, nfs_idmap_cache_timeout); ret = mlen; } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 22ce3c8a2f46..0570391eaa16 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1895,6 +1895,11 @@ static int nfs_parse_devname(const char *dev_name, size_t len; char *end; + if (unlikely(!dev_name || !*dev_name)) { + dfprintk(MOUNT, "NFS: device name not specified\n"); + return -EINVAL; + } + /* Is the host name protected with square brakcets? */ if (*dev_name == '[') { end = strchr(++dev_name, ']'); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5a0bbf917a32..d09c9f878141 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -238,9 +238,9 @@ out: } /* A writeback failed: mark the page as bad, and invalidate the page cache */ -static void nfs_set_pageerror(struct page *page) +static void nfs_set_pageerror(struct address_space *mapping) { - nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); + nfs_zap_mapping(mapping->host, mapping); } /* @@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, nfs_set_page_writeback(page); WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); - ret = 0; + ret = req->wb_context->error; /* If there is a fatal error that covers this write, just exit */ - if (nfs_error_is_fatal_on_server(req->wb_context->error)) + if (nfs_error_is_fatal_on_server(ret)) goto out_launder; + ret = 0; if (!nfs_pageio_add_request(pgio, req)) { ret = pgio->pg_error; /* @@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, nfs_context_set_write_error(req->wb_context, ret); if (nfs_error_is_fatal_on_server(ret)) goto out_launder; - } + } else + ret = -EAGAIN; nfs_redirty_request(req); - ret = -EAGAIN; } else nfs_add_stats(page_file_mapping(page)->host, NFSIOS_WRITEPAGES, 1); @@ -993,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) nfs_list_remove_request(req); if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes < bytes)) { - nfs_set_pageerror(req->wb_page); + nfs_set_pageerror(page_file_mapping(req->wb_page)); nfs_context_set_write_error(req->wb_context, hdr->error); goto remove_req; } @@ -1347,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsigned int count) { struct nfs_open_context *ctx = nfs_file_open_context(file); - struct inode *inode = page_file_mapping(page)->host; + struct address_space *mapping = page_file_mapping(page); + struct inode *inode = mapping->host; int status = 0; nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); @@ -1365,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page, status = nfs_writepage_setup(ctx, page, offset, count); if (status < 0) - nfs_set_pageerror(page); + nfs_set_pageerror(mapping); else __set_page_dirty_nobuffers(page); out: diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index b33f9785b756..72a7681f4046 100644 --- a/fs/nfsd/nfsctl.c +++ 
b/fs/nfsd/nfsctl.c @@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net) retval = nfsd_idmap_init(net); if (retval) goto out_idmap_error; - nn->nfsd4_lease = 45; /* default lease time */ - nn->nfsd4_grace = 45; + nn->nfsd4_lease = 90; /* default lease time */ + nn->nfsd4_grace = 90; nn->somebody_reclaimed = false; nn->clverifier_counter = prandom_u32(); nn->clientid_counter = prandom_u32(); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 9824e32b2f23..7dc98e14655d 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, loff_t cloned; cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); + if (cloned < 0) + return nfserrno(cloned); if (count && cloned != count) - cloned = -EINVAL; - return nfserrno(cloned < 0 ? cloned : 0); + return nfserrno(-EINVAL); + return 0; } ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, diff --git a/fs/proc/base.c b/fs/proc/base.c index 633a63462573..f5ed9512d193 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) task_lock(p); if (!p->vfork_done && process_shares_mm(p, mm)) { - pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n", - task_pid_nr(p), p->comm, - p->signal->oom_score_adj, oom_adj, - task_pid_nr(task), task->comm); p->signal->oom_score_adj = oom_adj; if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) p->signal->oom_score_adj_min = (short)oom_adj; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 8ae109429a88..e39bac94dead 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry, inode = proc_get_inode(dir->i_sb, de); if (!inode) return ERR_PTR(-ENOMEM); - d_set_d_op(dentry, &proc_misc_dentry_ops); + d_set_d_op(dentry, de->proc_dops); return d_splice_alias(inode, dentry); } read_unlock(&proc_subdir_lock); @@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, INIT_LIST_HEAD(&ent->pde_openers); proc_set_user(ent, (*parent)->uid, (*parent)->gid); + ent->proc_dops = &proc_misc_dentry_ops; + out: return ent; } diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 5185d7f6a51e..95b14196f284 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -44,6 +44,7 @@ struct proc_dir_entry { struct completion *pde_unload_completion; const struct inode_operations *proc_iops; const struct file_operations *proc_fops; + const struct dentry_operations *proc_dops; union { const struct seq_operations *seq_ops; int (*single_show)(struct seq_file *, void *); diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index d5e0fcb3439e..a7b12435519e 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode) return maybe_get_net(PDE_NET(PDE(inode))); } +static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) +{ + return 0; +} + +static const struct dentry_operations proc_net_dentry_ops = { + .d_revalidate = proc_net_d_revalidate, + .d_delete = always_delete_dentry, +}; + +static void pde_force_lookup(struct proc_dir_entry *pde) +{ + /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ + pde->proc_dops = &proc_net_dentry_ops; +} + static int seq_open_net(struct inode *inode, struct file *file) { 
unsigned int state_size = PDE(inode)->state_size; @@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; + pde_force_lookup(p); p->proc_fops = &proc_net_seq_fops; p->seq_ops = ops; p->state_size = state_size; @@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; + pde_force_lookup(p); p->proc_fops = &proc_net_seq_fops; p->seq_ops = ops; p->state_size = state_size; @@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; + pde_force_lookup(p); p->proc_fops = &proc_net_single_fops; p->single_show = show; return proc_register(parent, p); @@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; + pde_force_lookup(p); p->proc_fops = &proc_net_single_fops; p->single_show = show; p->write = write; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0ec9edab2f3..85b0ef890b28 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -423,7 +423,7 @@ struct mem_size_stats { }; static void smaps_account(struct mem_size_stats *mss, struct page *page, - bool compound, bool young, bool dirty) + bool compound, bool young, bool dirty, bool locked) { int i, nr = compound ? 1 << compound_order(page) : 1; unsigned long size = nr * PAGE_SIZE; @@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, else mss->private_clean += size; mss->pss += (u64)size << PSS_SHIFT; + if (locked) + mss->pss_locked += (u64)size << PSS_SHIFT; return; } for (i = 0; i < nr; i++, page++) { int mapcount = page_mapcount(page); + unsigned long pss = (PAGE_SIZE << PSS_SHIFT); if (mapcount >= 2) { if (dirty || PageDirty(page)) mss->shared_dirty += PAGE_SIZE; else mss->shared_clean += PAGE_SIZE; - mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; + mss->pss += pss / mapcount; + if (locked) + mss->pss_locked += pss / mapcount; } else { if (dirty || PageDirty(page)) mss->private_dirty += PAGE_SIZE; else mss->private_clean += PAGE_SIZE; - mss->pss += PAGE_SIZE << PSS_SHIFT; + mss->pss += pss; + if (locked) + mss->pss_locked += pss; } } } @@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; + bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; if (pte_present(*pte)) { @@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, if (!page) return; - smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); + smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; + bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page; /* FOLL_DUMP will return -EFAULT on huge zero page */ @@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, /* pass */; else VM_BUG_ON_PAGE(1, page); - smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); + smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); } #else static void 
smaps_pmd_entry(pmd_t *pmd, unsigned long addr, @@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma, } } #endif - /* mmap_sem is held in m_start */ walk_page_vma(vma, &smaps_walk); - if (vma->vm_flags & VM_LOCKED) - mss->pss_locked += mss->pss; } #define SEQ_PUT_DEC(str, val) \ diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index 1c8eecfe52b8..6acf1bfa0bfe 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -768,18 +768,23 @@ xrep_findroot_block( if (!uuid_equal(&btblock->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) goto out; + /* + * Read verifiers can reference b_ops, so we set the pointer + * here. If the verifier fails we'll reset the buffer state + * to what it was before we touched the buffer. + */ + bp->b_ops = fab->buf_ops; fab->buf_ops->verify_read(bp); if (bp->b_error) { + bp->b_ops = NULL; bp->b_error = 0; goto out; } /* * Some read verifiers will (re)set b_ops, so we must be - * careful not to blow away any such assignment. + * careful not to change b_ops after running the verifier. */ - if (!bp->b_ops) - bp->b_ops = fab->buf_ops; } /* diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 338b9d9984e0..d9048bcea49c 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -449,6 +449,7 @@ xfs_map_blocks( } wpc->imap = imap; + xfs_trim_extent_eof(&wpc->imap, ip); trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); return 0; allocate_blocks: @@ -459,6 +460,7 @@ allocate_blocks: ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || imap.br_startoff + imap.br_blockcount <= cow_fsb); wpc->imap = imap; + xfs_trim_extent_eof(&wpc->imap, ip); trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); return 0; } diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index eedc5e0156ff..4f5f2ff3f70f 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -776,10 +776,26 @@ _xfs_buf_read( } /* + * Set buffer ops on an unchecked buffer and validate it, if possible. + * * If the caller passed in an ops structure and the buffer doesn't have ops * assigned, set the ops and use them to verify the contents. If the contents * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no * recorded errors and is already in XBF_DONE state. + * + * Under normal operations, every in-core buffer must have buffer ops assigned + * to them when the buffer is read in from disk so that we can validate the + * metadata. + * + * However, there are two scenarios where one can encounter in-core buffers + * that don't have buffer ops. The first is during log recovery of buffers on + * a V4 filesystem, though these buffers are purged at the end of recovery. + * + * The other is online repair, which tries to match arbitrary metadata blocks + * with btree types in order to find the root. If online repair doesn't match + * the buffer with /any/ btree type, the buffer remains in memory in DONE state + * with no ops, and a subsequent read_buf call from elsewhere will not set the + * ops. This function helps us fix this situation. 
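(Illustrative sketch only, not part of this patch: the comment above documents xfs_buf_ensure_ops(), whose body is not shown in this hunk. Based purely on that description, the behaviour is roughly as follows; the helper name and exact error handling are assumptions, not the actual fs/xfs/xfs_buf.c code.)

	/* sketch: give an unchecked buffer its ops and validate it, per the comment above */
	static int ensure_ops_sketch(struct xfs_buf *bp, const struct xfs_buf_ops *ops)
	{
		if (bp->b_ops || !ops)
			return 0;			/* already validated, or nothing to apply */
		bp->b_ops = ops;
		ops->verify_read(bp);			/* use the ops to verify the contents */
		if (bp->b_error)
			bp->b_flags &= ~XBF_DONE;	/* contents could not be verified */
		return bp->b_error;
	}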
*/ int xfs_buf_ensure_ops( @@ -1536,8 +1552,7 @@ __xfs_buf_submit( xfs_buf_ioerror(bp, -EIO); bp->b_flags &= ~XBF_DONE; xfs_buf_stale(bp); - if (bp->b_flags & XBF_ASYNC) - xfs_buf_ioend(bp); + xfs_buf_ioend(bp); return -EIO; } diff --git a/include/uapi/asm-generic/shmparam.h b/include/asm-generic/shmparam.h index 8b78c0ba08b1..8b78c0ba08b1 100644 --- a/include/uapi/asm-generic/shmparam.h +++ b/include/asm-generic/shmparam.h diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index b53be41929be..04f7ac345984 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h @@ -350,7 +350,7 @@ #define IMX8MQ_CLK_VPU_G2_ROOT 241 /* SCCG PLL GATE */ -#define IMX8MQ_SYS1_PLL_OUT 232 +#define IMX8MQ_SYS1_PLL_OUT 242 #define IMX8MQ_SYS2_PLL_OUT 243 #define IMX8MQ_SYS3_PLL_OUT 244 #define IMX8MQ_DRAM_PLL_OUT 245 @@ -372,24 +372,24 @@ /* txesc clock */ #define IMX8MQ_CLK_DSI_IPG_DIV 256 -#define IMX8MQ_CLK_TMU_ROOT 265 +#define IMX8MQ_CLK_TMU_ROOT 257 /* Display root clocks */ -#define IMX8MQ_CLK_DISP_AXI_ROOT 266 -#define IMX8MQ_CLK_DISP_APB_ROOT 267 -#define IMX8MQ_CLK_DISP_RTRM_ROOT 268 +#define IMX8MQ_CLK_DISP_AXI_ROOT 258 +#define IMX8MQ_CLK_DISP_APB_ROOT 259 +#define IMX8MQ_CLK_DISP_RTRM_ROOT 260 -#define IMX8MQ_CLK_OCOTP_ROOT 269 +#define IMX8MQ_CLK_OCOTP_ROOT 261 -#define IMX8MQ_CLK_DRAM_ALT_ROOT 270 -#define IMX8MQ_CLK_DRAM_CORE 271 +#define IMX8MQ_CLK_DRAM_ALT_ROOT 262 +#define IMX8MQ_CLK_DRAM_CORE 263 -#define IMX8MQ_CLK_MU_ROOT 272 -#define IMX8MQ_VIDEO2_PLL_OUT 273 +#define IMX8MQ_CLK_MU_ROOT 264 +#define IMX8MQ_VIDEO2_PLL_OUT 265 -#define IMX8MQ_CLK_CLKO2 274 +#define IMX8MQ_CLK_CLKO2 266 -#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275 +#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 267 -#define IMX8MQ_CLK_END 276 +#define IMX8MQ_CLK_END 268 #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 7b24fc791146..228a5e234af0 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -71,7 +71,6 @@ #define MMP2_CLK_CCIC1_MIX 117 #define MMP2_CLK_CCIC1_PHY 118 #define MMP2_CLK_CCIC1_SPHY 119 -#define MMP2_CLK_SP 120 #define MMP2_NR_CLKS 200 #endif diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h new file mode 100644 index 000000000000..a726dd3f1dc6 --- /dev/null +++ b/include/keys/request_key_auth-type.h @@ -0,0 +1,36 @@ +/* request_key authorisation token key type + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H +#define _KEYS_REQUEST_KEY_AUTH_TYPE_H + +#include <linux/key.h> + +/* + * Authorisation record for request_key(). 
+ */ +struct request_key_auth { + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +} __randomize_layout; + +static inline struct request_key_auth *get_request_key_auth(const struct key *key) +{ + return key->payload.data[0]; +} + + +#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index e098cbe27db5..12babe991594 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -31,7 +31,7 @@ struct user_key_payload { struct rcu_head rcu; /* RCU destructor */ unsigned short datalen; /* length of this data */ - char data[0]; /* actual data */ + char data[0] __aligned(__alignof__(u64)); /* actual data */ }; extern struct key_type key_type_user; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4f31f96bbfab..c36c86f1ec9a 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -100,7 +100,7 @@ enum vgic_irq_config { }; struct vgic_irq { - spinlock_t irq_lock; /* Protects the content of the struct */ + raw_spinlock_t irq_lock; /* Protects the content of the struct */ struct list_head lpi_list; /* Used to link all LPIs together */ struct list_head ap_list; @@ -256,7 +256,7 @@ struct vgic_dist { u64 propbaser; /* Protects the lpi_list and the count value below. */ - spinlock_t lpi_list_lock; + raw_spinlock_t lpi_list_lock; struct list_head lpi_list_head; int lpi_list_count; @@ -307,7 +307,7 @@ struct vgic_cpu { unsigned int used_lrs; struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; - spinlock_t ap_list_lock; /* Protects the ap_list */ + raw_spinlock_t ap_list_lock; /* Protects the ap_list */ /* * List of IRQs that this VCPU should consider because they are either diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 87715f20b69a..03b4c4f225d0 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1014,6 +1014,13 @@ struct acpi_gpio_mapping { /* Ignore IoRestriction field */ #define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) +/* + * When ACPI GPIO mapping table is in use the index parameter inside it + * refers to the GPIO resource in _CRS method. That index has no + * distinction of actual type of the resource. When consumer wants to + * get GpioIo type explicitly, this quirk may be used. + */ +#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) unsigned int quirks; }; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 8804753805ac..7bb2d8de9f30 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); static inline sector_t blk_rq_trace_sector(struct request *rq) { - return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq); + /* + * Tracing should ignore starting sector for passthrough requests and + * requests where starting sector didn't get set. 
+ */ + if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) + return 0; + return blk_rq_pos(rq); } static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 19f32b0c29af..6b318efd8a74 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -34,6 +34,7 @@ #ifndef __has_attribute # define __has_attribute(x) __GCC4_has_attribute_##x # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) +# define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___noclone__ 1 @@ -101,6 +102,19 @@ #define __attribute_const__ __attribute__((__const__)) /* + * Optional: only supported since gcc >= 9 + * Optional: not supported by clang + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute + */ +#if __has_attribute(__copy__) +# define __copy(symbol) __attribute__((__copy__(symbol))) +#else +# define __copy(symbol) +#endif + +/* * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' * attribute warnings entirely and for good") for more information. * diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 218df7f4d3e1..5041357d0297 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -180,12 +180,10 @@ enum cpuhp_smt_control { #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) extern enum cpuhp_smt_control cpu_smt_control; extern void cpu_smt_disable(bool force); -extern void cpu_smt_check_topology_early(void); extern void cpu_smt_check_topology(void); #else # define cpu_smt_control (CPU_SMT_ENABLED) static inline void cpu_smt_disable(bool force) { } -static inline void cpu_smt_check_topology_early(void) { } static inline void cpu_smt_check_topology(void) { } #endif diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ef4b70f64f33..60996e64c579 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -62,9 +62,10 @@ extern const struct qstr slash_name; struct dentry_stat_t { long nr_dentry; long nr_unused; - long age_limit; /* age in seconds */ - long want_pages; /* pages requested by system */ - long dummy[2]; + long age_limit; /* age in seconds */ + long want_pages; /* pages requested by system */ + long nr_negative; /* # of unused negative dentries */ + long dummy; /* Reserved for future use */ }; extern struct dentry_stat_t dentry_stat; diff --git a/include/linux/efi.h b/include/linux/efi.h index 45ff763fba76..28604a8d0aa9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature) extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); extern bool efi_is_table_address(unsigned long phys_addr); - -extern int efi_apply_persistent_mem_reservations(void); #else static inline bool efi_enabled(int feature) { @@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr) { return false; } - -static inline int efi_apply_persistent_mem_reservations(void) -{ - return 0; -} #endif extern int efi_status_to_err(efi_status_t status); diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..e532fcc6e4b5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) return 
qdisc_skb_cb(skb)->data; } -static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, - struct sk_buff *skb) +static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) { u8 *cb_data = bpf_skb_cb(skb); u8 cb_saved[BPF_SKB_CB_LEN]; @@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, return res; } +static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u32 res; + + preempt_disable(); + res = __bpf_prog_run_save_cb(prog, skb); + preempt_enable(); + return res; +} + static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, struct sk_buff *skb) { u8 *cb_data = bpf_skb_cb(skb); + u32 res; if (unlikely(prog->cb_access)) memset(cb_data, 0, BPF_SKB_CB_LEN); - return BPF_PROG_RUN(prog, skb); + preempt_disable(); + res = BPF_PROG_RUN(prog, skb); + preempt_enable(); + return res; } static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, diff --git a/include/linux/fs.h b/include/linux/fs.h index 811c77743dad..29d8e2cfed0e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1479,11 +1479,12 @@ struct super_block { struct user_namespace *s_user_ns; /* - * Keep the lru lists last in the structure so they always sit on their - * own individual cachelines. + * The list_lru structure is essentially just a pointer to a table + * of per-node lru lists, each of which has its own spinlock. + * There is no need to put them into separate cachelines. */ - struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; - struct list_lru s_inode_lru ____cacheline_aligned_in_smp; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; struct rcu_head rcu; struct work_struct destroy_work; diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h index 8663f216c563..2d6100edf204 100644 --- a/include/linux/hid-debug.h +++ b/include/linux/hid-debug.h @@ -24,7 +24,10 @@ #ifdef CONFIG_DEBUG_FS +#include <linux/kfifo.h> + #define HID_DEBUG_BUFSIZE 512 +#define HID_DEBUG_FIFOSIZE 512 void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); void hid_dump_report(struct hid_device *, int , u8 *, int); @@ -37,11 +40,8 @@ void hid_debug_init(void); void hid_debug_exit(void); void hid_debug_event(struct hid_device *, char *); - struct hid_debug_list { - char *hid_debug_buf; - int head; - int tail; + DECLARE_KFIFO_PTR(hid_debug_fifo, char); struct fasync_struct *fasync; struct hid_device *hdev; struct list_head node; @@ -64,4 +64,3 @@ struct hid_debug_list { #endif #endif - diff --git a/include/linux/ide.h b/include/linux/ide.h index e7d29ae633cd..971cf76a78a0 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -615,6 +615,7 @@ struct ide_drive_s { /* current sense rq and buffer */ bool sense_rq_armed; + bool sense_rq_active; struct request *sense_rq; struct request_sense sense_data; @@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); extern void ide_timer_expiry(struct timer_list *t); extern irqreturn_t ide_intr(int irq, void *dev_id); extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); +extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool); extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); void ide_init_disk(struct gendisk *, ide_drive_t *); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 071b4cbdf010..c848a7cc502e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ 
b/include/linux/irqchip/arm-gic-v3.h @@ -319,7 +319,7 @@ #define GITS_TYPER_PLPIS (1UL << 0) #define GITS_TYPER_VLPIS (1UL << 1) #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 -#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) #define GITS_TYPER_IDBITS_SHIFT 8 #define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) diff --git a/include/linux/key-type.h b/include/linux/key-type.h index bc9af551fc83..e49d1de0614e 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -21,15 +21,6 @@ struct kernel_pkey_query; struct kernel_pkey_params; /* - * key under-construction record - * - passed to the request_key actor if supplied - */ -struct key_construction { - struct key *key; /* key being constructed */ - struct key *authkey;/* authorisation for key being constructed */ -}; - -/* * Pre-parsed payload, used by key add, update and instantiate. * * This struct will be cleared and data and datalen will be set with the data @@ -50,8 +41,7 @@ struct key_preparsed_payload { time64_t expiry; /* Expiry time of key */ } __randomize_layout; -typedef int (*request_key_actor_t)(struct key_construction *key, - const char *op, void *aux); +typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); /* * Preparsed matching criterion. @@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, - struct key *instkey); + struct key *authkey); extern int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, - struct key *instkey); -extern void complete_request_key(struct key_construction *cons, int error); + struct key *authkey); +extern void complete_request_key(struct key *authkey, int error); static inline int key_negate_and_link(struct key *key, unsigned timeout, struct key *keyring, - struct key *instkey) + struct key *authkey) { - return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); + return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); } extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 64c41cf45590..859b55b66db2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -29,9 +29,6 @@ extern unsigned long max_pfn; */ extern unsigned long long max_possible_pfn; -#define INIT_MEMBLOCK_REGIONS 128 -#define INIT_PHYSMEM_REGIONS 4 - /** * enum memblock_flags - definition of memory region attributes * @MEMBLOCK_NONE: no special request diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 07da5c6c5ba0..368267c1b71b 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -21,14 +21,16 @@ struct vmem_altmap; * walkers which rely on the fully initialized page->flags and others * should use this rather than pfn_valid && pfn_to_page */ -#define pfn_to_online_page(pfn) \ -({ \ - struct page *___page = NULL; \ - unsigned long ___nr = pfn_to_section_nr(pfn); \ - \ - if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\ - ___page = pfn_to_page(pfn); \ - ___page; \ +#define pfn_to_online_page(pfn) \ +({ \ + struct page *___page = NULL; \ + unsigned long ___pfn = pfn; \ + unsigned long ___nr = pfn_to_section_nr(___pfn); \ + \ + if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ + 
pfn_valid_within(___pfn)) \ + ___page = pfn_to_page(___pfn); \ + ___page; \ }) /* diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index de7377815b6b..8ef330027b13 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -308,6 +308,7 @@ struct mmc_card { unsigned int nr_parts; unsigned int bouncesz; /* Bounce buffer size */ + struct workqueue_struct *complete_wq; /* Private workqueue */ }; static inline bool mmc_large_sector(struct mmc_card *card) diff --git a/include/linux/module.h b/include/linux/module.h index 8fa38d3e7538..f5bc4c046461 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -129,13 +129,13 @@ extern void cleanup_module(void); #define module_init(initfn) \ static inline initcall_t __maybe_unused __inittest(void) \ { return initfn; } \ - int init_module(void) __attribute__((alias(#initfn))); + int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); /* This is only required if you want to be unloadable. */ #define module_exit(exitfn) \ static inline exitcall_t __maybe_unused __exittest(void) \ { return exitfn; } \ - void cleanup_module(void) __attribute__((alias(#exitfn))); + void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); #endif diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2b2a6dce1630..4c76fe2c8488 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -11,6 +11,8 @@ #define _LINUX_NETDEV_FEATURES_H #include <linux/types.h> +#include <linux/bitops.h> +#include <asm/byteorder.h> typedef u64 netdev_features_t; @@ -154,8 +156,26 @@ enum { #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) -#define for_each_netdev_feature(mask_addr, bit) \ - for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) +/* Finds the next feature with the highest number of the range of start till 0. + */ +static inline int find_next_netdev_feature(u64 feature, unsigned long start) +{ + /* like BITMAP_LAST_WORD_MASK() for u64 + * this sets the most significant 64 - start to 0. 
+ */ + feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); + + return fls64(feature) - 1; +} + +/* This goes for the MSB to the LSB through the set feature bits, + * mask_addr should be a u64 and bit an int + */ +#define for_each_netdev_feature(mask_addr, bit) \ + for ((bit) = find_next_netdev_feature((mask_addr), \ + NETDEV_FEATURE_COUNT); \ + (bit) >= 0; \ + (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1377d085ef99..86dbb3e29139 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1483,6 +1483,7 @@ struct net_device_ops { * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook * @IFF_FAILOVER: device is a failover master device * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device + * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1514,6 +1515,7 @@ enum netdev_priv_flags { IFF_NO_RX_HANDLER = 1<<26, IFF_FAILOVER = 1<<27, IFF_FAILOVER_SLAVE = 1<<28, + IFF_L3MDEV_RX_HANDLER = 1<<29, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1544,6 +1546,7 @@ enum netdev_priv_flags { #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER #define IFF_FAILOVER IFF_FAILOVER #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE +#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER /** * struct net_device - The DEVICE structure. @@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev) return dev->priv_flags & IFF_SUPP_NOFCS; } +static inline bool netif_has_l3_rx_handler(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; +} + static inline bool netif_is_l3_master(const struct net_device *dev) { return dev->priv_flags & IFF_L3MDEV_MASTER; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1d5c551a5add..e1a051724f7e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -447,6 +447,11 @@ struct pmu { * Filter events for PMU-specific reasons. */ int (*filter_match) (struct perf_event *event); /* optional */ + + /* + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. 
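(Illustrative sketch only, not part of this patch: a hypothetical driver-side implementation of the new optional check_period hook. When a PMU does not supply one, the core installs a stub that always returns 0, perf_event_nop_int, added in kernel/events/core.c further below.)

	/* sketch: reject period values an (imaginary) 32-bit hardware counter cannot hold */
	static int example_pmu_check_period(struct perf_event *event, u64 value)
	{
		if (value > 0xffffffffULL)
			return -EINVAL;
		return 0;
	}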
+ */ + int (*check_period) (struct perf_event *event, u64 value); /* optional */ }; enum perf_addr_filter_action_t { diff --git a/include/linux/phy.h b/include/linux/phy.h index ef20aeea10cc..333b56d8f746 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -674,26 +674,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask); -static inline bool __phy_is_started(struct phy_device *phydev) -{ - WARN_ON(!mutex_is_locked(&phydev->lock)); - - return phydev->state >= PHY_UP; -} - /** * phy_is_started - Convenience function to check whether PHY is started * @phydev: The phy_device struct */ static inline bool phy_is_started(struct phy_device *phydev) { - bool started; - - mutex_lock(&phydev->lock); - started = __phy_is_started(phydev); - mutex_unlock(&phydev->lock); - - return started; + return phydev->state >= PHY_UP; } void phy_resolve_aneg_linkmode(struct phy_device *phydev); @@ -1005,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; } +static inline int genphy_no_ack_interrupt(struct phy_device *phydev) +{ + return 0; +} +static inline int genphy_no_config_intr(struct phy_device *phydev) +{ + return 0; +} int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum); int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 54af4eef169f..fed5be706bc9 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) static inline void pm_runtime_mark_last_busy(struct device *dev) { - WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); + WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); } static inline bool pm_runtime_is_irq_safe(struct device *dev) diff --git a/include/linux/sched.h b/include/linux/sched.h index d2f90fa92468..bba3afb4e9bf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -995,7 +995,7 @@ struct task_struct { /* cg_list protected by css_set_lock and tsk->alloc_lock: */ struct list_head cg_list; #endif -#ifdef CONFIG_X86_RESCTRL +#ifdef CONFIG_X86_CPU_RESCTRL u32 closid; u32 rmid; #endif diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index ec912d01126f..ecdc6542070f 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ +#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ diff --git a/include/linux/signal.h b/include/linux/signal.h index cc7e2c1cd444..9702016734b1 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig); #endif #define siginmask(sig, mask) \ - ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) + ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) #define SIG_KERNEL_ONLY_MASK (\ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 95d25b010a25..bdb9563c64a0 100644 --- a/include/linux/skbuff.h +++ 
b/include/linux/skbuff.h @@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) skb_set_transport_header(skb, keys.control.thoff); - else + else if (offset_hint >= 0) skb_set_transport_header(skb, offset_hint); } @@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; } +static inline bool skb_is_gso_tcp(const struct sk_buff *skb) +{ + return skb_is_gso(skb) && + skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); +} + static inline void skb_gso_reset(struct sk_buff *skb) { skb_shinfo(skb)->gso_size = 0; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7ddfc65586b0..4335bd771ce5 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -184,6 +184,7 @@ struct plat_stmmacenet_data { struct clk *pclk; struct clk *clk_ptp_ref; unsigned int clk_ptp_rate; + unsigned int clk_ref_rate; struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index cb462f9ab7dd..e0348cb0a1dd 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; + } else { + /* gso packets without NEEDS_CSUM do not set transport_offset. + * probe and drop if does not match one of the above types. + */ + if (gso_type && skb->network_header) { + if (!skb->protocol) + virtio_net_hdr_set_proto(skb, hdr); +retry: + skb_probe_transport_header(skb, -1); + if (!skb_transport_header_was_set(skb)) { + /* UFO does not specify ipv4 or 6: try both */ + if (gso_type & SKB_GSO_UDP && + skb->protocol == htons(ETH_P_IP)) { + skb->protocol = htons(ETH_P_IPV6); + goto retry; + } + return -EINVAL; + } + } } if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 00b5e7825508..74ff688568a0 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -39,6 +39,7 @@ struct inet_peer { u32 metrics[RTAX_MAX]; u32 rate_tokens; /* rate limiting for ICMP */ + u32 n_redirects; unsigned long rate_last; /* * Once inet_peer is queued for deletion (refcnt == 0), following field diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 78fa0ac4613c..5175fd63cd82 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) if (netif_is_l3_slave(skb->dev)) master = netdev_master_upper_dev_get_rcu(skb->dev); - else if (netif_is_l3_master(skb->dev)) + else if (netif_is_l3_master(skb->dev) || + netif_has_l3_rx_handler(skb->dev)) master = skb->dev; if (master && master->l3mdev_ops->l3mdev_l3_rcv) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 841835a387e1..b4984bbbe157 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -469,9 +469,7 @@ struct nft_set_binding { int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding); void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding); -void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding); + struct nft_set_binding *binding, bool commit); void nf_tables_destroy_set(const struct nft_ctx *ctx, struct 
nft_set *set); /** @@ -721,6 +719,13 @@ struct nft_expr_type { #define NFT_EXPR_STATEFUL 0x1 #define NFT_EXPR_GC 0x2 +enum nft_trans_phase { + NFT_TRANS_PREPARE, + NFT_TRANS_ABORT, + NFT_TRANS_COMMIT, + NFT_TRANS_RELEASE +}; + /** * struct nft_expr_ops - nf_tables expression operations * @@ -750,7 +755,8 @@ struct nft_expr_ops { void (*activate)(const struct nft_ctx *ctx, const struct nft_expr *expr); void (*deactivate)(const struct nft_ctx *ctx, - const struct nft_expr *expr); + const struct nft_expr *expr, + enum nft_trans_phase phase); void (*destroy)(const struct nft_ctx *ctx, const struct nft_expr *expr); void (*destroy_clone)(const struct nft_ctx *ctx, @@ -1323,12 +1329,15 @@ struct nft_trans_rule { struct nft_trans_set { struct nft_set *set; u32 set_id; + bool bound; }; #define nft_trans_set(trans) \ (((struct nft_trans_set *)trans->data)->set) #define nft_trans_set_id(trans) \ (((struct nft_trans_set *)trans->data)->set_id) +#define nft_trans_set_bound(trans) \ + (((struct nft_trans_set *)trans->data)->bound) struct nft_trans_chain { bool update; diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h index b669fe6dbc3b..98f31c7ea23d 100644 --- a/include/net/phonet/pep.h +++ b/include/net/phonet/pep.h @@ -63,10 +63,11 @@ struct pnpipehdr { u8 state_after_reset; /* reset request */ u8 error_code; /* any response */ u8 pep_type; /* status indication */ - u8 data[1]; + u8 data0; /* anything else */ }; + u8 data[]; }; -#define other_pep_type data[1] +#define other_pep_type data[0] static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) { diff --git a/include/net/sock.h b/include/net/sock.h index 2b229f7be8eb..f43f935cb113 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1277,7 +1277,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk) percpu_counter_inc(sk->sk_prot->sockets_allocated); } -static inline int +static inline u64 sk_sockets_allocated_read_positive(struct sock *sk) { return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); diff --git a/include/net/tls.h b/include/net/tls.h index 2a6ac8d642af..1486b60c4de8 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -120,6 +120,8 @@ struct tls_rec { struct scatterlist sg_aead_out[2]; char aad_space[TLS_AAD_SPACE_SIZE]; + u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE + + TLS_CIPHER_AES_GCM_128_SALT_SIZE]; struct aead_request aead_req; u8 aead_req_ctx[]; }; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 7298a53b9702..85386becbaea 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) xfrm_pol_put(pols[i]); } -void __xfrm_state_destroy(struct xfrm_state *); +void __xfrm_state_destroy(struct xfrm_state *, bool); static inline void __xfrm_state_put(struct xfrm_state *x) { @@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x) static inline void xfrm_state_put(struct xfrm_state *x) { if (refcount_dec_and_test(&x->refcnt)) - __xfrm_state_destroy(x); + __xfrm_state_destroy(x, false); +} + +static inline void xfrm_state_put_sync(struct xfrm_state *x) +{ + if (refcount_dec_and_test(&x->refcnt)) + __xfrm_state_destroy(x, true); } static inline void xfrm_state_hold(struct xfrm_state *x) @@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo { struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); int xfrm_state_delete(struct xfrm_state *x); -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); +int xfrm_state_flush(struct net *net, u8 proto, bool 
task_valid, bool sync); int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a3ceed3a040a..80debf5982ac 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2579,9 +2579,10 @@ struct ib_device { const struct uapi_definition *driver_def; enum rdma_driver_id driver_id; + /* - * Provides synchronization between device unregistration and netlink - * commands on a device. To be used only by core. + * Positive refcount indicates that the device is currently + * registered and cannot be unregistered. */ refcount_t refcount; struct completion unreg_completion; @@ -3926,6 +3927,25 @@ static inline bool ib_access_writable(int access_flags) int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status); +/** + * ib_device_try_get: Hold a registration lock + * device: The device to lock + * + * A device under an active registration lock cannot become unregistered. It + * is only possible to obtain a registration lock on a device that is fully + * registered, otherwise this function returns false. + * + * The registration lock is only necessary for actions which require the + * device to still be registered. Uses that only require the device pointer to + * be valid should use get_device(&ibdev->dev) to hold the memory. + * + */ +static inline bool ib_device_try_get(struct ib_device *dev) +{ + return refcount_inc_not_zero(&dev->refcount); +} + +void ib_device_put(struct ib_device *device); struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr); diff --git a/include/sound/cs35l36.h b/include/sound/cs35l36.h new file mode 100644 index 000000000000..8f8049d390f0 --- /dev/null +++ b/include/sound/cs35l36.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/sound/cs35l36.h -- Platform data for CS35L36 + * + * Copyright 2018 Cirrus Logic, Inc. + * + * Author: James Schulman <james.schulman@cirrus.com> + * + */ + +#ifndef __CS35L36_H +#define __CS35L36_H + +struct cs35l36_vpbr_cfg { + bool is_present; + bool vpbr_en; + int vpbr_thld; + int vpbr_atk_rate; + int vpbr_atk_vol; + int vpbr_max_attn; + int vpbr_wait; + int vpbr_rel_rate; + int vpbr_mute_en; +}; + +struct cs35l36_platform_data { + bool multi_amp_mode; + bool dcm_mode; + bool amp_pcm_inv; + bool imon_pol_inv; + bool vmon_pol_inv; + int boost_ind; + int bst_vctl; + int bst_vctl_sel; + int bst_ipk; + bool extern_boost; + int temp_warn_thld; + int irq_drv_sel; + int irq_gpio_sel; + struct cs35l36_vpbr_cfg vpbr_config; +}; + +#endif /* __CS35L36_H */ diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index 2c4cfaa135a6..c679f6116580 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -99,10 +99,6 @@ void snd_dmaengine_pcm_set_config_from_dai_data( * playback. */ #define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) -/* - * The PCM streams have custom channel names specified. 
- */ -#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4) /** * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 6cbbeed9cdd0..35b38e41e5b2 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -86,9 +86,6 @@ struct snd_soc_acpi_mach_params { * is not constant since this field may be updated at run-time * @sof_fw_filename: Sound Open Firmware file name, if enabled * @sof_tplg_filename: Sound Open Firmware topology file name, if enabled - * @asoc_plat_name: ASoC platform name, used for binding machine drivers - * if non NULL - * @new_mach_data: machine driver private data fixup */ /* Descriptor for SST ASoC machine driver */ struct snd_soc_acpi_mach { @@ -102,8 +99,6 @@ struct snd_soc_acpi_mach { struct snd_soc_acpi_mach_params mach_params; const char *sof_fw_filename; const char *sof_tplg_filename; - const char *asoc_plat_name; - struct platform_device * (*new_mach_data)(void *pdata); }; #define SND_SOC_ACPI_MAX_CODECS 3 diff --git a/include/sound/soc.h b/include/sound/soc.h index 95689680336b..eb7db605955b 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -802,6 +802,9 @@ struct snd_soc_component_driver { int probe_order; int remove_order; + /* signal if the module handling the component cannot be removed */ + unsigned int ignore_module_refcount:1; + /* bits */ unsigned int idle_bias_on:1; unsigned int suspend_bias_off:1; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 14565d703291..e8baca85bac6 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -137,15 +137,21 @@ enum { INET_DIAG_TCLASS, INET_DIAG_SKMEMINFO, INET_DIAG_SHUTDOWN, - INET_DIAG_DCTCPINFO, - INET_DIAG_PROTOCOL, /* response attribute only */ + + /* + * Next extenstions cannot be requested in struct inet_diag_req_v2: + * its field idiag_ext has only 8 bits. + */ + + INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */ + INET_DIAG_PROTOCOL, /* response attribute only */ INET_DIAG_SKV6ONLY, INET_DIAG_LOCALS, INET_DIAG_PEERS, INET_DIAG_PAD, - INET_DIAG_MARK, - INET_DIAG_BBRINFO, - INET_DIAG_CLASS_ID, + INET_DIAG_MARK, /* only with CAP_NET_ADMIN */ + INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */ + INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ INET_DIAG_MD5SIG, __INET_DIAG_MAX, }; diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 1196e1c1d4f6..ff8e7dc9d4dd 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -79,6 +79,12 @@ #define VIRTIO_F_RING_PACKED 34 /* + * This feature indicates that memory accesses by the driver and the + * device are ordered in a way described by the platform. + */ +#define VIRTIO_F_ORDER_PLATFORM 36 + +/* * Does the device support Single Root I/O Virtualization? 
*/ #define VIRTIO_F_SR_IOV 37 diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 2414f8af26b3..4c4e24c291a5 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -213,14 +213,4 @@ struct vring_packed_desc { __le16 flags; }; -struct vring_packed { - unsigned int num; - - struct vring_packed_desc *desc; - - struct vring_packed_desc_event *driver; - - struct vring_packed_desc_event *device; -}; - #endif /* _UAPI_LINUX_VIRTIO_RING_H */ diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index ef3c7ec793a7..eb76b38a00d4 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -52,6 +52,11 @@ struct hns_roce_ib_create_srq { __aligned_u64 que_addr; }; +struct hns_roce_ib_create_srq_resp { + __u32 srqn; + __u32 reserved; +}; + struct hns_roce_ib_create_qp { __aligned_u64 buf_addr; __aligned_u64 db_addr; diff --git a/init/Kconfig b/init/Kconfig index 513fa544a134..c9386a365eea 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -512,6 +512,17 @@ config PSI_DEFAULT_DISABLED per default but can be enabled through passing psi=1 on the kernel commandline during boot. + This feature adds some code to the task wakeup and sleep + paths of the scheduler. The overhead is too low to affect + common scheduling-intense workloads in practice (such as + webservers, memcache), but it does show up in artificial + scheduler stress tests, such as hackbench. + + If you are paranoid and not sure what the kernel will be + used for, say Y. + + Say N if unsure. + endmenu # "CPU/Task time and stats accounting" config CPU_ISOLATION @@ -825,7 +836,7 @@ config CGROUP_PIDS PIDs controller is designed to stop this from happening. It should be noted that organisational operations (such as attaching - to a cgroup hierarchy will *not* be blocked by the PIDs controller), + to a cgroup hierarchy) will *not* be blocked by the PIDs controller, since the PIDs limit only affects a process's ability to fork, not to attach to a cgroup. diff --git a/init/initramfs.c b/init/initramfs.c index 7cea802d00ef..fca899622937 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -550,6 +550,7 @@ skip: initrd_end = 0; } +#ifdef CONFIG_BLK_DEV_RAM #define BUF_SIZE 1024 static void __init clean_rootfs(void) { @@ -596,6 +597,7 @@ static void __init clean_rootfs(void) ksys_close(fd); kfree(buf); } +#endif static int __init populate_rootfs(void) { @@ -638,10 +640,8 @@ static int __init populate_rootfs(void) printk(KERN_INFO "Unpacking initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start); - if (err) { + if (err) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); - clean_rootfs(); - } free_initrd(); #endif } diff --git a/init/main.c b/init/main.c index e2e80ca3165a..c86a1c8f19f4 100644 --- a/init/main.c +++ b/init/main.c @@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void) initrd_start = 0; } #endif - page_ext_init(); kmemleak_init(); setup_per_cpu_pageset(); numa_policy_init(); @@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void) sched_init_smp(); page_alloc_init_late(); + /* Initialize page ext after all struct pages are initialized. 
*/ + page_ext_init(); do_basic_setup(); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index befe570be5ba..c57bd10340ed 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env, /* "typedef void new_void", "const void"...etc */ if (!btf_type_is_void(next_type) && - !btf_type_is_fwd(next_type)) { + !btf_type_is_fwd(next_type) && + !btf_type_is_func_proto(next_type)) { btf_verifier_log_type(env, v->t, "Invalid type_id"); return -EINVAL; } diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ab612fe9862f..d17d05570a3f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, bpf_compute_and_save_data_end(skb, &saved_data_end); ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, - bpf_prog_run_save_cb); + __bpf_prog_run_save_cb); bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); skb->sk = save_sk; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 4b7c76765d9d..f9274114c88d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) } if (htab_is_prealloc(htab)) { - pcpu_freelist_push(&htab->freelist, &l->fnode); + __pcpu_freelist_push(&htab->freelist, &l->fnode); } else { atomic_dec(&htab->count); l->htab = htab; @@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, } else { struct pcpu_freelist_node *l; - l = pcpu_freelist_pop(&htab->freelist); + l = __pcpu_freelist_pop(&htab->freelist); if (!l) return ERR_PTR(-E2BIG); l_new = container_of(l, struct htab_elem, fnode); diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index abf1002080df..93a5cbbde421 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key) } if (!node || node->prefixlen != key->prefixlen || + node->prefixlen != matchlen || (node->flags & LPM_TREE_NODE_FLAG_IM)) { ret = -ENOENT; goto out; diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c index 673fa6fe2d73..0c1b4ba9e90e 100644 --- a/kernel/bpf/percpu_freelist.c +++ b/kernel/bpf/percpu_freelist.c @@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s) free_percpu(s->freelist); } -static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, - struct pcpu_freelist_node *node) +static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, + struct pcpu_freelist_node *node) { raw_spin_lock(&head->lock); node->next = head->first; @@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, raw_spin_unlock(&head->lock); } -void pcpu_freelist_push(struct pcpu_freelist *s, +void __pcpu_freelist_push(struct pcpu_freelist *s, struct pcpu_freelist_node *node) { struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); - __pcpu_freelist_push(head, node); + ___pcpu_freelist_push(head, node); +} + +void pcpu_freelist_push(struct pcpu_freelist *s, + struct pcpu_freelist_node *node) +{ + unsigned long flags; + + local_irq_save(flags); + __pcpu_freelist_push(s, node); + local_irq_restore(flags); } void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, @@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, for_each_possible_cpu(cpu) { again: head = per_cpu_ptr(s->freelist, cpu); - __pcpu_freelist_push(head, buf); + 
___pcpu_freelist_push(head, buf); i++; buf += elem_size; if (i == nr_elems) @@ -74,14 +84,12 @@ again: local_irq_restore(flags); } -struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) +struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) { struct pcpu_freelist_head *head; struct pcpu_freelist_node *node; - unsigned long flags; int orig_cpu, cpu; - local_irq_save(flags); orig_cpu = cpu = raw_smp_processor_id(); while (1) { head = per_cpu_ptr(s->freelist, cpu); @@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) node = head->first; if (node) { head->first = node->next; - raw_spin_unlock_irqrestore(&head->lock, flags); + raw_spin_unlock(&head->lock); return node; } raw_spin_unlock(&head->lock); cpu = cpumask_next(cpu, cpu_possible_mask); if (cpu >= nr_cpu_ids) cpu = 0; - if (cpu == orig_cpu) { - local_irq_restore(flags); + if (cpu == orig_cpu) return NULL; - } } } + +struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) +{ + struct pcpu_freelist_node *ret; + unsigned long flags; + + local_irq_save(flags); + ret = __pcpu_freelist_pop(s); + local_irq_restore(flags); + return ret; +} diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h index 3049aae8ea1e..c3960118e617 100644 --- a/kernel/bpf/percpu_freelist.h +++ b/kernel/bpf/percpu_freelist.h @@ -22,8 +22,12 @@ struct pcpu_freelist_node { struct pcpu_freelist_node *next; }; +/* pcpu_freelist_* do spin_lock_irqsave. */ void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); +/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */ +void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); +struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *); void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, u32 nr_elems); int pcpu_freelist_init(struct pcpu_freelist *); diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index d43b14535827..950ab2f28922 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry) struct stack_map_irq_work *work; work = container_of(entry, struct stack_map_irq_work, irq_work); - up_read(work->sem); + up_read_non_owner(work->sem); work->sem = NULL; } @@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } else { work->sem = &current->mm->mmap_sem; irq_work_queue(&work->irq_work); + /* + * The irq_work will release the mmap_sem with + * up_read_non_owner(). The rwsem_release() is called + * here to release the lock from lockdep's perspective.
+ */ + rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_); } } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b155cd17c1bd..8577bb7f8be6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr) if (bpf_map_is_dev_bound(map)) { err = bpf_map_offload_lookup_elem(map, key, value); - } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || - map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { + goto done; + } + + preempt_disable(); + this_cpu_inc(bpf_prog_active); + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { err = bpf_percpu_hash_copy(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { err = bpf_percpu_array_copy(map, key, value); @@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr) } rcu_read_unlock(); } + this_cpu_dec(bpf_prog_active); + preempt_enable(); +done: if (err) goto free_value; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 56674a7c3778..8f295b790297 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off, return 0; } -static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, - int size, enum bpf_access_type t) +static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, + u32 regno, int off, int size, + enum bpf_access_type t) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; - struct bpf_insn_access_aux info; + struct bpf_insn_access_aux info = {}; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", @@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, return -EACCES; } + env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; + return 0; } @@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn verbose(env, "cannot write into socket\n"); return -EACCES; } - err = check_sock_access(env, regno, off, size, t); + err = check_sock_access(env, insn_idx, regno, off, size, t); if (!err && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { diff --git a/kernel/cpu.c b/kernel/cpu.c index 91d5c38eb7e5..d1c6d152da89 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { } #ifdef CONFIG_HOTPLUG_SMT enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; -EXPORT_SYMBOL_GPL(cpu_smt_control); - -static bool cpu_smt_available __read_mostly; void __init cpu_smt_disable(bool force) { @@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force) /* * The decision whether SMT is supported can only be done after the full - * CPU identification. Called from architecture code before non boot CPUs - * are brought up. - */ -void __init cpu_smt_check_topology_early(void) -{ - if (!topology_smt_supported()) - cpu_smt_control = CPU_SMT_NOT_SUPPORTED; -} - -/* - * If SMT was disabled by BIOS, detect it here, after the CPUs have been - * brought online. This ensures the smt/l1tf sysfs entries are consistent - * with reality. cpu_smt_available is set to true during the bringup of non - * boot CPUs when a SMT sibling is detected. Note, this may overwrite - * cpu_smt_control's previous setting. + * CPU identification. Called from architecture code.
*/ void __init cpu_smt_check_topology(void) { - if (!cpu_smt_available) + if (!topology_smt_supported()) cpu_smt_control = CPU_SMT_NOT_SUPPORTED; } @@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable); static inline bool cpu_smt_allowed(unsigned int cpu) { - if (topology_is_primary_thread(cpu)) + if (cpu_smt_control == CPU_SMT_ENABLED) return true; - /* - * If the CPU is not a 'primary' thread and the booted_once bit is - * set then the processor has SMT support. Store this information - * for the late check of SMT support in cpu_smt_check_topology(). - */ - if (per_cpu(cpuhp_state, cpu).booted_once) - cpu_smt_available = true; - - if (cpu_smt_control == CPU_SMT_ENABLED) + if (topology_is_primary_thread(cpu)) return true; /* @@ -2090,10 +2065,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) */ cpuhp_offline_cpu_device(cpu); } - if (!ret) { + if (!ret) cpu_smt_control = ctrlval; - arch_smt_update(); - } cpu_maps_update_done(); return ret; } @@ -2104,7 +2077,6 @@ static int cpuhp_smt_enable(void) cpu_maps_update_begin(); cpu_smt_control = CPU_SMT_ENABLED; - arch_smt_update(); for_each_present_cpu(cpu) { /* Skip online CPUs and CPUs on offline nodes */ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) diff --git a/kernel/events/core.c b/kernel/events/core.c index 3cd13a30f732..26d6edab051a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - - if (ret || !write) - return ret; - + int ret; + int perf_cpu = sysctl_perf_cpu_time_max_percent; /* * If throttling is disabled don't allow the write: */ - if (sysctl_perf_cpu_time_max_percent == 100 || - sysctl_perf_cpu_time_max_percent == 0) + if (write && (perf_cpu == 100 || perf_cpu == 0)) return -EINVAL; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (ret || !write) + return ret; + max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; update_perf_cpu_limits(); @@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event, } } +static int perf_event_check_period(struct perf_event *event, u64 value) +{ + return event->pmu->check_period(event, value); +} + static int perf_event_period(struct perf_event *event, u64 __user *arg) { u64 value; @@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) if (event->attr.freq && value > sysctl_perf_event_sample_rate) return -EINVAL; + if (perf_event_check_period(event, value)) + return -EINVAL; + event_function_call(event, __perf_event_period, &value); return 0; @@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu) return 0; } +static int perf_event_nop_int(struct perf_event *event, u64 value) +{ + return 0; +} + static DEFINE_PER_CPU(unsigned int, nop_txn_flags); static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) @@ -9691,6 +9704,9 @@ got_cpu_context: pmu->pmu_disable = perf_pmu_nop_void; } + if (!pmu->check_period) + pmu->check_period = perf_event_nop_int; + if (!pmu->event_idx) pmu->event_idx = perf_event_idx_default; diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 4a9937076331..5ab4fe3b1dcc 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -734,6 +734,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, 
int cpu, int flags) size = sizeof(struct ring_buffer); size += nr_pages * sizeof(void *); + if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) + goto fail; + rb = kzalloc(size, GFP_KERNEL); if (!rb) goto fail; diff --git a/kernel/exit.c b/kernel/exit.c index 3fb7be001964..2639a30a8aa5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p) return NULL; } -static struct task_struct *find_child_reaper(struct task_struct *father) +static struct task_struct *find_child_reaper(struct task_struct *father, + struct list_head *dead) __releases(&tasklist_lock) __acquires(&tasklist_lock) { struct pid_namespace *pid_ns = task_active_pid_ns(father); struct task_struct *reaper = pid_ns->child_reaper; + struct task_struct *p, *n; if (likely(reaper != father)) return reaper; @@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father) panic("Attempted to kill init! exitcode=0x%08x\n", father->signal->group_exit_code ?: father->exit_code); } + + list_for_each_entry_safe(p, n, dead, ptrace_entry) { + list_del_init(&p->ptrace_entry); + release_task(p); + } + zap_pid_ns_processes(pid_ns); write_lock_irq(&tasklist_lock); @@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father, exit_ptrace(father, dead); /* Can drop and reacquire tasklist_lock */ - reaper = find_child_reaper(father); + reaper = find_child_reaper(father, dead); if (list_empty(&father->children)) return; diff --git a/kernel/futex.c b/kernel/futex.c index fdd312da0992..a0514e01c3eb 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2221,11 +2221,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) * decrement the counter at queue_unlock() when some error has * occurred and we don't end up adding the task to the list. */ - hb_waiters_inc(hb); + hb_waiters_inc(hb); /* implies smp_mb(); (A) */ q->lock_ptr = &hb->lock; - spin_lock(&hb->lock); /* implies smp_mb(); (A) */ + spin_lock(&hb->lock); return hb; } @@ -2861,35 +2861,39 @@ retry_private: * and BUG when futex_unlock_pi() interleaves with this. * * Therefore acquire wait_lock while holding hb->lock, but drop the - * latter before calling rt_mutex_start_proxy_lock(). This still fully - * serializes against futex_unlock_pi() as that does the exact same - * lock handoff sequence. + * latter before calling __rt_mutex_start_proxy_lock(). This + * interleaves with futex_unlock_pi() -- which does a similar lock + * handoff -- such that the latter can observe the futex_q::pi_state + * before __rt_mutex_start_proxy_lock() is done. */ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); spin_unlock(q.lock_ptr); + /* + * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter + * such that futex_unlock_pi() is guaranteed to observe the waiter when + * it sees the futex_q::pi_state. 
+ */ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); if (ret) { if (ret == 1) ret = 0; - - spin_lock(q.lock_ptr); - goto no_block; + goto cleanup; } - if (unlikely(to)) hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); +cleanup: spin_lock(q.lock_ptr); /* - * If we failed to acquire the lock (signal/timeout), we must + * If we failed to acquire the lock (deadlock/signal/timeout), we must * first acquire the hb->lock before removing the lock from the - * rt_mutex waitqueue, such that we can keep the hb and rt_mutex - * wait lists consistent. + * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait + * lists consistent. * * In particular; it is important that futex_unlock_pi() can not * observe this inconsistency. @@ -3013,6 +3017,10 @@ retry: * there is no point where we hold neither; and therefore * wake_futex_pi() must observe a state consistent with what we * observed. + * + * In particular; this forces __rt_mutex_start_proxy() to + * complete such that we're guaranteed to observe the + * rt_waiter. Also see the WARN in wake_futex_pi(). */ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); spin_unlock(&hb->lock); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 581edcc63c26..978d63a8261c 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock, rt_mutex_set_owner(lock, NULL); } +/** + * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task + * @lock: the rt_mutex to take + * @waiter: the pre-initialized rt_mutex_waiter + * @task: the task to prepare + * + * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock + * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. + * + * NOTE: does _NOT_ remove the @waiter on failure; must either call + * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this. + * + * Returns: + * 0 - task blocked on lock + * 1 - acquired the lock for task, caller should wake it up + * <0 - error + * + * Special API call for PI-futex support. + */ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) { int ret; + lockdep_assert_held(&lock->wait_lock); + if (try_to_take_rt_mutex(lock, task, NULL)) return 1; @@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, ret = 0; } - if (unlikely(ret)) - remove_waiter(lock, waiter); - debug_rt_mutex_print_deadlock(waiter); return ret; @@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, * @waiter: the pre-initialized rt_mutex_waiter * @task: the task to prepare * + * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock + * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. + * + * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter + * on failure. + * * Returns: * 0 - task blocked on lock * 1 - acquired the lock for task, caller should wake it up * <0 - error * - * Special API call for FUTEX_REQUEUE_PI support. + * Special API call for PI-futex support. 
*/ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, @@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, raw_spin_lock_irq(&lock->wait_lock); ret = __rt_mutex_start_proxy_lock(lock, waiter, task); + if (unlikely(ret)) + remove_waiter(lock, waiter); raw_spin_unlock_irq(&lock->wait_lock); return ret; @@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, * @lock: the rt_mutex we were woken on * @waiter: the pre-initialized rt_mutex_waiter * - * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). + * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or + * rt_mutex_wait_proxy_lock(). * * Unless we acquired the lock; we're still enqueued on the wait-list and can * in fact still be granted ownership until we're removed. Therefore we can diff --git a/kernel/relay.c b/kernel/relay.c index 04f248644e06..9e0f52375487 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan, dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR, buf, &chan->is_global); + if (IS_ERR(dentry)) + dentry = NULL; kfree(tmpname); @@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) dentry = chan->cb->create_buf_file(NULL, NULL, S_IRUSR, buf, &chan->is_global); - if (WARN_ON(dentry)) + if (IS_ERR_OR_NULL(dentry)) goto free_buf; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 50aa2aba69bd..310d0637fe4b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5980,6 +5980,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p #ifdef CONFIG_SCHED_SMT DEFINE_STATIC_KEY_FALSE(sched_smt_present); +EXPORT_SYMBOL_GPL(sched_smt_present); static inline void set_idle_cores(int cpu, int val) { diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index fe24de3fbc93..0e97ca9306ef 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -124,6 +124,7 @@ * sampling of the aggregate task states would be. */ +#include "../workqueue_internal.h" #include <linux/sched/loadavg.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> @@ -321,7 +322,7 @@ static bool update_stats(struct psi_group *group) expires = group->next_update; if (now < expires) goto out; - if (now - expires > psi_period) + if (now - expires >= psi_period) missed_periods = div_u64(now - expires, psi_period); /* @@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu, groupc->tasks[t]++; write_seqcount_end(&groupc->seq); - - if (!delayed_work_pending(&group->clock_work)) - schedule_delayed_work(&group->clock_work, PSI_FREQ); } static struct psi_group *iterate_groups(struct task_struct *task, void **iter) @@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set) { int cpu = task_cpu(task); struct psi_group *group; + bool wake_clock = true; void *iter = NULL; if (!task->pid) @@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set) task->psi_flags &= ~clear; task->psi_flags |= set; - while ((group = iterate_groups(task, &iter))) + /* + * Periodic aggregation shuts off if there is a period of no + * task changes, so we wake it back up if necessary. However, + * don't do this if the task change is the aggregation worker + * itself going to sleep, or we'll ping-pong forever. 
+ */ + if (unlikely((clear & TSK_RUNNING) && + (task->flags & PF_WQ_WORKER) && + wq_worker_last_func(task) == psi_update_work)) + wake_clock = false; + + while ((group = iterate_groups(task, &iter))) { psi_group_change(group, cpu, clear, set); + if (wake_clock && !delayed_work_pending(&group->clock_work)) + schedule_delayed_work(&group->clock_work, PSI_FREQ); + } } void psi_memstall_tick(struct task_struct *task, int cpu) diff --git a/kernel/signal.c b/kernel/signal.c index e1d7ad8e6ab1..57b7771e20d7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in } EXPORT_SYMBOL_GPL(dequeue_signal); +static int dequeue_synchronous_signal(kernel_siginfo_t *info) +{ + struct task_struct *tsk = current; + struct sigpending *pending = &tsk->pending; + struct sigqueue *q, *sync = NULL; + + /* + * Might a synchronous signal be in the queue? + */ + if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) + return 0; + + /* + * Return the first synchronous signal in the queue. + */ + list_for_each_entry(q, &pending->list, list) { + /* Synchronous signals have a positive si_code */ + if ((q->info.si_code > SI_USER) && + (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { + sync = q; + goto next; + } + } + return 0; +next: + /* + * Check if there is another siginfo for the same signal. + */ + list_for_each_entry_continue(q, &pending->list, list) { + if (q->info.si_signo == sync->info.si_signo) + goto still_pending; + } + + sigdelset(&pending->signal, sync->info.si_signo); + recalc_sigpending(); +still_pending: + list_del_init(&sync->list); + copy_siginfo(info, &sync->info); + __sigqueue_free(sync); + return info->si_signo; +} + /* * Tell a process that it has a new active signal.. * @@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc result = TRACE_SIGNAL_DELIVERED; /* - * Skip useless siginfo allocation for SIGKILL SIGSTOP, - * and kernel threads. + * Skip useless siginfo allocation for SIGKILL and kernel threads. */ - if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) + if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) goto out_set; /* @@ -2394,6 +2435,14 @@ relock: goto relock; } + /* Has this task already been marked for death? */ + if (signal_group_exit(signal)) { + ksig->info.si_signo = signr = SIGKILL; + sigdelset(&current->pending.signal, SIGKILL); + recalc_sigpending(); + goto fatal; + } + for (;;) { struct k_sigaction *ka; @@ -2407,7 +2456,15 @@ relock: goto relock; } - signr = dequeue_signal(current, &current->blocked, &ksig->info); + /* + * Signals generated by the execution of an instruction + * need to be delivered before any other pending signals + * so that the instruction pointer in the signal stack + * frame points to the faulting instruction. + */ + signr = dequeue_synchronous_signal(&ksig->info); + if (!signr) + signr = dequeue_signal(current, &current->blocked, &ksig->info); if (!signr) break; /* will return 0 */ @@ -2489,6 +2546,7 @@ relock: continue; } + fatal: spin_unlock_irq(&sighand->siglock); /* diff --git a/kernel/smp.c b/kernel/smp.c index 163c451af42e..f4cf1b0bb3b8 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -584,8 +584,6 @@ void __init smp_init(void) num_nodes, (num_nodes > 1 ? "s" : ""), num_cpus, (num_cpus > 1 ?
"s" : "")); - /* Final decision about SMT support */ - cpu_smt_check_topology(); /* Any cleanup work */ smp_cpus_done(setup_max_cpus); } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8b068adb9da1..f1a86a0d881d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog * int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) { - int err; - - mutex_lock(&bpf_event_mutex); - err = __bpf_probe_register(btp, prog); - mutex_unlock(&bpf_event_mutex); - return err; + return __bpf_probe_register(btp, prog); } int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) { - int err; - - mutex_lock(&bpf_event_mutex); - err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); - mutex_unlock(&bpf_event_mutex); - return err; + return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); } int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c521b7347482..c4238b441624 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file const char tgid_space[] = " "; const char space[] = " "; + print_event_info(buf, m); + seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d5fb09ebba8b..9eaf07f99212 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = { static nokprobe_inline int fetch_store_strlen(unsigned long addr) { - mm_segment_t old_fs; int ret, len = 0; u8 c; - old_fs = get_fs(); - set_fs(KERNEL_DS); - pagefault_disable(); - do { - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); + ret = probe_mem_read(&c, (u8 *)addr + len, 1); len++; } while (c && ret == 0 && len < MAX_STRING_SIZE); - pagefault_enable(); - set_fs(old_fs); - return (ret < 0) ? ret : len; } diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 5c56afc17cf8..4737bb8c07a3 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, if (unlikely(arg->dynamic)) *dl = make_data_loc(maxlen, dyndata - base); ret = process_fetch_insn(arg->code, regs, dl, base); - if (unlikely(ret < 0 && arg->dynamic)) + if (unlikely(ret < 0 && arg->dynamic)) { *dl = make_data_loc(0, dyndata - base); - else + } else { dyndata += ret; + maxlen -= ret; + } } } diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e335576b9411..9bde07c06362 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -5,7 +5,7 @@ * Copyright (C) IBM Corporation, 2010-2012 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> */ -#define pr_fmt(fmt) "trace_kprobe: " fmt +#define pr_fmt(fmt) "trace_uprobe: " fmt #include <linux/ctype.h> #include <linux/module.h> @@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base) if (ret >= 0) { if (ret == maxlen) dst[ret - 1] = '\0'; + else + /* + * Include the terminating null byte. In this case it + * was copied by strncpy_from_user but not accounted + * for in ret. 
+ */ + ret++; *(u32 *)dest = make_data_loc(ret, (void *)dst - base); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 392be4b252f6..fc5d23d752a5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -910,6 +910,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) } /** + * wq_worker_last_func - retrieve worker's last work function + * + * Determine the last function a worker executed. This is called from + * the scheduler to get a worker's last known identity. + * + * CONTEXT: + * spin_lock_irq(rq->lock) + * + * Return: + * The last work function %current executed as a worker, NULL if it + * hasn't executed any work yet. + */ +work_func_t wq_worker_last_func(struct task_struct *task) +{ + struct worker *worker = kthread_data(task); + + return worker->last_func; +} + +/** * worker_set_flags - set worker flags and adjust nr_running accordingly * @worker: self * @flags: flags to set @@ -2184,6 +2204,9 @@ __acquires(&pool->lock) if (unlikely(cpu_intensive)) worker_clr_flags(worker, WORKER_CPU_INTENSIVE); + /* tag the worker for identification in schedule() */ + worker->last_func = worker->current_func; + /* we're done with it, release */ hash_del(&worker->hentry); worker->current_work = NULL; diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index 66fbb5a9e633..cb68b03ca89a 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -53,6 +53,9 @@ struct worker { /* used only by rescuers to point to the target workqueue */ struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ + + /* used by the scheduler to determine a worker's last known identity */ + work_func_t last_func; }; /** @@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void) /* * Scheduler hooks for concurrency managed workqueue. Only to be used from - * sched/core.c and workqueue.c. + * sched/ and workqueue.c. */ void wq_worker_waking_up(struct task_struct *task, int cpu); struct task_struct *wq_worker_sleeping(struct task_struct *task); +work_func_t wq_worker_last_func(struct task_struct *task); #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ diff --git a/lib/assoc_array.c b/lib/assoc_array.c index c6659cb37033..59875eb278ea 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -768,9 +768,11 @@ all_leaves_cluster_together: new_s0->index_key[i] = ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); - blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); - pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); - new_s0->index_key[keylen - 1] &= ~blank; + if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) { + blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); + pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); + new_s0->index_key[keylen - 1] &= ~blank; + } /* This now reduces to a node splitting exercise for which we'll need * to regenerate the disparity table. 
diff --git a/lib/crc32.c b/lib/crc32.c index 45b1d67a1767..4a20455d1f61 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) EXPORT_SYMBOL(crc32_le); EXPORT_SYMBOL(__crc32c_le); -u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); -u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); +u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); +u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); /* * This multiplies the polynomials x and y modulo the given modulus. diff --git a/lib/test_kmod.c b/lib/test_kmod.c index d82d022111e0..9cf77628fc91 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config) config->test_driver = NULL; kfree_const(config->test_fs); - config->test_driver = NULL; + config->test_fs = NULL; } static void kmod_config_free(struct kmod_test_device *test_dev) diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 6a8ac7626797..e52f8cafe227 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt) static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, int cnt, bool slow) { - struct rhltable rhlt; + struct rhltable *rhlt; unsigned int i, ret; const char *key; int err = 0; - err = rhltable_init(&rhlt, &test_rht_params_dup); - if (WARN_ON(err)) + rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL); + if (WARN_ON(!rhlt)) + return -EINVAL; + + err = rhltable_init(rhlt, &test_rht_params_dup); + if (WARN_ON(err)) { + kfree(rhlt); return err; + } for (i = 0; i < cnt; i++) { rhl_test_objects[i].value.tid = i; - key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); + key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead); key += test_rht_params_dup.key_offset; if (slow) { - err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, + err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key, &rhl_test_objects[i].list_node.rhead)); if (err == -EAGAIN) err = 0; } else - err = rhltable_insert(&rhlt, + err = rhltable_insert(rhlt, &rhl_test_objects[i].list_node, test_rht_params_dup); if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) goto skip_print; } - ret = print_ht(&rhlt); + ret = print_ht(rhlt); WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); skip_print: - rhltable_destroy(&rhlt); + rhltable_destroy(rhlt); + kfree(rhlt); return 0; } diff --git a/mm/debug.c b/mm/debug.c index 0abb987dad9b..1611cf00a137 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = { void __dump_page(struct page *page, const char *reason) { - struct address_space *mapping = page_mapping(page); + struct address_space *mapping; bool page_poisoned = PagePoisoned(page); int mapcount; @@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason) goto hex_only; } + mapping = page_mapping(page); + /* * Avoid VM_BUG_ON() in page_mapcount(). 
* page->_mapcount space in struct page is used by sl[aou]b pages to @@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, if (!pmd_present(pmd)) return 0; - if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { + if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || + pmd_devmap(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they diff --git a/mm/hugetlb.c b/mm/hugetlb.c index df2e7dd5ff17..afef61656c1e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4268,7 +4268,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, break; } if (ret & VM_FAULT_RETRY) { - if (nonblocking) + if (nonblocking && + !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *nonblocking = 0; *nr_pages = 0; /* diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 0a14fcff70ed..5d1065efbd47 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -5,7 +5,10 @@ UBSAN_SANITIZE_generic.o := n UBSAN_SANITIZE_tags.o := n KCOV_INSTRUMENT := n +CFLAGS_REMOVE_common.o = -pg CFLAGS_REMOVE_generic.o = -pg +CFLAGS_REMOVE_tags.o = -pg + # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 73c9cbfdedf4..09b534fbba17 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) * get different tags. */ static u8 assign_tag(struct kmem_cache *cache, const void *object, - bool init, bool krealloc) + bool init, bool keep_tag) { - /* Reuse the same tag for krealloc'ed objects. */ - if (krealloc) + /* + * 1. When an object is kmalloc()'ed, two hooks are called: + * kasan_slab_alloc() and kasan_kmalloc(). We assign the + * tag only in the first one. + * 2. We reuse the same tag for krealloc'ed objects. 
+ */ + if (keep_tag) return get_tag(object); /* @@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, return (void *)object; } -void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, - gfp_t flags) -{ - return kasan_kmalloc(cache, object, cache->object_size, flags); -} - static inline bool shadow_invalid(u8 tag, s8 shadow_byte) { if (IS_ENABLED(CONFIG_KASAN_GENERIC)) @@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) } static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, - size_t size, gfp_t flags, bool krealloc) + size_t size, gfp_t flags, bool keep_tag) { unsigned long redzone_start; unsigned long redzone_end; @@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, KASAN_SHADOW_SCALE_SIZE); if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - tag = assign_tag(cache, object, false, krealloc); + tag = assign_tag(cache, object, false, keep_tag); /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ kasan_unpoison_shadow(set_tag(object, tag), size); @@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, return set_tag(object, tag); } +void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, + gfp_t flags) +{ + return __kasan_kmalloc(cache, object, cache->object_size, flags, false); +} + void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags) { - return __kasan_kmalloc(cache, object, size, flags, false); + return __kasan_kmalloc(cache, object, size, flags, true); } EXPORT_SYMBOL(kasan_kmalloc); diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 0777649e07c4..63fca3172659 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -46,7 +46,7 @@ void kasan_init_tags(void) int cpu; for_each_possible_cpu(cpu) - per_cpu(prng_state, cpu) = get_random_u32(); + per_cpu(prng_state, cpu) = (u32)get_cycles(); } /* diff --git a/mm/kmemleak.c b/mm/kmemleak.c index f9d9dc250428..707fa5579f66 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, unsigned long flags; struct kmemleak_object *object, *parent; struct rb_node **link, *rb_parent; + unsigned long untagged_ptr; object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); if (!object) { @@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, write_lock_irqsave(&kmemleak_lock, flags); - min_addr = min(min_addr, ptr); - max_addr = max(max_addr, ptr + size); + untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); + min_addr = min(min_addr, untagged_ptr); + max_addr = max(max_addr, untagged_ptr + size); link = &object_tree_root.rb_node; rb_parent = NULL; while (*link) { @@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end, unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); unsigned long *end = _end - (BYTES_PER_POINTER - 1); unsigned long flags; + unsigned long untagged_ptr; read_lock_irqsave(&kmemleak_lock, flags); for (ptr = start; ptr < end; ptr++) { @@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end, pointer = *ptr; kasan_enable_current(); - if (pointer < min_addr || pointer >= max_addr) + untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer); + if (untagged_ptr < min_addr || untagged_ptr >= max_addr) continue; /* diff --git a/mm/memblock.c b/mm/memblock.c index 022d4cbb3618..ea31045ba704 100644 
--- a/mm/memblock.c +++ b/mm/memblock.c @@ -26,6 +26,13 @@ #include "internal.h" +#define INIT_MEMBLOCK_REGIONS 128 +#define INIT_PHYSMEM_REGIONS 4 + +#ifndef INIT_MEMBLOCK_RESERVED_REGIONS +# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS +#endif + /** * DOC: memblock overview * @@ -92,7 +99,7 @@ unsigned long max_pfn; unsigned long long max_possible_pfn; static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; -static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; +static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; #endif @@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = { .reserved.regions = memblock_reserved_init_regions, .reserved.cnt = 1, /* empty dummy entry */ - .reserved.max = INIT_MEMBLOCK_REGIONS, + .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, .reserved.name = "reserved", #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 7c72f2a95785..831be5ff5f4d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, if (fail || tk->addr_valid == 0) { pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", pfn, tk->tsk->comm, tk->tsk->pid); - force_sig(SIGKILL, tk->tsk); + do_send_sig_info(SIGKILL, SEND_SIG_PRIV, + tk->tsk, PIDTYPE_PID); } /* diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index b9a667d36c55..1ad28323fb9f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page) return PageBuddy(page) && page_order(page) >= pageblock_order; } -/* Return the start of the next active pageblock after a given page */ -static struct page *next_active_pageblock(struct page *page) +/* Return the pfn of the start of the next active pageblock after a given pfn */ +static unsigned long next_active_pageblock(unsigned long pfn) { + struct page *page = pfn_to_page(pfn); + /* Ensure the starting page is pageblock-aligned */ - BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); + BUG_ON(pfn & (pageblock_nr_pages - 1)); /* If the entire pageblock is free, move to the end of free page */ if (pageblock_free(page)) { @@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page) /* be careful. we don't have locks, page_order can be changed.*/ order = page_order(page); if ((order < MAX_ORDER) && (order >= pageblock_order)) - return page + (1 << order); + return pfn + (1 << order); } - return page + pageblock_nr_pages; + return pfn + pageblock_nr_pages; } -static bool is_pageblock_removable_nolock(struct page *page) +static bool is_pageblock_removable_nolock(unsigned long pfn) { + struct page *page = pfn_to_page(pfn); struct zone *zone; - unsigned long pfn; /* * We have to be careful here because we are iterating over memory @@ -1232,12 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page) /* Checks if this range of memory is likely to be hot-removable. 
*/ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) { - struct page *page = pfn_to_page(start_pfn); - struct page *end_page = page + nr_pages; + unsigned long end_pfn, pfn; + + end_pfn = min(start_pfn + nr_pages, + zone_end_pfn(page_zone(pfn_to_page(start_pfn)))); /* Check the starting page of each pageblock within the range */ - for (; page < end_page; page = next_active_pageblock(page)) { - if (!is_pageblock_removable_nolock(page)) + for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) { + if (!is_pageblock_removable_nolock(pfn)) return false; cond_resched(); } @@ -1273,6 +1277,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, i++; if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) continue; + /* Check if we got outside of the zone */ + if (zone && !zone_spans_pfn(zone, pfn + i)) + return 0; page = pfn_to_page(pfn + i); if (zone && page_zone(page) != zone) return 0; @@ -1301,23 +1308,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, static unsigned long scan_movable_pages(unsigned long start, unsigned long end) { unsigned long pfn; - struct page *page; + for (pfn = start; pfn < end; pfn++) { - if (pfn_valid(pfn)) { - page = pfn_to_page(pfn); - if (PageLRU(page)) - return pfn; - if (__PageMovable(page)) - return pfn; - if (PageHuge(page)) { - if (hugepage_migration_supported(page_hstate(page)) && - page_huge_active(page)) - return pfn; - else - pfn = round_up(pfn + 1, - 1 << compound_order(page)) - 1; - } - } + struct page *page, *head; + unsigned long skip; + + if (!pfn_valid(pfn)) + continue; + page = pfn_to_page(pfn); + if (PageLRU(page)) + return pfn; + if (__PageMovable(page)) + return pfn; + + if (!PageHuge(page)) + continue; + head = compound_head(page); + if (hugepage_migration_supported(page_hstate(head)) && + page_huge_active(head)) + return pfn; + skip = (1 << compound_order(head)) - (page - head); + pfn += skip - 1; } return 0; } @@ -1344,7 +1355,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; struct page *page; - int not_managed = 0; int ret = 0; LIST_HEAD(source); @@ -1392,7 +1402,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) else ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); if (!ret) { /* Success */ - put_page(page); list_add_tail(&page->lru, &source); if (!__PageMovable(page)) inc_node_page_state(page, NR_ISOLATED_ANON + @@ -1401,22 +1410,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) } else { pr_warn("failed to isolate pfn %lx\n", pfn); dump_page(page, "isolation failed"); - put_page(page); - /* Because we don't have big zone->lock. we should - check this again here. */ - if (page_count(page)) { - not_managed++; - ret = -EBUSY; - break; - } } + put_page(page); } if (!list_empty(&source)) { - if (not_managed) { - putback_movable_pages(&source); - goto out; - } - /* Allocate a new page from the nearest neighbor node */ ret = migrate_pages(&source, new_node_page, NULL, 0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG); @@ -1429,7 +1426,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) putback_movable_pages(&source); } } -out: + return ret; } @@ -1576,7 +1573,6 @@ static int __ref __offline_pages(unsigned long start_pfn, we assume this for now. 
.*/ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) { - mem_hotplug_done(); ret = -EINVAL; reason = "multizone range"; goto failed_removal; @@ -1591,7 +1587,6 @@ static int __ref __offline_pages(unsigned long start_pfn, MIGRATE_MOVABLE, SKIP_HWPOISON | REPORT_FAILURE); if (ret) { - mem_hotplug_done(); reason = "failure to isolate range"; goto failed_removal; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d4496d9d34f5..ee2bce59d2bf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, nodemask_t *nodes) { unsigned long copy = ALIGN(maxnode-1, 64) / 8; - const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); + unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); if (copy > nbytes) { if (copy > PAGE_SIZE) @@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy, int uninitialized_var(pval); nodemask_t nodes; - if (nmask != NULL && maxnode < MAX_NUMNODES) + if (nmask != NULL && maxnode < nr_node_ids) return -EINVAL; err = do_get_mempolicy(&pval, &nodes, addr, flags); @@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); - nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); + nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) diff --git a/mm/migrate.c b/mm/migrate.c index a16b15090df3..d4fd680be3b0 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, /* Simple case, sync compaction */ if (mode != MIGRATE_ASYNC) { do { - get_bh(bh); lock_buffer(bh); bh = bh->b_this_page; @@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, /* async case, we cannot block on lock_buffer so use trylock_buffer */ do { - get_bh(bh); if (!trylock_buffer(bh)) { /* * We failed to lock the buffer and cannot stall in * async migration. Release the taken locks */ struct buffer_head *failed_bh = bh; - put_bh(failed_bh); bh = head; while (bh != failed_bh) { unlock_buffer(bh); - put_bh(bh); bh = bh->b_this_page; } return false; @@ -818,7 +814,6 @@ unlock_buffers: bh = head; do { unlock_buffer(bh); - put_bh(bh); bh = bh->b_this_page; } while (bh != head); @@ -1135,10 +1130,13 @@ out: * If migration is successful, decrease refcount of the newpage * which will not free the page because new page owner increased * refcounter. As well, if it is LRU page, add the page to LRU - * list in here. + * list in here. Use the old state of the isolated source page to + * determine if we migrated a LRU page. newpage was already unlocked + * and possibly modified by its owner - don't rely on the page + * state. */ if (rc == MIGRATEPAGE_SUCCESS) { - if (unlikely(__PageMovable(newpage))) + if (unlikely(!is_lru)) put_page(newpage); else putback_lru_page(newpage); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index f0e8cd9edb1a..26ea8636758f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -647,8 +647,8 @@ static int oom_reaper(void *unused) static void wake_oom_reaper(struct task_struct *tsk) { - /* tsk is already queued? */ - if (tsk == oom_reaper_list || tsk->oom_reaper_list) + /* mm is already queued? 
*/ + if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) return; get_task_struct(tsk); @@ -975,6 +975,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message) * still freeing memory. */ read_lock(&tasklist_lock); + + /* + * The task 'p' might have already exited before reaching here. The + * put_task_struct() will free task_struct 'p' while the loop still try + * to access the field of 'p', so, get an extra reference. + */ + get_task_struct(p); for_each_thread(p, t) { list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; @@ -994,6 +1001,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message) } } } + put_task_struct(p); read_unlock(&tasklist_lock); /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d295c9bc01a8..0b9f577b1a2a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone) max_boost = mult_frac(zone->_watermark[WMARK_HIGH], watermark_boost_factor, 10000); + + /* + * high watermark may be uninitialised if fragmentation occurs + * very early in boot so do not boost. We do not fall + * through and boost by pageblock_nr_pages as failing + * allocations that early means that reclaim is not going + * to help and it may even be impossible to reclaim the + * boosted watermark resulting in a hang. + */ + if (!max_boost) + return; + max_boost = max(pageblock_nr_pages, max_boost); zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, @@ -4675,11 +4687,11 @@ refill: /* Even if we own the page, we do not use atomic_set(). * This would break get_page_unless_zero() users. */ - page_ref_add(page, size - 1); + page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); /* reset page count bias and offset to start of new frag */ nc->pfmemalloc = page_is_pfmemalloc(page); - nc->pagecnt_bias = size; + nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; nc->offset = size; } @@ -4695,10 +4707,10 @@ refill: size = nc->size; #endif /* OK, page count is 0, we can safely set it */ - set_page_count(page, size); + set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); /* reset page count bias and offset to start of new frag */ - nc->pagecnt_bias = size; + nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; offset = size - fragsz; } @@ -5701,18 +5713,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, cond_resched(); } } -#ifdef CONFIG_SPARSEMEM - /* - * If the zone does not span the rest of the section then - * we should at least initialize those pages. Otherwise we - * could blow up on a poisoned page in some paths which depend - * on full sections being initialized (e.g. memory hotplug). - */ - while (end_pfn % PAGES_PER_SECTION) { - __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid); - end_pfn++; - } -#endif } #ifdef CONFIG_ZONE_DEVICE diff --git a/mm/page_ext.c b/mm/page_ext.c index ae44f7adbe07..8c78b8d45117 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -398,10 +398,8 @@ void __init page_ext_init(void) * We know some arch can have a nodes layout such as * -------------pfn--------------> * N0 | N1 | N2 | N0 | N1 | N2|.... - * - * Take into account DEFERRED_STRUCT_PAGE_INIT. 
*/ - if (early_pfn_to_nid(pfn) != nid) + if (pfn_to_nid(pfn) != nid) continue; if (init_section_page_ext(pfn, nid)) goto oom; diff --git a/mm/shmem.c b/mm/shmem.c index 6ece1e2fe76e..0905215fb016 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2854,10 +2854,14 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr * No ordinary (disk based) filesystem counts links as inodes; * but each new link needs a new dentry, pinning lowmem, and * tmpfs dentries cannot be pruned until they are unlinked. + * But if an O_TMPFILE file is linked into the tmpfs, the + * first link must skip that, to get the accounting right. */ - ret = shmem_reserve_inode(inode->i_sb); - if (ret) - goto out; + if (inode->i_nlink) { + ret = shmem_reserve_inode(inode->i_sb); + if (ret) + goto out; + } dir->i_size += BOGO_DIRENT_SIZE; inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); diff --git a/mm/slab.c b/mm/slab.c index 78eb8c5bf4e4..91c1863df93d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, void *freelist; void *addr = page_address(page); - page->s_mem = kasan_reset_tag(addr) + colour_off; + page->s_mem = addr + colour_off; page->active = 0; if (OBJFREELIST_SLAB(cachep)) @@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, /* Slab management obj is off-slab. */ freelist = kmem_cache_alloc_node(cachep->freelist_cache, local_flags, nodeid); + freelist = kasan_reset_tag(freelist); if (!freelist) return NULL; } else { @@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, offset *= cachep->colour_off; + /* + * Call kasan_poison_slab() before calling alloc_slabmgmt(), so + * page_address() in the latter returns a non-tagged pointer, + * as it should be for slab pages. + */ + kasan_poison_slab(page); + /* Get slab management. */ freelist = alloc_slabmgmt(cachep, page, offset, local_flags & ~GFP_CONSTRAINT_MASK, page_node); @@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, slab_map_pages(cachep, page, freelist); - kasan_poison_slab(page); cache_init_objs(cachep, page); if (gfpflags_allow_blocking(local_flags)) @@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void *ret = slab_alloc(cachep, flags, _RET_IP_); - ret = kasan_slab_alloc(cachep, ret, flags); trace_kmem_cache_alloc(_RET_IP_, ret, cachep->object_size, cachep->size, flags); @@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); - ret = kasan_slab_alloc(cachep, ret, flags); trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep->object_size, cachep->size, flags, nodeid); @@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, unsigned int objnr; unsigned long offset; + ptr = kasan_reset_tag(ptr); + /* Find and validate object. */ cachep = page->slab_cache; objnr = obj_to_index(cachep, page, (void *)ptr); diff --git a/mm/slab.h b/mm/slab.h index 4190c24ef0e9..384105318779 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, flags &= gfp_allowed_mask; for (i = 0; i < size; i++) { - void *object = p[i]; - - kmemleak_alloc_recursive(object, s->object_size, 1, + p[i] = kasan_slab_alloc(s, p[i], flags); + /* As p[i] might get tagged, call kmemleak hook after KASAN. 
*/ + kmemleak_alloc_recursive(p[i], s->object_size, 1, s->flags, flags); - p[i] = kasan_slab_alloc(s, object, flags); } if (memcg_kmem_enabled()) diff --git a/mm/slab_common.c b/mm/slab_common.c index 81732d05e74a..f9d89c1b5977 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) flags |= __GFP_COMP; page = alloc_pages(flags, order); ret = page ? page_address(page) : NULL; - kmemleak_alloc(ret, size, 1, flags); ret = kasan_kmalloc_large(ret, size, flags); + /* As ret might get tagged, call kmemleak hook after KASAN. */ + kmemleak_alloc(ret, size, 1, flags); return ret; } EXPORT_SYMBOL(kmalloc_order); diff --git a/mm/slub.c b/mm/slub.c index 1e3d0ec4e200..dc777761b6b7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, unsigned long ptr_addr) { #ifdef CONFIG_SLAB_FREELIST_HARDENED - return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr); + /* + * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged. + * Normally, this doesn't cause any issues, as both set_freepointer() + * and get_freepointer() are called with a pointer with the same tag. + * However, there are some issues with CONFIG_SLUB_DEBUG code. For + * example, when __free_slub() iterates over objects in a cache, it + * passes untagged pointers to check_object(). check_object() in turns + * calls get_freepointer() with an untagged pointer, which causes the + * freepointer to be restored incorrectly. + */ + return (void *)((unsigned long)ptr ^ s->random ^ + (unsigned long)kasan_reset_tag((void *)ptr_addr)); #else return ptr; #endif @@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) __p < (__addr) + (__objects) * (__s)->size; \ __p += (__s)->size) -#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \ - for (__p = fixup_red_left(__s, __addr), __idx = 1; \ - __idx <= __objects; \ - __p += (__s)->size, __idx++) - /* Determine object index from a given position */ static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) { - return (p - addr) / s->size; + return (kasan_reset_tag(p) - addr) / s->size; } static inline unsigned int order_objects(unsigned int order, unsigned int size) @@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s, return 1; base = page_address(page); + object = kasan_reset_tag(object); object = restore_red_left(s, object); if (object < base || object >= base + page->objects * s->size || (object - base) % s->size) { @@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, init_tracking(s, object); } +static void setup_page_debug(struct kmem_cache *s, void *addr, int order) +{ + if (!(s->flags & SLAB_POISON)) + return; + + metadata_access_enable(); + memset(addr, POISON_INUSE, PAGE_SIZE << order); + metadata_access_disable(); +} + static inline int alloc_consistency_checks(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) @@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size, #else /* !CONFIG_SLUB_DEBUG */ static inline void setup_object_debug(struct kmem_cache *s, struct page *page, void *object) {} +static inline void setup_page_debug(struct kmem_cache *s, + void *addr, int order) {} static inline int alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) { return 0; } @@ -1374,8 +1393,10 @@ static 
inline void dec_slabs_node(struct kmem_cache *s, int node, */ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) { + ptr = kasan_kmalloc_large(ptr, size, flags); + /* As ptr might get tagged, call kmemleak hook after KASAN. */ kmemleak_alloc(ptr, size, 1, flags); - return kasan_kmalloc_large(ptr, size, flags); + return ptr; } static __always_inline void kfree_hook(void *x) @@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) if (page_is_pfmemalloc(page)) SetPageSlabPfmemalloc(page); + kasan_poison_slab(page); + start = page_address(page); - if (unlikely(s->flags & SLAB_POISON)) - memset(start, POISON_INUSE, PAGE_SIZE << order); - - kasan_poison_slab(page); + setup_page_debug(s, start, order); shuffle = shuffle_freelist(s, page); if (!shuffle) { - for_each_object_idx(p, idx, s, start, page->objects) { - if (likely(idx < page->objects)) { - next = p + s->size; - next = setup_object(s, page, next); - set_freepointer(s, p, next); - } else - set_freepointer(s, p, NULL); - } start = fixup_red_left(s, start); start = setup_object(s, page, start); page->freelist = start; + for (idx = 0, p = start; idx < page->objects - 1; idx++) { + next = p + s->size; + next = setup_object(s, page, next); + set_freepointer(s, p, next); + p = next; + } + set_freepointer(s, p, NULL); } page->inuse = page->objects; diff --git a/mm/swap.c b/mm/swap.c index 4929bc1be60e..4d7d37eb3c40 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu) { } -static bool need_activate_page_drain(int cpu) -{ - return false; -} - void activate_page(struct page *page) { struct zone *zone = page_zone(page); @@ -653,13 +648,15 @@ void lru_add_drain(void) put_cpu(); } +#ifdef CONFIG_SMP + +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); + static void lru_add_drain_per_cpu(struct work_struct *dummy) { lru_add_drain(); } -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); - /* * Doesn't need any cpu hotplug locking because we do rely on per-cpu * kworkers being shut down before our page_alloc_cpu_dead callback is @@ -702,6 +699,12 @@ void lru_add_drain_all(void) mutex_unlock(&lock); } +#else +void lru_add_drain_all(void) +{ + lru_add_drain(); +} +#endif /** * release_pages - batched put_page() diff --git a/mm/util.c b/mm/util.c index 1ea055138043..379319b1bcfd 100644 --- a/mm/util.c +++ b/mm/util.c @@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len) { void *p; - p = kmalloc_track_caller(len, GFP_USER); + p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); diff --git a/mm/vmscan.c b/mm/vmscan.c index a714c4f800e9..e979705bbf32 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, delta = freeable / 2; } - /* - * Make sure we apply some minimal pressure on default priority - * even on small cgroups. Stale objects are not only consuming memory - * by themselves, but can also hold a reference to a dying cgroup, - * preventing it from being reclaimed. A dying cgroup with all - * corresponding structures like per-cpu stats and kmem caches - * can be really big, so it may lead to a significant waste of memory. 
- */ - delta = max_t(unsigned long long, delta, min(freeable, batch_size)); - total_scan += delta; if (total_scan < 0) { pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index e8090f099eb8..ef0dec20c7d8 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); + /* free the TID stats immediately */ + cfg80211_sinfo_release_content(&sinfo); + dev_put(real_netdev); if (ret == -ENOENT) { /* Node is not associated anymore! It would be diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 508f4416dfc9..415d494cbe22 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -20,7 +20,6 @@ #include "main.h" #include <linux/atomic.h> -#include <linux/bug.h> #include <linux/byteorder/generic.h> #include <linux/errno.h> #include <linux/gfp.h> @@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) parent_dev = __dev_get_by_index((struct net *)parent_net, dev_get_iflink(net_dev)); /* if we got a NULL parent_dev there is something broken.. */ - if (WARN(!parent_dev, "Cannot find parent device")) + if (!parent_dev) { + pr_err("Cannot find parent device\n"); return false; + } if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) return false; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5db5a0a4c959..ffc83bebfe40 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -221,10 +221,14 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, netif_trans_update(soft_iface); vid = batadv_get_vid(skb, 0); + + skb_reset_mac_header(skb); ethhdr = eth_hdr(skb); switch (ntohs(ethhdr->h_proto)) { case ETH_P_8021Q: + if (!pskb_may_pull(skb, sizeof(*vhdr))) + goto dropped; vhdr = vlan_eth_hdr(skb); /* drop batman-in-batman packets to prevent loops */ diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index fa2644d276ef..e31e1b20f7f4 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -13,27 +13,13 @@ #include <net/sock.h> #include <net/tcp.h> -static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) -{ - u32 ret; - - preempt_disable(); - rcu_read_lock(); - bpf_cgroup_storage_set(storage); - ret = BPF_PROG_RUN(prog, ctx); - rcu_read_unlock(); - preempt_enable(); - - return ret; -} - -static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, - u32 *time) +static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, + u32 *retval, u32 *time) { struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; enum bpf_cgroup_storage_type stype; u64 time_start, time_spent = 0; + int ret = 0; u32 i; for_each_cgroup_storage_type(stype) { @@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, if (!repeat) repeat = 1; + + rcu_read_lock(); + preempt_disable(); time_start = ktime_get_ns(); for (i = 0; i < repeat; i++) { - *ret = bpf_test_run_one(prog, ctx, storage); + bpf_cgroup_storage_set(storage); + *retval = BPF_PROG_RUN(prog, ctx); + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + if (need_resched()) { - if (signal_pending(current)) - break; time_spent += ktime_get_ns() - time_start; + 
preempt_enable(); + rcu_read_unlock(); + cond_resched(); + + rcu_read_lock(); + preempt_disable(); time_start = ktime_get_ns(); } } time_spent += ktime_get_ns() - time_start; + preempt_enable(); + rcu_read_unlock(); + do_div(time_spent, repeat); *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; for_each_cgroup_storage_type(stype) bpf_cgroup_storage_free(storage[stype]); - return 0; + return ret; } static int bpf_test_finish(const union bpf_attr *kattr, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 3aeff0895669..ac92b2eb32b1 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br, return; br_multicast_update_query_timer(br, query, max_delay); - - /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules, - * the arrival port for IGMP Queries where the source address - * is 0.0.0.0 should not be added to router port list. - */ - if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) || - saddr->proto == htons(ETH_P_IPV6)) - br_multicast_mark_router(br, port); + br_multicast_mark_router(br, port); } static void br_ip4_multicast_query(struct net_bridge *br, diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 5e55cef0cec3..6693e209efe8 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user, xt_compat_lock(NFPROTO_BRIDGE); - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); - if (ret < 0) - goto out_unlock; + if (tmp.nentries) { + ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); + if (ret < 0) + goto out_unlock; + } + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (ret < 0) goto out_unlock; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 3661cdd927f1..7e71b0df1fbc 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con) dout("process_connect on %p tag %d\n", con, (int)con->in_tag); if (con->auth) { + int len = le32_to_cpu(con->in_reply.authorizer_len); + /* * Any connection that defines ->get_authorizer() * should also define ->add_authorizer_challenge() and @@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con) */ if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) { ret = con->ops->add_authorizer_challenge( - con, con->auth->authorizer_reply_buf, - le32_to_cpu(con->in_reply.authorizer_len)); + con, con->auth->authorizer_reply_buf, len); if (ret < 0) return ret; @@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con) return 0; } - ret = con->ops->verify_authorizer_reply(con); - if (ret < 0) { - con->error_msg = "bad authorize reply"; - return ret; + if (len) { + ret = con->ops->verify_authorizer_reply(con); + if (ret < 0) { + con->error_msg = "bad authorize reply"; + return ret; + } } } diff --git a/net/compat.c b/net/compat.c index 959d1c51826d..3d348198004f 100644 --- a/net/compat.c +++ b/net/compat.c @@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname, char __user *optval, unsigned int optlen) { int err; - struct socket *sock = sockfd_lookup(fd, &err); + struct socket *sock; + + if (optlen > INT_MAX) + return -EINVAL; + sock = sockfd_lookup(fd, &err); if (sock) { err = security_socket_setsockopt(sock, level, optname); if (err) { diff --git a/net/core/dev.c b/net/core/dev.c index 
82f20022259d..5d03889502eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower, netdev_features_t feature; int feature_bit; - for_each_netdev_feature(&upper_disables, feature_bit) { + for_each_netdev_feature(upper_disables, feature_bit) { feature = __NETIF_F_BIT(feature_bit); if (!(upper->wanted_features & feature) && (features & feature)) { @@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper, netdev_features_t feature; int feature_bit; - for_each_netdev_feature(&upper_disables, feature_bit) { + for_each_netdev_feature(upper_disables, feature_bit) { feature = __NETIF_F_BIT(feature_bit); if (!(features & feature) && (lower->features & feature)) { netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", @@ -8712,6 +8712,9 @@ int init_dummy_netdev(struct net_device *dev) set_bit(__LINK_STATE_PRESENT, &dev->state); set_bit(__LINK_STATE_START, &dev->state); + /* napi_busy_loop stats accounting wants this */ + dev_net_set(dev, &init_net); + /* Note : We dont allocate pcpu_refcnt for dummy devices, * because users of this 'device' dont need to change * its refcount. diff --git a/net/core/filter.c b/net/core/filter.c index 7559d6835ecb..f7d0004fc160 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) u32 off = skb_mac_header_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_cow(skb, len_diff); @@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) u32 off = skb_mac_header_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_unclone(skb, GFP_ATOMIC); @@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_cow(skb, len_diff); @@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. 
*/ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_unclone(skb, GFP_ATOMIC); @@ -4112,10 +4108,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, /* Only some socketops are supported */ switch (optname) { case SO_RCVBUF: + val = min_t(u32, val, sysctl_rmem_max); sk->sk_userlocks |= SOCK_RCVBUF_LOCK; sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); break; case SO_SNDBUF: + val = min_t(u32, val, sysctl_wmem_max); sk->sk_userlocks |= SOCK_SNDBUF_LOCK; sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); break; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26d848484912..2415d9cb9b89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) */ void *netdev_alloc_frag(unsigned int fragsz) { + fragsz = SKB_DATA_ALIGN(fragsz); + return __netdev_alloc_frag(fragsz, GFP_ATOMIC); } EXPORT_SYMBOL(netdev_alloc_frag); @@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) void *napi_alloc_frag(unsigned int fragsz) { + fragsz = SKB_DATA_ALIGN(fragsz); + return __napi_alloc_frag(fragsz, GFP_ATOMIC); } EXPORT_SYMBOL(napi_alloc_frag); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index d6d5c20d7044..8c826603bf36 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc) struct sk_psock *psock = container_of(gc, struct sk_psock, gc); /* No sk_callback_lock since already detached. */ - if (psock->parser.enabled) - strp_done(&psock->parser.strp); + strp_done(&psock->parser.strp); cancel_work_sync(&psock->work); diff --git a/net/core/sock.c b/net/core/sock.c index 6aa2e7e0b4fb..bc3512f230a3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) } if (sk_has_memory_pressure(sk)) { - int alloc; + u64 alloc; if (!sk_under_memory_pressure(sk)) return 1; diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 6eb837a47b5c..baaaeb2b2c42 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk, static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, u8 pkt, u8 opt, u8 *val, u8 len) { - if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL) + if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options) return 0; return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); } @@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, u8 pkt, u8 opt, u8 *val, u8 len) { - if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL) + if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options) return 0; return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); } diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index d0b3e69c6b39..0962f9201baa 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -56,7 +56,7 @@ #include <net/dn_neigh.h> #include <net/dn_fib.h> -#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) +#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn)) static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; static char dn_rt_all_rt_mcast[ETH_ALEN] = 
{0xAB,0x00,0x00,0x03,0x00,0x00}; diff --git a/net/dsa/master.c b/net/dsa/master.c index 71bb15f491c8..54f5551fb799 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev) rtnl_unlock(); } +static struct lock_class_key dsa_master_addr_list_lock_key; + int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) { int ret; @@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) wmb(); dev->dsa_ptr = cpu_dp; + lockdep_set_class(&dev->addr_list_lock, + &dsa_master_addr_list_lock_key); ret = dsa_master_ethtool_setup(dev); if (ret) diff --git a/net/dsa/port.c b/net/dsa/port.c index 2d7e01b23572..2a2a878b5ce3 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state) int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) { - u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING; struct dsa_switch *ds = dp->ds; int port = dp->index; int err; @@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) return err; } - dsa_port_set_state_now(dp, stp_state); + if (!dp->bridge_dev) + dsa_port_set_state_now(dp, BR_STATE_FORWARDING); return 0; } @@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy) struct dsa_switch *ds = dp->ds; int port = dp->index; - dsa_port_set_state_now(dp, BR_STATE_DISABLED); + if (!dp->bridge_dev) + dsa_port_set_state_now(dp, BR_STATE_DISABLED); if (ds->ops->port_disable) ds->ops->port_disable(ds, port, phy); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a3fcc1d01615..a1c9fe155057 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev) static void dsa_slave_change_rx_flags(struct net_device *dev, int change) { struct net_device *master = dsa_slave_to_master(dev); - - if (change & IFF_ALLMULTI) - dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) - dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1); + if (dev->flags & IFF_UP) { + if (change & IFF_ALLMULTI) + dev_set_allmulti(master, + dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(master, + dev->flags & IFF_PROMISC ? 
1 : -1); + } } static void dsa_slave_set_rx_mode(struct net_device *dev) @@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) int ret; /* Port's PHY and MAC both need to be EEE capable */ - if (!dev->phydev && !dp->pl) + if (!dev->phydev || !dp->pl) return -ENODEV; if (!ds->ops->set_mac_eee) @@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) int ret; /* Port's PHY and MAC both need to be EEE capable */ - if (!dev->phydev && !dp->pl) + if (!dev->phydev || !dp->pl) return -ENODEV; if (!ds->ops->get_mac_eee) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 5459f41fc26f..10e809b296ec 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * skb->len += tailen; skb->data_len += tailen; skb->truesize += tailen; - if (sk) + if (sk && sk_fullsock(sk)) refcount_add(tailen, &sk->sk_wmem_alloc); goto out; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 1a4e9ff02762..5731670c560b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk, + nla_total_size(1) /* INET_DIAG_TOS */ + nla_total_size(1) /* INET_DIAG_TCLASS */ + nla_total_size(4) /* INET_DIAG_MARK */ + + nla_total_size(4) /* INET_DIAG_CLASS_ID */ + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) @@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto errout; } - if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) { + if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || + ext & (1 << (INET_DIAG_TCLASS - 1))) { u32 classid = 0; #ifdef CONFIG_SOCK_CGROUP_DATA classid = sock_cgroup_classid(&sk->sk_cgrp_data); #endif + /* Fallback to socket priority if class id isn't set. + * Classful qdiscs use it as direct reference to class. + * For cgroup2 classid is always zero. + */ + if (!classid) + classid = sk->sk_priority; if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) goto errout; diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index d757b9642d0d..be778599bfed 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, atomic_set(&p->rid, 0); p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; p->rate_tokens = 0; + p->n_redirects = 0; /* 60*HZ is arbitrary, but chosen enough high so that the first * calculation of tokens is at its maximum. 
*/ diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 20a64fe6254b..6ae89f2b541b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1455,12 +1455,31 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm *p = &t->parms; + __be16 o_flags = p->o_flags; + + if (t->erspan_ver == 1 || t->erspan_ver == 2) { + if (!t->collect_md) + o_flags |= TUNNEL_KEY; + + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) + goto nla_put_failure; + + if (t->erspan_ver == 1) { + if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) + goto nla_put_failure; + } else { + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) + goto nla_put_failure; + } + } if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || nla_put_be16(skb, IFLA_GRE_IFLAGS, gre_tnl_flags_to_gre_flags(p->i_flags)) || nla_put_be16(skb, IFLA_GRE_OFLAGS, - gre_tnl_flags_to_gre_flags(p->o_flags)) || + gre_tnl_flags_to_gre_flags(o_flags)) || nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || @@ -1490,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; } - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) - goto nla_put_failure; - - if (t->erspan_ver == 1) { - if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) - goto nla_put_failure; - } else if (t->erspan_ver == 2) { - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) - goto nla_put_failure; - if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) - goto nla_put_failure; - } - return 0; nla_put_failure: diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index d7b43e700023..68a21bf75dd0 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -74,6 +74,33 @@ drop: return 0; } +static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type) +{ + struct ip_tunnel *tunnel; + const struct iphdr *iph = ip_hdr(skb); + struct net *net = dev_net(skb->dev); + struct ip_tunnel_net *itn = net_generic(net, vti_net_id); + + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, + iph->saddr, iph->daddr, 0); + if (tunnel) { + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) + goto drop; + + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; + + skb->dev = tunnel->dev; + + return xfrm_input(skb, nexthdr, spi, encap_type); + } + + return -EINVAL; +drop: + kfree_skb(skb); + return 0; +} + static int vti_rcv(struct sk_buff *skb) { XFRM_SPI_SKB_CB(skb)->family = AF_INET; @@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb) return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); } +static int vti_rcv_ipip(struct sk_buff *skb) +{ + XFRM_SPI_SKB_CB(skb)->family = AF_INET; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + + return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0); +} + static int vti_rcv_cb(struct sk_buff *skb, int err) { unsigned short family; @@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = { .priority = 100, }; +static struct xfrm_tunnel ipip_handler __read_mostly = { + .handler = vti_rcv_ipip, + .err_handler = vti4_err, + .priority = 0, +}; + static int __net_init vti_init_net(struct net *net) { int err; @@ -603,6 +644,13 @@ static int __init vti_init(void) if (err < 0) goto xfrm_proto_comp_failed; + msg = "ipip tunnel"; + err = 
xfrm4_tunnel_register(&ipip_handler, AF_INET); + if (err < 0) { + pr_info("%s: can't register tunnel\n",__func__); + goto xfrm_tunnel_failed; + } + msg = "netlink interface"; err = rtnl_link_register(&vti_link_ops); if (err < 0) goto rtnl_link_failed; @@ -612,6 +660,8 @@ static int __init vti_init(void) rtnl_link_failed: xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); +xfrm_tunnel_failed: + xfrm4_tunnel_deregister(&ipip_handler, AF_INET); xfrm_proto_comp_failed: xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); xfrm_proto_ah_failed: diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index b61977db9b7f..2a909e5f9ba0 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net) static void clusterip_net_exit(struct net *net) { +#ifdef CONFIG_PROC_FS struct clusterip_net *cn = clusterip_pernet(net); -#ifdef CONFIG_PROC_FS mutex_lock(&cn->mutex); proc_remove(cn->procdir); cn->procdir = NULL; diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 2687db015b6f..fa2ba7c500e4 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, /* Change outer to look like the reply to an incoming packet */ nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); + target.dst.protonum = IPPROTO_ICMP; if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) return 0; diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c index a0aa13bcabda..0a8a60c1bf9a 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c @@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset) int snmp_version(void *context, size_t hdrlen, unsigned char tag, const void *data, size_t datalen) { + if (datalen != 1) + return -EINVAL; if (*(unsigned char *)data > 1) return -ENOTSUPP; return 1; @@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag, const void *data, size_t datalen) { struct snmp_ctx *ctx = (struct snmp_ctx *)context; - __be32 *pdata = (__be32 *)data; + __be32 *pdata; + if (datalen != 4) + return -EINVAL; + pdata = (__be32 *)data; if (*pdata == ctx->from) { pr_debug("%s: %pI4 to %pI4\n", __func__, (void *)&ctx->from, (void *)&ctx->to); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ce92f73cf104..5163b64f8fb3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) /* No redirected packets during ip_rt_redirect_silence; * reset the algorithm. */ - if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) + if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) { peer->rate_tokens = 0; + peer->n_redirects = 0; + } /* Too many ignored redirects; do not send anything * set dst.rate_last to the last seen redirected packet.
*/ - if (peer->rate_tokens >= ip_rt_redirect_number) { + if (peer->n_redirects >= ip_rt_redirect_number) { peer->rate_last = jiffies; goto out_put_peer; } @@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); peer->rate_last = jiffies; ++peer->rate_tokens; + ++peer->n_redirects; #ifdef CONFIG_IP_ROUTE_VERBOSE if (log_martians && peer->rate_tokens == ip_rt_redirect_number) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2079145a3b7c..cf3c5095c10e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk) sk_mem_reclaim(sk); tcp_clear_all_retrans_hints(tcp_sk(sk)); tcp_sk(sk)->packets_out = 0; + inet_csk(sk)->icsk_backoff = 0; } int tcp_disconnect(struct sock *sk, int flags) @@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags) tp->write_seq += tp->max_window + 2; if (tp->write_seq == 0) tp->write_seq = 1; - icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index efc6fef692ff..ec3cea9d6828 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info) if (sock_owned_by_user(sk)) break; + skb = tcp_rtx_queue_head(sk); + if (WARN_ON_ONCE(!skb)) + break; + icsk->icsk_backoff--; icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT; icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); - skb = tcp_rtx_queue_head(sk); tcp_mstamp_refresh(tp); delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 730bc44dbad9..ccc78f3a4b60 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); + tcp_init_tso_segs(skb, mss_now); goto repair; /* Skip network transmission */ } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5c3cd5d84a6f..372fdc5381a9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info) for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { int (*handler)(struct sk_buff *skb, u32 info); + const struct ip_tunnel_encap_ops *encap; - if (!iptun_encaps[i]) + encap = rcu_dereference(iptun_encaps[i]); + if (!encap) continue; - handler = rcu_dereference(iptun_encaps[i]->err_handler); + handler = encap->err_handler; if (handler && !handler(skb, info)) return 0; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 84c358804355..72ffd3d760ff 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) list_for_each_entry(ifa, &idev->addr_list, if_list) { if (ifa == ifp) continue; - if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, + if (ifa->prefix_len != ifp->prefix_len || + !ipv6_prefix_equal(&ifa->addr, &ifp->addr, ifp->prefix_len)) continue; if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 5afe9f83374d..239d4a65ad6e 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct 
esp_info skb->len += tailen; skb->data_len += tailen; skb->truesize += tailen; - if (sk) + if (sk && sk_fullsock(sk)) refcount_add(tailen, &sk->sk_wmem_alloc); goto out; diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index b858bd5280bf..867474abe269 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c @@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, static int gue6_err_proto_handler(int proto, struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, u32 info) + u8 type, u8 code, int offset, __be32 info) { const struct inet6_protocol *ipprot; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4416368dbd49..26f25b6e2833 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1719,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static void ip6erspan_set_version(struct nlattr *data[], + struct __ip6_tnl_parm *parms) +{ + if (!data) + return; + + parms->erspan_ver = 1; + if (data[IFLA_GRE_ERSPAN_VER]) + parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); + + if (parms->erspan_ver == 1) { + if (data[IFLA_GRE_ERSPAN_INDEX]) + parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); + } else if (parms->erspan_ver == 2) { + if (data[IFLA_GRE_ERSPAN_DIR]) + parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); + if (data[IFLA_GRE_ERSPAN_HWID]) + parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); + } +} + static void ip6gre_netlink_parms(struct nlattr *data[], struct __ip6_tnl_parm *parms) { @@ -1767,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[], if (data[IFLA_GRE_COLLECT_METADATA]) parms->collect_md = true; - - parms->erspan_ver = 1; - if (data[IFLA_GRE_ERSPAN_VER]) - parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); - - if (parms->erspan_ver == 1) { - if (data[IFLA_GRE_ERSPAN_INDEX]) - parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); - } else if (parms->erspan_ver == 2) { - if (data[IFLA_GRE_ERSPAN_DIR]) - parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); - if (data[IFLA_GRE_ERSPAN_HWID]) - parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); - } } static int ip6gre_tap_init(struct net_device *dev) @@ -2098,12 +2105,31 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct __ip6_tnl_parm *p = &t->parms; + __be16 o_flags = p->o_flags; + + if (p->erspan_ver == 1 || p->erspan_ver == 2) { + if (!p->collect_md) + o_flags |= TUNNEL_KEY; + + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) + goto nla_put_failure; + + if (p->erspan_ver == 1) { + if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) + goto nla_put_failure; + } else { + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid)) + goto nla_put_failure; + } + } if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || nla_put_be16(skb, IFLA_GRE_IFLAGS, gre_tnl_flags_to_gre_flags(p->i_flags)) || nla_put_be16(skb, IFLA_GRE_OFLAGS, - gre_tnl_flags_to_gre_flags(p->o_flags)) || + gre_tnl_flags_to_gre_flags(o_flags)) || nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || @@ -2112,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || - 
nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) || - nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) + nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, @@ -2131,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; } - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) - goto nla_put_failure; - - if (p->erspan_ver == 1) { - if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) - goto nla_put_failure; - } else if (p->erspan_ver == 2) { - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir)) - goto nla_put_failure; - if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid)) - goto nla_put_failure; - } - return 0; nla_put_failure: @@ -2198,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, int err; ip6gre_netlink_parms(data, &nt->parms); + ip6erspan_set_version(data, &nt->parms); ign = net_generic(net, ip6gre_net_id); if (nt->parms.collect_md) { @@ -2243,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], if (IS_ERR(t)) return PTR_ERR(t); + ip6erspan_set_version(data, &p); ip6gre_tunnel_unlink_md(ign, t); ip6gre_tunnel_unlink(ign, t); ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 30337b38274b..cc01aa3f2b5e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1516,6 +1516,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all) continue; rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); list_del_rcu(&c->list); + call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), + FIB_EVENT_ENTRY_DEL, + (struct mfc6_cache *)c, mrt->id); mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); mr_cache_put(c); } @@ -1524,10 +1527,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all) spin_lock_bh(&mfc_unres_lock); list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { list_del(&c->list); - call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), - FIB_EVENT_ENTRY_DEL, - (struct mfc6_cache *)c, - mrt->id); mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 8b075f0bc351..6d0b1f3e927b 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb) struct sock *sk = sk_to_full_sk(skb->sk); unsigned int hh_len; struct dst_entry *dst; + int strict = (ipv6_addr_type(&iph->daddr) & + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); struct flowi6 fl6 = { .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : - rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0, + strict ? 
skb_dst(skb)->dev->ifindex : 0, .flowi6_mark = skb->mark, .flowi6_uid = sock_net_uid(net, sk), .daddr = iph->daddr, diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index 23022447eb49..7a41ee3c11b4 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c @@ -226,6 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, } nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); + target.dst.protonum = IPPROTO_ICMPV6; if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) return 0; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 964491cf3672..ce15dc4ccbfa 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock); static void rt6_remove_exception(struct rt6_exception_bucket *bucket, struct rt6_exception *rt6_ex) { + struct fib6_info *from; struct net *net; if (!bucket || !rt6_ex) return; net = dev_net(rt6_ex->rt6i->dst.dev); + net->ipv6.rt6_stats->fib_rt_cache--; + + /* purge completely the exception to allow releasing the held resources: + * some [sk] cache may keep the dst around for unlimited time + */ + from = rcu_dereference_protected(rt6_ex->rt6i->from, + lockdep_is_held(&rt6_exception_lock)); + rcu_assign_pointer(rt6_ex->rt6i->from, NULL); + fib6_info_release(from); + dst_dev_put(&rt6_ex->rt6i->dst); + hlist_del_rcu(&rt6_ex->hlist); dst_release(&rt6_ex->rt6i->dst); kfree_rcu(rt6_ex, rcu); WARN_ON_ONCE(!bucket->depth); bucket->depth--; - net->ipv6.rt6_stats->fib_rt_cache--; } /* Remove oldest rt6_ex in bucket and free the memory @@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt) static void rt6_update_exception_stamp_rt(struct rt6_info *rt) { struct rt6_exception_bucket *bucket; - struct fib6_info *from = rt->from; struct in6_addr *src_key = NULL; struct rt6_exception *rt6_ex; - - if (!from || - !(rt->rt6i_flags & RTF_CACHE)) - return; + struct fib6_info *from; rcu_read_lock(); + from = rcu_dereference(rt->from); + if (!from || !(rt->rt6i_flags & RTF_CACHE)) + goto unlock; + bucket = rcu_dereference(from->rt6i_exception_bucket); #ifdef CONFIG_IPV6_SUBTREES @@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt) if (rt6_ex) rt6_ex->stamp = jiffies; +unlock: rcu_read_unlock(); } @@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net, u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; const struct in6_addr *gw_addr = &cfg->fc_gateway; u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT; + struct fib6_info *from; struct rt6_info *grt; int err; err = 0; grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0); if (grt) { + rcu_read_lock(); + from = rcu_dereference(grt->from); if (!grt->dst.error && /* ignore match if it is the default route */ - grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) && + from && !ipv6_addr_any(&from->fib6_dst.addr) && (grt->rt6i_flags & flags || dev != grt->dst.dev)) { NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway or device mismatch"); err = -EINVAL; } + rcu_read_unlock(); ip6_rt_put(grt); } @@ -4649,7 +4665,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, table = rt->fib6_table->tb6_id; else table = RT6_TABLE_UNSPEC; - rtm->rtm_table = table; + rtm->rtm_table = table < 256 ? 
table : RT_TABLE_COMPAT; if (nla_put_u32(skb, RTA_TABLE, table)) goto nla_put_failure; diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 8d0ba757a46c..9b2f272ca164 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info) rcu_read_unlock(); genlmsg_end(msg, hdr); - genlmsg_reply(msg, info); - - return 0; + return genlmsg_reply(msg, info); nla_put_failure: rcu_read_unlock(); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 8181ee7e1e27..ee5403cbe655 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) } else { ip6_flow_hdr(hdr, 0, flowlabel); hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); + + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); } hdr->nexthdr = NEXTHDR_ROUTING; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1e03305c0549..e8a1dabef803 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info) } err = 0; - if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) + if (__in6_dev_get(skb->dev) && + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) goto out; if (t->parms.iph.daddr == 0) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2596ffdeebea..b444483cdb2b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int peeked, peeking, off; int err; int is_udplite = IS_UDPLITE(sk); + struct udp_mib __percpu *mib; bool checksum_valid = false; - struct udp_mib *mib; int is_udp4; if (flags & MSG_ERRQUEUE) @@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable); */ static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, u32 info) + u8 type, u8 code, int offset, __be32 info) { int i; for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, u32 info); + u8 type, u8 code, int offset, __be32 info); + const struct ip6_tnl_encap_ops *encap; - if (!ip6tun_encaps[i]) + encap = rcu_dereference(ip6tun_encaps[i]); + if (!encap) continue; - handler = rcu_dereference(ip6tun_encaps[i]->err_handler); + handler = encap->err_handler; if (handler && !handler(skb, opt, type, code, offset, info)) return 0; } diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index f5b4febeaa25..bc65db782bfb 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); xfrm_flush_gc(); + xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); diff --git a/net/key/af_key.c b/net/key/af_key.c index 655c787f9d54..5651c29cb5bd 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock) return 0; } -static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, - gfp_t allocation, struct sock *sk) +static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation, + struct sock *sk) { int err = -ENOBUFS; - sock_hold(sk); - if (*skb2 == NULL) { - if (refcount_read(&skb->users) != 1) { - *skb2 = 
skb_clone(skb, allocation); - } else { - *skb2 = skb; - refcount_inc(&skb->users); - } - } - if (*skb2 != NULL) { - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { - skb_set_owner_r(*skb2, sk); - skb_queue_tail(&sk->sk_receive_queue, *skb2); - sk->sk_data_ready(sk); - *skb2 = NULL; - err = 0; - } + if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) + return err; + + skb = skb_clone(skb, allocation); + + if (skb) { + skb_set_owner_r(skb, sk); + skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk); + err = 0; } - sock_put(sk); return err; } @@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, { struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); struct sock *sk; - struct sk_buff *skb2 = NULL; int err = -ESRCH; /* XXX Do we need something like netlink_overrun? I think @@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, * socket. */ if (pfk->promisc) - pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); + pfkey_broadcast_one(skb, GFP_ATOMIC, sk); /* the exact target will be processed later */ if (sk == one_sk) @@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, continue; } - err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); + err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk); /* Error is cleared after successful sending to at least one * registered KM */ @@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, rcu_read_unlock(); if (one_sk != NULL) - err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); + err = pfkey_broadcast_one(skb, allocation, one_sk); - kfree_skb(skb2); kfree_skb(skb); return err; } @@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m if (proto == 0) return -EINVAL; - err = xfrm_state_flush(net, proto, true); + err = xfrm_state_flush(net, proto, true, false); err2 = unicast_flush_resp(sk, hdr); if (err || err2) { if (err == -ESRCH) /* empty table - go quietly */ diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 26f1d435696a..fed6becc5daf 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -83,8 +83,7 @@ #define L2TP_SLFLAG_S 0x40000000 #define L2TP_SL_SEQ_MASK 0x00ffffff -#define L2TP_HDR_SIZE_SEQ 10 -#define L2TP_HDR_SIZE_NOSEQ 6 +#define L2TP_HDR_SIZE_MAX 14 /* Default trace flags */ #define L2TP_DEFAULT_DEBUG_FLAGS 0 @@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) __skb_pull(skb, sizeof(struct udphdr)); /* Short packet? 
*/ - if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { + if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) { l2tp_info(tunnel, L2TP_MSG_DATA, "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); @@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) goto error; } + if (tunnel->version == L2TP_HDR_VER_3 && + l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + goto error; + l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); l2tp_session_dec_refcount(session); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9c9afe94d389..b2ce90260c35 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel) } #endif +static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb, + unsigned char **ptr, unsigned char **optr) +{ + int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session); + + if (opt_len > 0) { + int off = *ptr - *optr; + + if (!pskb_may_pull(skb, off + opt_len)) + return -1; + + if (skb->data != *optr) { + *optr = skb->data; + *ptr = skb->data + off; + } + } + + return 0; +} + #define l2tp_printk(ptr, type, func, fmt, ...) \ do { \ if (((ptr)->debug) & (type)) \ diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 35f6f86d4dcc..d4c60523c549 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + goto discard_sess; + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); l2tp_session_dec_refcount(session); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 237f1a4a0b0c..0ae6899edac0 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + goto discard_sess; + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); l2tp_session_dec_refcount(session); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 69e831bc317b..54821fb1a960 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -8,7 +8,7 @@ * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); + ieee80211_agg_stop_txq(sta, tid); + spin_unlock_bh(&sta->lock); ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 2493c74c2d37..96496b2c1670 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, BSS_CHANGED_P2P_PS | BSS_CHANGED_TXPOWER; int err; + int prev_beacon_int; old = sdata_dereference(sdata->u.ap.beacon, sdata); if (old) @@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, sdata->needed_rx_chains = sdata->local->rx_chains; + prev_beacon_int = 
sdata->vif.bss_conf.beacon_int; sdata->vif.bss_conf.beacon_int = params->beacon_interval; if (params->he_cap) @@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, if (!err) ieee80211_vif_copy_chanctx_to_vlans(sdata, false); mutex_unlock(&local->mtx); - if (err) + if (err) { + sdata->vif.bss_conf.beacon_int = prev_beacon_int; return err; + } /* * Apply control port protocol, this allows us to diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 87a729926734..977dea436ee8 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, * We need a bit of data queued to build aggregates properly, so * instruct the TCP stack to allow more than a single ms of data * to be queued in the stack. The value is a bit-shift of 1 - * second, so 8 is ~4ms of queued data. Only affects local TCP + * second, so 7 is ~8ms of queued data. Only affects local TCP * sockets. * This is the default, anyhow - drivers may need to override it * for local reasons (longer buffers, longer completion time, or * similar). */ - local->hw.tx_sk_pacing_shift = 8; + local->hw.tx_sk_pacing_shift = 7; /* set up some defaults */ local->hw.queues = 1; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index cad6592c52a1..2ec7011a4d07 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -70,6 +70,7 @@ enum mesh_deferred_task_flags { * @dst: mesh path destination mac address * @mpp: mesh proxy mac address * @rhash: rhashtable list pointer + * @walk_list: linked list containing all mesh_path objects. * @gate_list: list pointer for known gates list * @sdata: mesh subif * @next_hop: mesh neighbor to which frames for this destination will be @@ -105,6 +106,7 @@ struct mesh_path { u8 dst[ETH_ALEN]; u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ struct rhash_head rhash; + struct hlist_node walk_list; struct hlist_node gate_list; struct ieee80211_sub_if_data *sdata; struct sta_info __rcu *next_hop; @@ -133,12 +135,16 @@ struct mesh_path { * gate's mpath may or may not be resolved and active. 
* @gates_lock: protects updates to known_gates * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr + * @walk_head: linked list containging all mesh_path objects + * @walk_lock: lock protecting walk_head * @entries: number of entries in the table */ struct mesh_table { struct hlist_head known_gates; spinlock_t gates_lock; struct rhashtable rhead; + struct hlist_head walk_head; + spinlock_t walk_lock; atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ }; diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index a5125624a76d..88a6d5e18ccc 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void) return NULL; INIT_HLIST_HEAD(&newtbl->known_gates); + INIT_HLIST_HEAD(&newtbl->walk_head); atomic_set(&newtbl->entries, 0); spin_lock_init(&newtbl->gates_lock); + spin_lock_init(&newtbl->walk_lock); return newtbl; } @@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) static struct mesh_path * __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) { - int i = 0, ret; - struct mesh_path *mpath = NULL; - struct rhashtable_iter iter; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return NULL; - - rhashtable_walk_start(&iter); + int i = 0; + struct mesh_path *mpath; - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { if (i++ == idx) break; } - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); - if (IS_ERR(mpath) || !mpath) + if (!mpath) return NULL; if (mpath_expired(mpath)) { @@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, return ERR_PTR(-ENOMEM); tbl = sdata->u.mesh.mesh_paths; + spin_lock_bh(&tbl->walk_lock); do { ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new_mpath->rhash, @@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params); - + else if (!ret) + hlist_add_head(&new_mpath->walk_list, &tbl->walk_head); } while (unlikely(ret == -EEXIST && !mpath)); + spin_unlock_bh(&tbl->walk_lock); - if (ret && ret != -EEXIST) - return ERR_PTR(ret); - - /* At this point either new_mpath was added, or we found a - * matching entry already in the table; in the latter case - * free the unnecessary new entry. 
- */ - if (ret == -EEXIST) { + if (ret) { kfree(new_mpath); + + if (ret != -EEXIST) + return ERR_PTR(ret); + new_mpath = mpath; } + sdata->u.mesh.mesh_paths_generation++; return new_mpath; } @@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, memcpy(new_mpath->mpp, mpp, ETH_ALEN); tbl = sdata->u.mesh.mpp_paths; + + spin_lock_bh(&tbl->walk_lock); ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new_mpath->rhash, mesh_rht_params); + if (!ret) + hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head); + spin_unlock_bh(&tbl->walk_lock); + + if (ret) + kfree(new_mpath); sdata->u.mesh.mpp_paths_generation++; return ret; @@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta) struct mesh_table *tbl = sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + rcu_read_lock(); + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { if (rcu_access_pointer(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { @@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta) WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); } } - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + rcu_read_unlock(); } static void mesh_path_free_rcu(struct mesh_table *tbl, @@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) { + hlist_del_rcu(&mpath->walk_list); rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); mesh_path_free_rcu(tbl, mpath); } @@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) struct ieee80211_sub_if_data *sdata = sta->sdata; struct mesh_table *tbl = sdata->u.mesh.mesh_paths; struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + struct hlist_node *n; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if (rcu_access_pointer(mpath->next_hop) == sta) __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + spin_unlock_bh(&tbl->walk_lock); } static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, @@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, { struct mesh_table *tbl = sdata->u.mesh.mpp_paths; struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + struct hlist_node *n; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if (ether_addr_equal(mpath->mpp, proxy)) __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + 
spin_unlock_bh(&tbl->walk_lock); } static void table_flush_by_iface(struct mesh_table *tbl) { struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - - rhashtable_walk_start(&iter); + struct hlist_node *n; - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + spin_unlock_bh(&tbl->walk_lock); } /** @@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl, { struct mesh_path *mpath; - rcu_read_lock(); + spin_lock_bh(&tbl->walk_lock); mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); if (!mpath) { - rcu_read_unlock(); + spin_unlock_bh(&tbl->walk_lock); return -ENXIO; } __mesh_path_del(tbl, mpath); - rcu_read_unlock(); + spin_unlock_bh(&tbl->walk_lock); return 0; } @@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) { struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; + struct hlist_node *n; - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); - if (ret) - return; - - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if ((!(mpath->flags & MESH_PATH_RESOLVING)) && (!(mpath->flags & MESH_PATH_FIXED)) && time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + spin_unlock_bh(&tbl->walk_lock); } void mesh_path_expire(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb4d71efb6fb..c2a6da5d80da 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) struct ieee80211_sub_if_data *sdata = rx->sdata; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u16 ac, q, hdrlen; + int tailroom = 0; hdr = (struct ieee80211_hdr *) skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); @@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) if (!ifmsh->mshcfg.dot11MeshForwarding) goto out; + if (sdata->crypto_tx_tailroom_needed_cnt) + tailroom = IEEE80211_ENCRYPT_TAILROOM; + fwd_skb = skb_copy_expand(skb, local->tx_headroom + - sdata->encrypt_headroom, 0, GFP_ATOMIC); + sdata->encrypt_headroom, + tailroom, GFP_ATOMIC); if (!fwd_skb) goto out; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f170d6c6629a..928f13a208b0 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, int head_need, bool may_encrypt) { struct ieee80211_local *local = sdata->local; + struct ieee80211_hdr *hdr; + bool enc_tailroom; int tail_need = 0; - if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { + hdr = (struct ieee80211_hdr *) skb->data; + enc_tailroom = may_encrypt && + (sdata->crypto_tx_tailroom_needed_cnt || + ieee80211_is_mgmt(hdr->frame_control)); + + if (enc_tailroom) { tail_need = IEEE80211_ENCRYPT_TAILROOM; tail_need -= skb_tailroom(skb); tail_need = max_t(int, tail_need, 0); 
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, if (skb_cloned(skb) && (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || - !skb_clone_writable(skb, ETH_HLEN) || - (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt))) + !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom)) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else if (head_need || tail_need) I802_DEBUG_INC(local->tx_expand_skb_head); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d0eb38b890aa..ba950ae974fc 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -5,7 +5,7 @@ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -2146,6 +2146,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: break; + case NL80211_IFTYPE_ADHOC: + if (sdata->vif.bss_conf.ibss_joined) + WARN_ON(drv_join_ibss(local, sdata)); + /* fall through */ default: ieee80211_reconfig_stations(sdata); /* fall through */ diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index cad48d07c818..8401cefd9f65 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig @@ -29,6 +29,7 @@ config IP_VS_IPV6 bool "IPv6 support for IPVS" depends on IPV6 = y || IP_VS = IPV6 select IP6_NF_IPTABLES + select NF_DEFRAG_IPV6 ---help--- Add IPv6 support to IPVS. diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index fe9abf3cc10a..235205c93e14 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, /* sorry, all this trouble for a no-hit :) */ IP_VS_DBG_PKT(12, af, pp, skb, iph->off, "ip_vs_in: packet continues traversal as normal"); - if (iph->fragoffs) { - /* Fragment that couldn't be mapped to a conn entry - * is missing module nf_defrag_ipv6 - */ - IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n"); + + /* Fragment couldn't be mapped to a conn entry */ + if (iph->fragoffs) IP_VS_DBG_PKT(7, af, pp, skb, iph->off, "unhandled fragment"); - } + *verdict = NF_ACCEPT; return 0; } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 432141f04af3..ac8d848d7624 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -43,6 +43,7 @@ #ifdef CONFIG_IP_VS_IPV6 #include <net/ipv6.h> #include <net/ip6_route.h> +#include <net/netfilter/ipv6/nf_defrag_ipv6.h> #endif #include <net/route.h> #include <net/sock.h> @@ -900,11 +901,17 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, #ifdef CONFIG_IP_VS_IPV6 if (udest->af == AF_INET6) { + int ret; + atype = ipv6_addr_type(&udest->addr.in6); if ((!(atype & IPV6_ADDR_UNICAST) || atype & IPV6_ADDR_LINKLOCAL) && !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) return -EINVAL; + + ret = nf_defrag_ipv6_enable(svc->ipvs->net); + if (ret) + return ret; } else #endif { @@ -1228,6 +1235,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, ret = -EINVAL; goto out_err; } + + ret = nf_defrag_ipv6_enable(ipvs->net); + if (ret) + goto out_err; } #endif @@ -2221,6 +2232,18 @@ 
static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user u->udp_timeout); #ifdef CONFIG_IP_VS_PROTO_TCP + if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) || + u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) { + return -EINVAL; + } +#endif + +#ifdef CONFIG_IP_VS_PROTO_UDP + if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ)) + return -EINVAL; +#endif + +#ifdef CONFIG_IP_VS_PROTO_TCP if (u->tcp_timeout) { pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 741b533148ba..db4d46332e86 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, } if (nf_ct_key_equal(h, tuple, zone, net)) { + /* Tuple is taken already, so caller will need to find + * a new source port to use. + * + * Only exception: + * If the *original tuples* are identical, then both + * conntracks refer to the same flow. + * This is a rare situation, it can occur e.g. when + * more than one UDP packet is sent from same socket + * in different threads. + * + * Let nf_ct_resolve_clash() deal with this later. + */ + if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) + continue; + NF_CT_STAT_INC_ATOMIC(net, found); rcu_read_unlock(); return 1; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fb07f6cfc719..4893f248dfdc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans) kfree(trans); } +static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) +{ + struct net *net = ctx->net; + struct nft_trans *trans; + + if (!nft_set_is_anonymous(set)) + return; + + list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { + if (trans->msg_type == NFT_MSG_NEWSET && + nft_trans_set(trans) == set) { + nft_trans_set_bound(trans) = true; + break; + } + } +} + static int nf_tables_register_hook(struct net *net, const struct nft_table *table, struct nft_chain *chain) @@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx) return err; } -/* either expr ops provide both activate/deactivate, or neither */ -static bool nft_expr_check_ops(const struct nft_expr_ops *ops) -{ - if (!ops) - return true; - - if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate))) - return false; - - return true; -} - static void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule) { @@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx, } static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, - struct nft_rule *rule) + struct nft_rule *rule, + enum nft_trans_phase phase) { struct nft_expr *expr; expr = nft_expr_first(rule); while (expr != nft_expr_last(rule) && expr->ops) { if (expr->ops->deactivate) - expr->ops->deactivate(ctx, expr); + expr->ops->deactivate(ctx, expr, phase); expr = nft_expr_next(expr); } @@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) nft_trans_destroy(trans); return err; } - nft_rule_expr_deactivate(ctx, rule); + nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE); return 0; } @@ -307,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) int err; list_for_each_entry(rule, &ctx->chain->rules, list) { + if 
(!nft_is_active_next(ctx->net, rule)) + continue; + err = nft_delrule(ctx, rule); if (err < 0) return err; @@ -1929,9 +1938,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk, */ int nft_register_expr(struct nft_expr_type *type) { - if (!nft_expr_check_ops(type->ops)) - return -EINVAL; - nfnl_lock(NFNL_SUBSYS_NFTABLES); if (type->family == NFPROTO_UNSPEC) list_add_tail_rcu(&type->list, &nf_tables_expressions); @@ -2079,10 +2085,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, err = PTR_ERR(ops); goto err1; } - if (!nft_expr_check_ops(ops)) { - err = -EINVAL; - goto err1; - } } else ops = type->ops; @@ -2511,7 +2513,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule) { - nft_rule_expr_deactivate(ctx, rule); + nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); nf_tables_rule_destroy(ctx, rule); } @@ -3708,39 +3710,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, bind: binding->chain = ctx->chain; list_add_tail_rcu(&binding->list, &set->bindings); + nft_set_trans_bind(ctx, set); + return 0; } EXPORT_SYMBOL_GPL(nf_tables_bind_set); -void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding) -{ - if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && - nft_is_active(ctx->net, set)) - list_add_tail_rcu(&set->list, &ctx->table->sets); - - list_add_tail_rcu(&binding->list, &set->bindings); -} -EXPORT_SYMBOL_GPL(nf_tables_rebind_set); - void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding) + struct nft_set_binding *binding, bool event) { list_del_rcu(&binding->list); - if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && - nft_is_active(ctx->net, set)) + if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) { list_del_rcu(&set->list); + if (event) + nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, + GFP_KERNEL); + } } EXPORT_SYMBOL_GPL(nf_tables_unbind_set); void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) { - if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && - nft_is_active(ctx->net, set)) { - nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); + if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) nft_set_destroy(set); - } } EXPORT_SYMBOL_GPL(nf_tables_destroy_set); @@ -6535,6 +6528,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_rule_notify(&trans->ctx, nft_trans_rule(trans), NFT_MSG_DELRULE); + nft_rule_expr_deactivate(&trans->ctx, + nft_trans_rule(trans), + NFT_TRANS_COMMIT); break; case NFT_MSG_NEWSET: nft_clear(net, nft_trans_set(trans)); @@ -6621,7 +6617,8 @@ static void nf_tables_abort_release(struct nft_trans *trans) nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); break; case NFT_MSG_NEWSET: - nft_set_destroy(nft_trans_set(trans)); + if (!nft_trans_set_bound(trans)) + nft_set_destroy(nft_trans_set(trans)); break; case NFT_MSG_NEWSETELEM: nft_set_elem_destroy(nft_trans_elem_set(trans), @@ -6682,7 +6679,9 @@ static int __nf_tables_abort(struct net *net) case NFT_MSG_NEWRULE: trans->ctx.chain->use--; list_del_rcu(&nft_trans_rule(trans)->list); - nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); + nft_rule_expr_deactivate(&trans->ctx, + nft_trans_rule(trans), + NFT_TRANS_ABORT); break; case NFT_MSG_DELRULE: trans->ctx.chain->use++; @@ -6692,7 +6691,8 @@ static int __nf_tables_abort(struct net 
*net) break; case NFT_MSG_NEWSET: trans->ctx.table->use--; - list_del_rcu(&nft_trans_set(trans)->list); + if (!nft_trans_set_bound(trans)) + list_del_rcu(&nft_trans_set(trans)->list); break; case NFT_MSG_DELSET: trans->ctx.table->use++; diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 6f41dd74729d..1f1d90c1716b 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb, int ttl_check, struct nf_osf_hdr_ctx *ctx) { + const __u8 *optpinit = ctx->optp; unsigned int check_WSS = 0; int fmatch = FMATCH_WRONG; int foptsize, optnum; @@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb, } } + if (fmatch != FMATCH_OK) + ctx->optp = optpinit; + return fmatch == FMATCH_OK; } diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7334e0b80a5e..0a4bad55a8aa 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -22,11 +22,15 @@ #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_arp/arp_tables.h> #include <net/netfilter/nf_tables.h> +#include <net/netns/generic.h> struct nft_xt { struct list_head head; struct nft_expr_ops ops; - unsigned int refcnt; + refcount_t refcnt; + + /* used only when transaction mutex is locked */ + unsigned int listcnt; /* Unlike other expressions, ops doesn't have static storage duration. * nft core assumes they do. We use kfree_rcu so that nft core can @@ -43,10 +47,39 @@ struct nft_xt_match_priv { void *info; }; +struct nft_compat_net { + struct list_head nft_target_list; + struct list_head nft_match_list; +}; + +static unsigned int nft_compat_net_id __read_mostly; +static struct nft_expr_type nft_match_type; +static struct nft_expr_type nft_target_type; + +static struct nft_compat_net *nft_compat_pernet(struct net *net) +{ + return net_generic(net, nft_compat_net_id); +} + +static void nft_xt_get(struct nft_xt *xt) +{ + /* refcount_inc() warns on 0 -> 1 transition, but we can't + * init the reference count to 1 in .select_ops -- we can't + * undo such an increase when another expression inside the same + * rule fails afterwards. 
+ */ + if (xt->listcnt == 0) + refcount_set(&xt->refcnt, 1); + else + refcount_inc(&xt->refcnt); + + xt->listcnt++; +} + static bool nft_xt_put(struct nft_xt *xt) { - if (--xt->refcnt == 0) { - list_del(&xt->head); + if (refcount_dec_and_test(&xt->refcnt)) { + WARN_ON_ONCE(!list_empty(&xt->head)); kfree_rcu(xt, rcu_head); return true; } @@ -273,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return -EINVAL; nft_xt = container_of(expr->ops, struct nft_xt, ops); - nft_xt->refcnt++; + nft_xt_get(nft_xt); return 0; } @@ -282,6 +315,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { struct xt_target *target = expr->ops->data; void *info = nft_expr_priv(expr); + struct module *me = target->me; struct xt_tgdtor_param par; par.net = ctx->net; @@ -292,7 +326,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) par.target->destroy(&par); if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) - module_put(target->me); + module_put(me); } static int nft_extension_dump_info(struct sk_buff *skb, int attr, @@ -486,7 +520,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return ret; nft_xt = container_of(expr->ops, struct nft_xt, ops); - nft_xt->refcnt++; + nft_xt_get(nft_xt); return 0; } @@ -540,6 +574,18 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); } +static void nft_compat_deactivate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + enum nft_trans_phase phase) +{ + struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); + + if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) { + if (--xt->listcnt == 0) + list_del_init(&xt->head); + } +} + static void nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { @@ -734,10 +780,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = { .cb = nfnl_nft_compat_cb, }; -static LIST_HEAD(nft_match_list); - -static struct nft_expr_type nft_match_type; - static bool nft_match_cmp(const struct xt_match *match, const char *name, u32 rev, u32 family) { @@ -749,6 +791,7 @@ static const struct nft_expr_ops * nft_match_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { + struct nft_compat_net *cn; struct nft_xt *nft_match; struct xt_match *match; unsigned int matchsize; @@ -765,8 +808,10 @@ nft_match_select_ops(const struct nft_ctx *ctx, rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); family = ctx->family; + cn = nft_compat_pernet(ctx->net); + /* Re-use the existing match if it's already loaded. 
*/ - list_for_each_entry(nft_match, &nft_match_list, head) { + list_for_each_entry(nft_match, &cn->nft_match_list, head) { struct xt_match *match = nft_match->ops.data; if (nft_match_cmp(match, mt_name, rev, family)) @@ -789,11 +834,12 @@ nft_match_select_ops(const struct nft_ctx *ctx, goto err; } - nft_match->refcnt = 0; + refcount_set(&nft_match->refcnt, 0); nft_match->ops.type = &nft_match_type; nft_match->ops.eval = nft_match_eval; nft_match->ops.init = nft_match_init; nft_match->ops.destroy = nft_match_destroy; + nft_match->ops.deactivate = nft_compat_deactivate; nft_match->ops.dump = nft_match_dump; nft_match->ops.validate = nft_match_validate; nft_match->ops.data = match; @@ -810,7 +856,8 @@ nft_match_select_ops(const struct nft_ctx *ctx, nft_match->ops.size = matchsize; - list_add(&nft_match->head, &nft_match_list); + nft_match->listcnt = 0; + list_add(&nft_match->head, &cn->nft_match_list); return &nft_match->ops; err: @@ -826,10 +873,6 @@ static struct nft_expr_type nft_match_type __read_mostly = { .owner = THIS_MODULE, }; -static LIST_HEAD(nft_target_list); - -static struct nft_expr_type nft_target_type; - static bool nft_target_cmp(const struct xt_target *tg, const char *name, u32 rev, u32 family) { @@ -841,6 +884,7 @@ static const struct nft_expr_ops * nft_target_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { + struct nft_compat_net *cn; struct nft_xt *nft_target; struct xt_target *target; char *tg_name; @@ -861,8 +905,9 @@ nft_target_select_ops(const struct nft_ctx *ctx, strcmp(tg_name, "standard") == 0) return ERR_PTR(-EINVAL); + cn = nft_compat_pernet(ctx->net); /* Re-use the existing target if it's already loaded. */ - list_for_each_entry(nft_target, &nft_target_list, head) { + list_for_each_entry(nft_target, &cn->nft_target_list, head) { struct xt_target *target = nft_target->ops.data; if (!target->target) @@ -893,11 +938,12 @@ nft_target_select_ops(const struct nft_ctx *ctx, goto err; } - nft_target->refcnt = 0; + refcount_set(&nft_target->refcnt, 0); nft_target->ops.type = &nft_target_type; nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); nft_target->ops.init = nft_target_init; nft_target->ops.destroy = nft_target_destroy; + nft_target->ops.deactivate = nft_compat_deactivate; nft_target->ops.dump = nft_target_dump; nft_target->ops.validate = nft_target_validate; nft_target->ops.data = target; @@ -907,7 +953,8 @@ nft_target_select_ops(const struct nft_ctx *ctx, else nft_target->ops.eval = nft_target_eval_xt; - list_add(&nft_target->head, &nft_target_list); + nft_target->listcnt = 0; + list_add(&nft_target->head, &cn->nft_target_list); return &nft_target->ops; err: @@ -923,13 +970,74 @@ static struct nft_expr_type nft_target_type __read_mostly = { .owner = THIS_MODULE, }; +static int __net_init nft_compat_init_net(struct net *net) +{ + struct nft_compat_net *cn = nft_compat_pernet(net); + + INIT_LIST_HEAD(&cn->nft_target_list); + INIT_LIST_HEAD(&cn->nft_match_list); + + return 0; +} + +static void __net_exit nft_compat_exit_net(struct net *net) +{ + struct nft_compat_net *cn = nft_compat_pernet(net); + struct nft_xt *xt, *next; + + if (list_empty(&cn->nft_match_list) && + list_empty(&cn->nft_target_list)) + return; + + /* If there was an error that caused nft_xt expr to not be initialized + * fully and noone else requested the same expression later, the lists + * contain 0-refcount entries that still hold module reference. + * + * Clean them here. 
+ */ + mutex_lock(&net->nft.commit_mutex); + list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) { + struct xt_target *target = xt->ops.data; + + list_del_init(&xt->head); + + if (refcount_read(&xt->refcnt)) + continue; + module_put(target->me); + kfree(xt); + } + + list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) { + struct xt_match *match = xt->ops.data; + + list_del_init(&xt->head); + + if (refcount_read(&xt->refcnt)) + continue; + module_put(match->me); + kfree(xt); + } + mutex_unlock(&net->nft.commit_mutex); +} + +static struct pernet_operations nft_compat_net_ops = { + .init = nft_compat_init_net, + .exit = nft_compat_exit_net, + .id = &nft_compat_net_id, + .size = sizeof(struct nft_compat_net), +}; + static int __init nft_compat_module_init(void) { int ret; + ret = register_pernet_subsys(&nft_compat_net_ops); + if (ret < 0) + goto err_target; + ret = nft_register_expr(&nft_match_type); if (ret < 0) - return ret; + goto err_pernet; ret = nft_register_expr(&nft_target_type); if (ret < 0) @@ -942,45 +1050,21 @@ static int __init nft_compat_module_init(void) } return ret; - err_target: nft_unregister_expr(&nft_target_type); err_match: nft_unregister_expr(&nft_match_type); +err_pernet: + unregister_pernet_subsys(&nft_compat_net_ops); return ret; } static void __exit nft_compat_module_exit(void) { - struct nft_xt *xt, *next; - - /* list should be empty here, it can be non-empty only in case there - * was an error that caused nft_xt expr to not be initialized fully - * and noone else requested the same expression later. - * - * In this case, the lists contain 0-refcount entries that still - * hold module reference. - */ - list_for_each_entry_safe(xt, next, &nft_target_list, head) { - struct xt_target *target = xt->ops.data; - - if (WARN_ON_ONCE(xt->refcnt)) - continue; - module_put(target->me); - kfree(xt); - } - - list_for_each_entry_safe(xt, next, &nft_match_list, head) { - struct xt_match *match = xt->ops.data; - - if (WARN_ON_ONCE(xt->refcnt)) - continue; - module_put(match->me); - kfree(xt); - } nfnetlink_subsys_unregister(&nfnl_compat_subsys); nft_unregister_expr(&nft_target_type); nft_unregister_expr(&nft_match_type); + unregister_pernet_subsys(&nft_compat_net_ops); } MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 07d4efd3d851..f1172f99752b 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -235,20 +235,17 @@ err1: return err; } -static void nft_dynset_activate(const struct nft_ctx *ctx, - const struct nft_expr *expr) -{ - struct nft_dynset *priv = nft_expr_priv(expr); - - nf_tables_rebind_set(ctx, priv->set, &priv->binding); -} - static void nft_dynset_deactivate(const struct nft_ctx *ctx, - const struct nft_expr *expr) + const struct nft_expr *expr, + enum nft_trans_phase phase) { struct nft_dynset *priv = nft_expr_priv(expr); - nf_tables_unbind_set(ctx, priv->set, &priv->binding); + if (phase == NFT_TRANS_PREPARE) + return; + + nf_tables_unbind_set(ctx, priv->set, &priv->binding, + phase == NFT_TRANS_COMMIT); } static void nft_dynset_destroy(const struct nft_ctx *ctx, @@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = { .eval = nft_dynset_eval, .init = nft_dynset_init, .destroy = nft_dynset_destroy, - .activate = nft_dynset_activate, .deactivate = nft_dynset_deactivate, .dump = nft_dynset_dump, }; diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 0777a93211e2..3f6d1d2a6281 100644 --- 
a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx, } static void nft_immediate_deactivate(const struct nft_ctx *ctx, - const struct nft_expr *expr) + const struct nft_expr *expr, + enum nft_trans_phase phase) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); + if (phase == NFT_TRANS_COMMIT) + return; + return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); } diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 227b2b15a19c..14496da5141d 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx, return 0; } -static void nft_lookup_activate(const struct nft_ctx *ctx, - const struct nft_expr *expr) -{ - struct nft_lookup *priv = nft_expr_priv(expr); - - nf_tables_rebind_set(ctx, priv->set, &priv->binding); -} - static void nft_lookup_deactivate(const struct nft_ctx *ctx, - const struct nft_expr *expr) + const struct nft_expr *expr, + enum nft_trans_phase phase) { struct nft_lookup *priv = nft_expr_priv(expr); - nf_tables_unbind_set(ctx, priv->set, &priv->binding); + if (phase == NFT_TRANS_PREPARE) + return; + + nf_tables_unbind_set(ctx, priv->set, &priv->binding, + phase == NFT_TRANS_COMMIT); } static void nft_lookup_destroy(const struct nft_ctx *ctx, @@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = { .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), .eval = nft_lookup_eval, .init = nft_lookup_init, - .activate = nft_lookup_activate, .deactivate = nft_lookup_deactivate, .destroy = nft_lookup_destroy, .dump = nft_lookup_dump, diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index a3185ca2a3a9..ae178e914486 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -155,20 +155,17 @@ nla_put_failure: return -1; } -static void nft_objref_map_activate(const struct nft_ctx *ctx, - const struct nft_expr *expr) -{ - struct nft_objref_map *priv = nft_expr_priv(expr); - - nf_tables_rebind_set(ctx, priv->set, &priv->binding); -} - static void nft_objref_map_deactivate(const struct nft_ctx *ctx, - const struct nft_expr *expr) + const struct nft_expr *expr, + enum nft_trans_phase phase) { struct nft_objref_map *priv = nft_expr_priv(expr); - nf_tables_unbind_set(ctx, priv->set, &priv->binding); + if (phase == NFT_TRANS_PREPARE) + return; + + nf_tables_unbind_set(ctx, priv->set, &priv->binding, + phase == NFT_TRANS_COMMIT); } static void nft_objref_map_destroy(const struct nft_ctx *ctx, @@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = { .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), .eval = nft_objref_map_eval, .init = nft_objref_map_init, - .activate = nft_objref_map_activate, .deactivate = nft_objref_map_deactivate, .destroy = nft_objref_map_destroy, .dump = nft_objref_map_dump, diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index aecadd471e1d..13e1ac333fa4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1899,7 +1899,7 @@ static int __init xt_init(void) seqcount_init(&per_cpu(xt_recseq, i)); } - xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); + xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); if (!xt) return -ENOMEM; diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index cbd51ed5a2d7..908e53ab47a4 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -52,21 +52,21 @@ void 
nr_start_t1timer(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); - mod_timer(&nr->t1timer, jiffies + nr->t1); + sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1); } void nr_start_t2timer(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); - mod_timer(&nr->t2timer, jiffies + nr->t2); + sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2); } void nr_start_t4timer(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); - mod_timer(&nr->t4timer, jiffies + nr->t4); + sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4); } void nr_start_idletimer(struct sock *sk) @@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk) struct nr_sock *nr = nr_sk(sk); if (nr->idle > 0) - mod_timer(&nr->idletimer, jiffies + nr->idle); + sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle); } void nr_start_heartbeat(struct sock *sk) { - mod_timer(&sk->sk_timer, jiffies + 5 * HZ); + sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ); } void nr_stop_t1timer(struct sock *sk) { - del_timer(&nr_sk(sk)->t1timer); + sk_stop_timer(sk, &nr_sk(sk)->t1timer); } void nr_stop_t2timer(struct sock *sk) { - del_timer(&nr_sk(sk)->t2timer); + sk_stop_timer(sk, &nr_sk(sk)->t2timer); } void nr_stop_t4timer(struct sock *sk) { - del_timer(&nr_sk(sk)->t4timer); + sk_stop_timer(sk, &nr_sk(sk)->t4timer); } void nr_stop_idletimer(struct sock *sk) { - del_timer(&nr_sk(sk)->idletimer); + sk_stop_timer(sk, &nr_sk(sk)->idletimer); } void nr_stop_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); } int nr_t1timer_running(struct sock *sk) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3b1a78906bc0..1cd1d83a4be0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; - if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) + if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 9fc76b19cd3c..db3473540303 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code, ph->utid = 0; ph->message_id = id; ph->pipe_handle = pn->pipe_handle; - ph->data[0] = code; + ph->error_code = code; return pn_skb_send(sk, skb, NULL); } @@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code, ph->utid = id; /* whatever */ ph->message_id = id; ph->pipe_handle = pn->pipe_handle; - ph->data[0] = code; + ph->error_code = code; return pn_skb_send(sk, skb, NULL); } @@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, struct pnpipehdr *ph; struct sockaddr_pn dst; u8 data[4] = { - oph->data[0], /* PEP type */ + oph->pep_type, /* PEP type */ code, /* error code, at an unusual offset */ PAD, PAD, }; @@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, ph->utid = oph->utid; ph->message_id = PNS_PEP_CTRL_RESP; ph->pipe_handle = oph->pipe_handle; - ph->data[0] = oph->data[1]; /* CTRL id */ + ph->data0 = oph->data[0]; /* CTRL id */ pn_skb_get_src_sockaddr(oskb, &dst); return pn_skb_send(sk, skb, &dst); @@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) return -EINVAL; hdr = pnp_hdr(skb); - if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 
+ if (hdr->pep_type != PN_PEP_TYPE_COMMON) { net_dbg_ratelimited("Phonet unknown PEP type: %u\n", - (unsigned int)hdr->data[0]); + (unsigned int)hdr->pep_type); return -EOPNOTSUPP; } - switch (hdr->data[1]) { + switch (hdr->data[0]) { case PN_PEP_IND_FLOW_CONTROL: switch (pn->tx_fc) { case PN_LEGACY_FLOW_CONTROL: - switch (hdr->data[4]) { + switch (hdr->data[3]) { case PEP_IND_BUSY: atomic_set(&pn->tx_credits, 0); break; @@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) } break; case PN_ONE_CREDIT_FLOW_CONTROL: - if (hdr->data[4] == PEP_IND_READY) + if (hdr->data[3] == PEP_IND_READY) atomic_set(&pn->tx_credits, wake = 1); break; } @@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) break; - atomic_add(wake = hdr->data[4], &pn->tx_credits); + atomic_add(wake = hdr->data[3], &pn->tx_credits); break; default: net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", - (unsigned int)hdr->data[1]); + (unsigned int)hdr->data[0]); return -EOPNOTSUPP; } if (wake) @@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *hdr = pnp_hdr(skb); - u8 n_sb = hdr->data[0]; + u8 n_sb = hdr->data0; pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; __skb_pull(skb, sizeof(*hdr)); @@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) return -ECONNREFUSED; /* Parse sub-blocks */ - n_sb = hdr->data[4]; + n_sb = hdr->data[3]; while (n_sb > 0) { u8 type, buf[6], len = sizeof(buf); const u8 *data = pep_get_sb(skb, &type, &len, buf); @@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk) ph->utid = 0; ph->message_id = PNS_PIPE_REMOVE_REQ; ph->pipe_handle = pn->pipe_handle; - ph->data[0] = PAD; + ph->data0 = PAD; return pn_skb_send(sk, skb, NULL); } @@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp, peer_type = hdr->other_pep_type << 8; /* Parse sub-blocks (options) */ - n_sb = hdr->data[4]; + n_sb = hdr->data[3]; while (n_sb > 0) { u8 type, buf[1], len = sizeof(buf); const u8 *data = pep_get_sb(skb, &type, &len, buf); @@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) ph->utid = 0; if (pn->aligned) { ph->message_id = PNS_PIPE_ALIGNED_DATA; - ph->data[0] = 0; /* padding */ + ph->data0 = 0; /* padding */ } else ph->message_id = PNS_PIPE_DATA; ph->pipe_handle = pn->pipe_handle; diff --git a/net/rds/bind.c b/net/rds/bind.c index 762d2c6788a3..17c9d9f0c848 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, __rds_create_bind_key(key, addr, port, scope_id); rcu_read_lock(); rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); - if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) - rds_sock_addref(rs); - else + if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) || + !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt))) rs = NULL; + rcu_read_unlock(); rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 77e9f85a2c92..f2ff21d7df08 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev) /* * Route a frame to an appropriate AX.25 connection. + * A NULL ax25_cb indicates an internally generated frame. 
*/ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) { @@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) if (skb->len < ROSE_MIN_LEN) return res; + + if (!ax25) + return rose_loopback_queue(skb, NULL); + frametype = skb->data[2]; lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); if (frametype == ROSE_CALL_REQUEST && diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index eaf19ebaa964..3f7bb11f3290 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -596,6 +596,7 @@ error_requeue_call: } error_no_call: release_sock(&rx->sk); +error_trace: trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); return ret; @@ -604,7 +605,7 @@ wait_interrupted: wait_error: finish_wait(sk_sleep(&rx->sk), &wait); call = NULL; - goto error_no_call; + goto error_trace; } /** diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index f6aa57fbbbaf..12ca9d13db83 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1371,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, if (!tc_skip_hw(fnew->flags)) { err = fl_hw_replace_filter(tp, fnew, extack); if (err) - goto errout_mask; + goto errout_mask_ht; } if (!tc_in_hw(fnew->flags)) @@ -1401,6 +1401,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, kfree(mask); return 0; +errout_mask_ht: + rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, + fnew->mask->filter_ht_params); + errout_mask: fl_mask_put(head, fnew->mask, false); diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 9ccc93f257db..38bb882bb958 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -48,7 +48,7 @@ struct tcindex_data { u32 hash; /* hash table size; 0 if undefined */ u32 alloc_hash; /* allocated size */ u32 fall_through; /* 0: only classify if explicit match */ - struct rcu_head rcu; + struct rcu_work rwork; }; static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) @@ -221,17 +221,11 @@ found: return 0; } -static int tcindex_destroy_element(struct tcf_proto *tp, - void *arg, struct tcf_walker *walker) -{ - bool last; - - return tcindex_delete(tp, arg, &last, NULL); -} - -static void __tcindex_destroy(struct rcu_head *head) +static void tcindex_destroy_work(struct work_struct *work) { - struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); + struct tcindex_data *p = container_of(to_rcu_work(work), + struct tcindex_data, + rwork); kfree(p->perfect); kfree(p->h); @@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r) return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); } -static void __tcindex_partial_destroy(struct rcu_head *head) +static void tcindex_partial_destroy_work(struct work_struct *work) { - struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); + struct tcindex_data *p = container_of(to_rcu_work(work), + struct tcindex_data, + rwork); kfree(p->perfect); kfree(p); @@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp) kfree(cp->perfect); } -static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) +static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp) { int i, err = 0; @@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); if (err < 0) goto errout; +#ifdef CONFIG_NET_CLS_ACT + cp->perfect[i].exts.net = net; +#endif } return 0; @@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct 
tcf_proto *tp, unsigned long base, struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) { struct tcindex_filter_result new_filter_result, *old_r = r; - struct tcindex_filter_result cr; struct tcindex_data *cp = NULL, *oldp; struct tcindex_filter *f = NULL; /* make gcc behave */ + struct tcf_result cr = {}; int err, balloc = 0; struct tcf_exts e; @@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (p->perfect) { int i; - if (tcindex_alloc_perfect_hash(cp) < 0) + if (tcindex_alloc_perfect_hash(net, cp) < 0) goto errout; for (i = 0; i < cp->hash; i++) cp->perfect[i].res = p->perfect[i].res; @@ -348,11 +347,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, err = tcindex_filter_result_init(&new_filter_result); if (err < 0) goto errout1; - err = tcindex_filter_result_init(&cr); - if (err < 0) - goto errout1; if (old_r) - cr.res = r->res; + cr = r->res; if (tb[TCA_TCINDEX_HASH]) cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); @@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, err = -ENOMEM; if (!cp->perfect && !cp->h) { if (valid_perfect_hash(cp)) { - if (tcindex_alloc_perfect_hash(cp) < 0) + if (tcindex_alloc_perfect_hash(net, cp) < 0) goto errout_alloc; balloc = 1; } else { @@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, } if (tb[TCA_TCINDEX_CLASSID]) { - cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); - tcf_bind_filter(tp, &cr.res, base); + cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); + tcf_bind_filter(tp, &cr, base); } if (old_r && old_r != r) { @@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, } oldp = p; - r->res = cr.res; + r->res = cr; tcf_exts_change(&r->exts, &e); rcu_assign_pointer(tp->root, cp); @@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, ; /* nothing */ rcu_assign_pointer(*fp, f); + } else { + tcf_exts_destroy(&new_filter_result.exts); } if (oldp) - call_rcu(&oldp->rcu, __tcindex_partial_destroy); + tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work); return 0; errout_alloc: @@ -487,7 +485,6 @@ errout_alloc: else if (balloc == 2) kfree(cp->h); errout1: - tcf_exts_destroy(&cr.exts); tcf_exts_destroy(&new_filter_result.exts); errout: kfree(cp); @@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) { struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcf_walker walker; + int i; pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); - walker.count = 0; - walker.skip = 0; - walker.fn = tcindex_destroy_element; - tcindex_walk(tp, &walker); - call_rcu(&p->rcu, __tcindex_destroy); + if (p->perfect) { + for (i = 0; i < p->hash; i++) { + struct tcindex_filter_result *r = p->perfect + i; + + tcf_unbind_filter(tp, &r->res); + if (tcf_exts_get_net(&r->exts)) + tcf_queue_work(&r->rwork, + tcindex_destroy_rexts_work); + else + __tcindex_destroy_rexts(r); + } + } + + for (i = 0; p->h && i < p->hash; i++) { + struct tcindex_filter *f, *next; + bool last; + + for (f = rtnl_dereference(p->h[i]); f; f = next) { + next = rtnl_dereference(f->next); + tcindex_delete(tp, &f->result, &last, NULL); + } + } + + tcf_queue_work(&p->rwork, tcindex_destroy_work); } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 66ba2ce2320f..968a85fe4d4a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -500,7 +500,7 @@ static void 
dev_watchdog_down(struct net_device *dev) * netif_carrier_on - set carrier * @dev: network device * - * Device has detected that carrier. + * Device has detected acquisition of carrier. */ void netif_carrier_on(struct net_device *dev) { diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 078f01a8d582..435847d98b51 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc) + nla_total_size(1) /* INET_DIAG_TOS */ + nla_total_size(1) /* INET_DIAG_TCLASS */ + nla_total_size(4) /* INET_DIAG_MARK */ + + nla_total_size(4) /* INET_DIAG_CLASS_ID */ + nla_total_size(addrlen * asoc->peer.transport_count) + nla_total_size(addrlen * addrcnt) + nla_total_size(sizeof(struct inet_diag_meminfo)) diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 123e9f2dc226..edfcf16e704c 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c @@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb) { skb->ip_summed = CHECKSUM_NONE; skb->csum_not_inet = 0; + gso_reset_checksum(skb, ~0); return sctp_compute_cksum(skb, skb_transport_offset(skb)); } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f93c3cf9e567..65d6d04546ae 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_transport *transport = NULL; struct sctp_sndrcvinfo _sinfo, *sinfo; - struct sctp_association *asoc; + struct sctp_association *asoc, *tmp; struct sctp_cmsgs cmsgs; union sctp_addr *daddr; bool new = false; @@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) /* SCTP_SENDALL process */ if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { - list_for_each_entry(asoc, &ep->asocs, asocs) { + list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) { err = sctp_sendmsg_check_sflags(asoc, sflags, msg, msg_len); if (err == 0) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 80e0ae5534ec..2936ed17bf9e 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count) } } +static size_t fa_index(struct flex_array *fa, void *elem, size_t count) +{ + size_t index = 0; + + while (count--) { + if (elem == flex_array_get(fa, index)) + break; + index++; + } + + return index; +} + /* Migrates chunks from stream queues to new stream queues if needed, * but not across associations. Also, removes those chunks to streams * higher than the new max. 
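The fa_index() helper added above does a linear scan to recover the positional index of stream->out_curr, so that pointer can be re-established inside the newly allocated flex_array after fa_copy(). A minimal userspace sketch of the same idea follows; it uses plain calloc()'d arrays in place of the kernel flex_array API, and the struct and variable names are illustrative only, not taken from the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct stream_out { int ssn; };

/* Return the index of elem within fa[0..count), or count if not found. */
static size_t fa_index(struct stream_out *fa, struct stream_out *elem,
		       size_t count)
{
	size_t index = 0;

	while (count--) {
		if (elem == &fa[index])
			break;
		index++;
	}
	return index;
}

int main(void)
{
	size_t oldcnt = 4, newcnt = 8, idx;
	struct stream_out *old = calloc(oldcnt, sizeof(*old));
	struct stream_out *new = calloc(newcnt, sizeof(*new));
	struct stream_out *cur = &old[2];		/* like stream->out_curr */

	memcpy(new, old, oldcnt * sizeof(*old));	/* like fa_copy() */
	idx = fa_index(old, cur, oldcnt);		/* position in the old array */
	cur = (idx == oldcnt) ? NULL : &new[idx];	/* re-point into the new array */

	printf("out_curr now lives at index %zu of the new array\n", idx);
	free(old);
	free(new);
	return cur ? 0 : 1;
}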
@@ -131,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, } } - for (i = outcnt; i < stream->outcnt; i++) + for (i = outcnt; i < stream->outcnt; i++) { kfree(SCTP_SO(stream, i)->ext); + SCTP_SO(stream, i)->ext = NULL; + } } static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, @@ -147,6 +162,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, if (stream->out) { fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); + if (stream->out_curr) { + size_t index = fa_index(stream->out, stream->out_curr, + stream->outcnt); + + BUG_ON(index == stream->outcnt); + stream->out_curr = flex_array_get(out, index); + } fa_free(stream->out); } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 033696e6f74f..ad158d311ffa 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport) /* When a data chunk is sent, reset the heartbeat interval. */ expires = jiffies + sctp_transport_timeout(transport); - if (time_before(transport->hb_timer.expires, expires) && + if ((time_before(transport->hb_timer.expires, expires) || + !timer_pending(&transport->hb_timer)) && !mod_timer(&transport->hb_timer, expires + prandom_u32_max(transport->rto))) sctp_transport_hold(transport); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index c4e56602e0c6..b04a813fc865 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1505,6 +1505,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, smc = smc_sk(sk); lock_sock(sk); + if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { + /* socket was connected before, no more data to read */ + rc = 0; + goto out; + } if ((sk->sk_state == SMC_INIT) || (sk->sk_state == SMC_LISTEN) || (sk->sk_state == SMC_CLOSED)) @@ -1840,7 +1845,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, smc = smc_sk(sk); lock_sock(sk); - + if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { + /* socket was connected before, no more data to read */ + rc = 0; + goto out; + } if (sk->sk_state == SMC_INIT || sk->sk_state == SMC_LISTEN || sk->sk_state == SMC_CLOSED) diff --git a/net/smc/smc.h b/net/smc/smc.h index 5721416d0605..adbdf195eb08 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */ } __aligned(8); enum smc_urg_state { - SMC_URG_VALID, /* data present */ - SMC_URG_NOTYET, /* data pending */ - SMC_URG_READ /* data was already read */ + SMC_URG_VALID = 1, /* data present */ + SMC_URG_NOTYET = 2, /* data pending */ + SMC_URG_READ = 3, /* data was already read */ }; struct smc_connection { diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index db83332ac1c8..fb07ad8d69a6 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -21,13 +21,6 @@ /********************************** send *************************************/ -struct smc_cdc_tx_pend { - struct smc_connection *conn; /* socket connection */ - union smc_host_cursor cursor; /* tx sndbuf cursor sent */ - union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ - u16 ctrl_seq; /* conn. 
tx sequence # */ -}; - /* handler for send/transmission completion of a CDC msg */ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, struct smc_link *link, @@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, int smc_cdc_get_free_slot(struct smc_connection *conn, struct smc_wr_buf **wr_buf, + struct smc_rdma_wr **wr_rdma_buf, struct smc_cdc_tx_pend **pend) { struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; int rc; rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, + wr_rdma_buf, (struct smc_wr_tx_pend_priv **)pend); if (!conn->alert_token_local) /* abnormal termination */ @@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, struct smc_cdc_tx_pend *pend) { + union smc_host_cursor cfed; struct smc_link *link; int rc; @@ -105,12 +101,10 @@ int smc_cdc_msg_send(struct smc_connection *conn, conn->tx_cdc_seq++; conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; - smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, - &conn->local_tx_ctrl, conn); + smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed); rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); if (!rc) - smc_curs_copy(&conn->rx_curs_confirmed, - &conn->local_tx_ctrl.cons, conn); + smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); return rc; } @@ -121,11 +115,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn) struct smc_wr_buf *wr_buf; int rc; - rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); + rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend); if (rc) return rc; - return smc_cdc_msg_send(conn, wr_buf, pend); + spin_lock_bh(&conn->send_lock); + rc = smc_cdc_msg_send(conn, wr_buf, pend); + spin_unlock_bh(&conn->send_lock); + return rc; } int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index b5bfe38c7f9b..f1cdde9d4b89 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h @@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt, #endif } -/* calculate cursor difference between old and new, where old <= new */ +/* calculate cursor difference between old and new, where old <= new and + * difference cannot exceed size + */ static inline int smc_curs_diff(unsigned int size, union smc_host_cursor *old, union smc_host_cursor *new) @@ -185,28 +187,51 @@ static inline int smc_curs_comp(unsigned int size, return smc_curs_diff(size, old, new); } +/* calculate cursor difference between old and new, where old <= new and + * difference may exceed size + */ +static inline int smc_curs_diff_large(unsigned int size, + union smc_host_cursor *old, + union smc_host_cursor *new) +{ + if (old->wrap < new->wrap) + return min_t(int, + (size - old->count) + new->count + + (new->wrap - old->wrap - 1) * size, + size); + + if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */ + return min_t(int, + (size - old->count) + new->count + + (new->wrap + 0xffff - old->wrap) * size, + size); + + return max_t(int, 0, (new->count - old->count)); +} + static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, union smc_host_cursor *local, + union smc_host_cursor *save, struct smc_connection *conn) { - union smc_host_cursor temp; - - smc_curs_copy(&temp, local, conn); - peer->count = htonl(temp.count); - peer->wrap = htons(temp.wrap); + smc_curs_copy(save, local, conn); + peer->count = htonl(save->count); + peer->wrap = htons(save->wrap); /* peer->reserved = htons(0); must be ensured by caller */ } 
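Unlike smc_curs_diff(), the smc_curs_diff_large() helper above must cope with the two cursors lying more than one full pass over the RMBE apart, so the computed distance is clamped to the buffer size. A small standalone sketch of that clamping arithmetic follows; it uses simplified stand-in types rather than the kernel's union smc_host_cursor and is only meant to make the wrap handling easy to try out.

#include <stdint.h>
#include <stdio.h>

struct cursor {
	uint16_t wrap;	/* number of times the cursor wrapped around the buffer */
	uint32_t count;	/* byte offset within the buffer */
};

static int curs_diff_large(unsigned int size, const struct cursor *old,
			   const struct cursor *new)
{
	unsigned int d;

	if (old->wrap < new->wrap) {
		d = (size - old->count) + new->count +
		    (unsigned int)(new->wrap - old->wrap - 1) * size;
		return d < size ? (int)d : (int)size;	/* clamp at size */
	}
	if (old->wrap > new->wrap) {	/* the 16-bit wrap counter itself wrapped */
		d = (size - old->count) + new->count +
		    (unsigned int)(new->wrap + 0xffff - old->wrap) * size;
		return d < size ? (int)d : (int)size;
	}
	return new->count > old->count ? (int)(new->count - old->count) : 0;
}

int main(void)
{
	struct cursor old = { .wrap = 3, .count = 900 };
	struct cursor new = { .wrap = 5, .count = 100 };

	/* The real distance (1248 bytes for a 1024-byte buffer) exceeds the
	 * buffer size, so the result is clamped to 1024.
	 */
	printf("diff = %d\n", curs_diff_large(1024, &old, &new));
	return 0;
}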
static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, - struct smc_host_cdc_msg *local, - struct smc_connection *conn) + struct smc_connection *conn, + union smc_host_cursor *save) { + struct smc_host_cdc_msg *local = &conn->local_tx_ctrl; + peer->common.type = local->common.type; peer->len = local->len; peer->seqno = htons(local->seqno); peer->token = htonl(local->token); - smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn); - smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn); + smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn); + smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn); peer->prod_flags = local->prod_flags; peer->conn_state_flags = local->conn_state_flags; } @@ -270,10 +295,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, smcr_cdc_msg_to_host(local, peer, conn); } -struct smc_cdc_tx_pend; +struct smc_cdc_tx_pend { + struct smc_connection *conn; /* socket connection */ + union smc_host_cursor cursor; /* tx sndbuf cursor sent */ + union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ + u16 ctrl_seq; /* conn. tx sequence # */ +}; int smc_cdc_get_free_slot(struct smc_connection *conn, struct smc_wr_buf **wr_buf, + struct smc_rdma_wr **wr_rdma_buf, struct smc_cdc_tx_pend **pend); void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 776e9dfc915d..d53fd588d1f5 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) vec.iov_len = sizeof(struct smc_clc_msg_decline); len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(struct smc_clc_msg_decline)); - if (len < sizeof(struct smc_clc_msg_decline)) + if (len < 0 || len < sizeof(struct smc_clc_msg_decline)) len = -EPROTO; return len > 0 ? 
0 : len; } diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index ea2b87f29469..e39cadda1bf5 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work) switch (sk->sk_state) { case SMC_INIT: - if (atomic_read(&conn->bytes_to_rcv) || - (rxflags->peer_done_writing && - !smc_cdc_rxed_any_close(conn))) { - sk->sk_state = SMC_APPCLOSEWAIT1; - } else { - sk->sk_state = SMC_CLOSED; - sock_put(sk); /* passive closing */ - } + sk->sk_state = SMC_APPCLOSEWAIT1; break; case SMC_ACTIVE: sk->sk_state = SMC_APPCLOSEWAIT1; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 35c1cdc93e1c..aa1c551cee81 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -128,6 +128,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!lgr) + return; write_lock_bh(&lgr->conns_lock); if (conn->alert_token_local) { __smc_lgr_unregister_conn(conn); @@ -300,13 +302,13 @@ static void smc_buf_unuse(struct smc_connection *conn, conn->sndbuf_desc->used = 0; if (conn->rmb_desc) { if (!conn->rmb_desc->regerr) { - conn->rmb_desc->used = 0; if (!lgr->is_smcd) { /* unregister rmb with peer */ smc_llc_do_delete_rkey( &lgr->lnk[SMC_SINGLE_LINK], conn->rmb_desc); } + conn->rmb_desc->used = 0; } else { /* buf registration failed, reuse not possible */ write_lock_bh(&lgr->rmbs_lock); @@ -628,6 +630,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, local_contact = SMC_REUSE_CONTACT; conn->lgr = lgr; smc_lgr_register_conn(conn); /* add smc conn to lgr */ + if (delayed_work_pending(&lgr->free_work)) + cancel_delayed_work(&lgr->free_work); write_unlock_bh(&lgr->conns_lock); break; } diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index b00287989a3d..8806d2afa6ed 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -52,6 +52,24 @@ enum smc_wr_reg_state { FAILED /* ib_wr_reg_mr response: failure */ }; +struct smc_rdma_sge { /* sges for RDMA writes */ + struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE]; +}; + +#define SMC_MAX_RDMA_WRITES 2 /* max. 
# of RDMA writes per + * message send + */ + +struct smc_rdma_sges { /* sges per message send */ + struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES]; +}; + +struct smc_rdma_wr { /* work requests per message + * send + */ + struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES]; +}; + struct smc_link { struct smc_ib_device *smcibdev; /* ib-device */ u8 ibport; /* port - values 1 | 2 */ @@ -64,6 +82,8 @@ struct smc_link { struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ struct ib_sge *wr_tx_sges; /* WR send gather meta data */ + struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/ + struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */ struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ /* above four vectors have wr_tx_cnt elements and use the same index */ dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index e519ef29c0ff..76487a16934e 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk) static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) { - struct smc_ib_device *smcibdev = - (struct smc_ib_device *)ibevent->device; + struct smc_link *lnk = (struct smc_link *)priv; + struct smc_ib_device *smcibdev = lnk->smcibdev; u8 port_idx; switch (ibevent->event) { @@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) case IB_EVENT_GID_CHANGE: case IB_EVENT_PORT_ERR: case IB_EVENT_QP_ACCESS_ERR: - port_idx = ibevent->element.port_num - 1; + port_idx = ibevent->element.qp->port - 1; set_bit(port_idx, &smcibdev->port_event_mask); schedule_work(&smcibdev->port_event_work); break; diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index a6d3623d06f4..4fd60c522802 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link, { int rc; - rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); + rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL, + pend); if (rc < 0) return rc; BUILD_BUG_ON_MSG( diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 7cb3e4f07c10..632c3109dee5 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -27,7 +27,7 @@ static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { [SMC_PNETID_NAME] = { .type = NLA_NUL_STRING, - .len = SMC_MAX_PNETID_LEN - 1 + .len = SMC_MAX_PNETID_LEN }, [SMC_PNETID_ETHNAME] = { .type = NLA_NUL_STRING, diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index d8366ed51757..f93f3580c100 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { + if (send_done) + return send_done; rc = smc_tx_wait(smc, msg->msg_flags); - if (rc) { - if (send_done) - return send_done; + if (rc) goto out_err; - } continue; } @@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, - int num_sges, struct ib_sge sges[]) + int num_sges, struct ib_rdma_wr *rdma_wr) { struct smc_link_group *lgr = conn->lgr; - struct ib_rdma_wr rdma_wr; struct smc_link *link; int rc; - 
memset(&rdma_wr, 0, sizeof(rdma_wr)); link = &lgr->lnk[SMC_SINGLE_LINK]; - rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); - rdma_wr.wr.sg_list = sges; - rdma_wr.wr.num_sge = num_sges; - rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; - rdma_wr.remote_addr = + rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link); + rdma_wr->wr.num_sge = num_sges; + rdma_wr->remote_addr = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + /* RMBE within RMB */ conn->tx_off + /* offset within RMBE */ peer_rmbe_offset; - rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; - rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); + rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; + rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL); if (rc) { conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; smc_lgr_terminate(lgr); @@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn, /* SMC-R helper for smc_tx_rdma_writes() */ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, size_t src_off, size_t src_len, - size_t dst_off, size_t dst_len) + size_t dst_off, size_t dst_len, + struct smc_rdma_wr *wr_rdma_buf) { dma_addr_t dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); - struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; int src_len_sum = src_len, dst_len_sum = dst_len; - struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; int sent_count = src_off; int srcchunk, dstchunk; int num_sges; int rc; for (dstchunk = 0; dstchunk < 2; dstchunk++) { + struct ib_sge *sge = + wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list; + num_sges = 0; for (srcchunk = 0; srcchunk < 2; srcchunk++) { - sges[srcchunk].addr = dma_addr + src_off; - sges[srcchunk].length = src_len; - sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; + sge[srcchunk].addr = dma_addr + src_off; + sge[srcchunk].length = src_len; num_sges++; src_off += src_len; @@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, src_len = dst_len - src_len; /* remainder */ src_len_sum += src_len; } - rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); + rc = smc_tx_rdma_write(conn, dst_off, num_sges, + &wr_rdma_buf->wr_tx_rdma[dstchunk]); if (rc) return rc; if (dst_len_sum == len) @@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; * usable snd_wnd as max transmit */ -static int smc_tx_rdma_writes(struct smc_connection *conn) +static int smc_tx_rdma_writes(struct smc_connection *conn, + struct smc_rdma_wr *wr_rdma_buf) { size_t len, src_len, dst_off, dst_len; /* current chunk values */ union smc_host_cursor sent, prep, prod, cons; @@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) dst_off, dst_len); else rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, - dst_off, dst_len); + dst_off, dst_len, wr_rdma_buf); if (rc) return rc; @@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_cdc_producer_flags *pflags; + struct smc_rdma_wr *wr_rdma_buf; struct smc_cdc_tx_pend *pend; struct smc_wr_buf *wr_buf; int rc; - spin_lock_bh(&conn->send_lock); - rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); + rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend); if (rc < 0) { if (rc == -EBUSY) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); - if (smc->sk.sk_err == 
ECONNABORTED) { - rc = sock_error(&smc->sk); - goto out_unlock; - } + if (smc->sk.sk_err == ECONNABORTED) + return sock_error(&smc->sk); rc = 0; if (conn->alert_token_local) /* connection healthy */ mod_delayed_work(system_wq, &conn->tx_work, SMC_TX_WORK_DELAY); } - goto out_unlock; + return rc; } + spin_lock_bh(&conn->send_lock); if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { - rc = smc_tx_rdma_writes(conn); + rc = smc_tx_rdma_writes(conn, wr_rdma_buf); if (rc) { smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], (struct smc_wr_tx_pend_priv *)pend); @@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) spin_lock_bh(&conn->send_lock); if (!pflags->urg_data_present) - rc = smc_tx_rdma_writes(conn); + rc = smc_tx_rdma_writes(conn, NULL); if (!rc) rc = smcd_cdc_msg_send(conn); @@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) if (to_confirm > conn->rmbe_update_limit) { smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); sender_free = conn->rmb_desc->len - - smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); + smc_curs_diff_large(conn->rmb_desc->len, + &cfed, &prod); } if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index c2694750a6a8..253aa75dc2b6 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) * @link: Pointer to smc_link used to later send the message. * @handler: Send completion handler function pointer. * @wr_buf: Out value returns pointer to message buffer. + * @wr_rdma_buf: Out value returns pointer to rdma work request. * @wr_pend_priv: Out value returns pointer serving as handler context. * * Return: 0 on success, or -errno on error. 
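The @wr_rdma_buf out-parameter documented in the hunk above is the heart of the SMC change in this series: every send slot now carries a pair of pre-built IB_WR_RDMA_WRITE work requests, so the transmit path no longer assembles an ib_rdma_wr on the stack for each chunk. A minimal sketch of the resulting caller flow, using only identifiers visible in these hunks (the function name is illustrative, locking is simplified, and the urgent-data and CDC-send branches of the real smcr_tx_sndbuf_nonempty() are omitted):

	/* Illustrative sketch, not the exact mainline body; assumes the
	 * net/smc declarations shown in the hunks above.
	 */
	static int smcr_tx_example(struct smc_connection *conn)
	{
		struct smc_rdma_wr *wr_rdma_buf;	/* slot's pre-initialized RDMA WRs */
		struct smc_cdc_tx_pend *pend;
		struct smc_wr_buf *wr_buf;
		int rc;

		/* one slot lookup now yields the CDC send buffer and the
		 * matching RDMA work requests for that slot
		 */
		rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
		if (rc < 0)
			return rc;

		spin_lock_bh(&conn->send_lock);
		/* fills sge addresses/lengths and posts wr_rdma_buf->wr_tx_rdma[] */
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc)
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
		spin_unlock_bh(&conn->send_lock);
		return rc;
	}

Because smc_wr_init_sge() sets the opcode, sg_list and lkey once at link setup, the hot path only has to touch addresses, lengths and the rkey before ib_post_send().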
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, struct smc_wr_buf **wr_buf, + struct smc_rdma_wr **wr_rdma_buf, struct smc_wr_tx_pend_priv **wr_pend_priv) { struct smc_wr_tx_pend *wr_pend; @@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, wr_ib = &link->wr_tx_ibs[idx]; wr_ib->wr_id = wr_id; *wr_buf = &link->wr_tx_bufs[idx]; + if (wr_rdma_buf) + *wr_rdma_buf = &link->wr_tx_rdmas[idx]; *wr_pend_priv = &wr_pend->priv; return 0; } @@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link, u32 idx = pend->idx; /* clear the full struct smc_wr_tx_pend including .priv */ - memset(&link->wr_tx_pends[pend->idx], 0, - sizeof(link->wr_tx_pends[pend->idx])); - memset(&link->wr_tx_bufs[pend->idx], 0, - sizeof(link->wr_tx_bufs[pend->idx])); + memset(&link->wr_tx_pends[idx], 0, + sizeof(link->wr_tx_pends[idx])); + memset(&link->wr_tx_bufs[idx], 0, + sizeof(link->wr_tx_bufs[idx])); test_and_clear_bit(idx, link->wr_tx_mask); return 1; } @@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk) lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey = + lnk->roce_pd->local_dma_lkey; + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey = + lnk->roce_pd->local_dma_lkey; + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey = + lnk->roce_pd->local_dma_lkey; + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey = + lnk->roce_pd->local_dma_lkey; lnk->wr_tx_ibs[i].next = NULL; lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; lnk->wr_tx_ibs[i].num_sge = 1; lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; lnk->wr_tx_ibs[i].send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; + lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE; + lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE; + lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list = + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge; + lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; } for (i = 0; i < lnk->wr_rx_cnt; i++) { lnk->wr_rx_sges[i].addr = @@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk) lnk->wr_tx_mask = NULL; kfree(lnk->wr_tx_sges); lnk->wr_tx_sges = NULL; + kfree(lnk->wr_tx_rdma_sges); + lnk->wr_tx_rdma_sges = NULL; kfree(lnk->wr_rx_sges); lnk->wr_rx_sges = NULL; + kfree(lnk->wr_tx_rdmas); + lnk->wr_tx_rdmas = NULL; kfree(lnk->wr_rx_ibs); lnk->wr_rx_ibs = NULL; kfree(lnk->wr_tx_ibs); @@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link) GFP_KERNEL); if (!link->wr_rx_ibs) goto no_mem_wr_tx_ibs; + link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, + sizeof(link->wr_tx_rdmas[0]), + GFP_KERNEL); + if (!link->wr_tx_rdmas) + goto no_mem_wr_rx_ibs; + link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, + sizeof(link->wr_tx_rdma_sges[0]), + GFP_KERNEL); + if (!link->wr_tx_rdma_sges) + goto no_mem_wr_tx_rdmas; link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), GFP_KERNEL); if (!link->wr_tx_sges) - goto no_mem_wr_rx_ibs; + goto no_mem_wr_tx_rdma_sges; link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, sizeof(link->wr_rx_sges[0]), GFP_KERNEL); @@ -579,6 +611,10 @@ no_mem_wr_rx_sges: kfree(link->wr_rx_sges); no_mem_wr_tx_sges: kfree(link->wr_tx_sges); +no_mem_wr_tx_rdma_sges: + kfree(link->wr_tx_rdma_sges); 
+no_mem_wr_tx_rdmas: + kfree(link->wr_tx_rdmas); no_mem_wr_rx_ibs: kfree(link->wr_rx_ibs); no_mem_wr_tx_ibs: diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 1d85bb14fd6f..09bf32fd3959 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev); int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, struct smc_wr_buf **wr_buf, + struct smc_rdma_wr **wrs, struct smc_wr_tx_pend_priv **wr_pend_priv); int smc_wr_tx_put_slot(struct smc_link *link, struct smc_wr_tx_pend_priv *wr_pend_priv); diff --git a/net/socket.c b/net/socket.c index e89884e2197b..d80d87a395ea 100644 --- a/net/socket.c +++ b/net/socket.c @@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, - unsigned int cmd, unsigned long arg, - unsigned int ifreq_size) + unsigned int cmd, unsigned long arg) { int err; void __user *argp = (void __user *)arg; @@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, } else { struct ifreq ifr; bool need_copyout; - if (copy_from_user(&ifr, argp, ifreq_size)) + if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) return -EFAULT; err = dev_ioctl(net, cmd, &ifr, &need_copyout); if (!err && need_copyout) - if (copy_to_user(argp, &ifr, ifreq_size)) + if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) return -EFAULT; } return err; @@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) err = open_related_ns(&net->ns, get_net_ns); break; default: - err = sock_do_ioctl(net, sock, cmd, arg, - sizeof(struct ifreq)); + err = sock_do_ioctl(net, sock, cmd, arg); break; } return err; @@ -2780,8 +2778,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, int err; set_fs(KERNEL_DS); - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, - sizeof(struct compat_ifreq)); + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) err = compat_put_timeval(&ktv, up); @@ -2797,8 +2794,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, int err; set_fs(KERNEL_DS); - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, - sizeof(struct compat_ifreq)); + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) err = compat_put_timespec(&kts, up); @@ -2994,6 +2990,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, return dev_ioctl(net, cmd, &ifreq, NULL); } +static int compat_ifreq_ioctl(struct net *net, struct socket *sock, + unsigned int cmd, + struct compat_ifreq __user *uifr32) +{ + struct ifreq __user *uifr; + int err; + + /* Handle the fact that while struct ifreq has the same *layout* on + * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data, + * which are handled elsewhere, it still has different *size* due to + * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit, + * resulting in struct ifreq being 32 and 40 bytes respectively). + * As a result, if the struct happens to be at the end of a page and + * the next page isn't readable/writable, we get a fault. To prevent + * that, copy back and forth to the full size. 
+ */ + + uifr = compat_alloc_user_space(sizeof(*uifr)); + if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) + return -EFAULT; + + err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); + + if (!err) { + switch (cmd) { + case SIOCGIFFLAGS: + case SIOCGIFMETRIC: + case SIOCGIFMTU: + case SIOCGIFMEM: + case SIOCGIFHWADDR: + case SIOCGIFINDEX: + case SIOCGIFADDR: + case SIOCGIFBRDADDR: + case SIOCGIFDSTADDR: + case SIOCGIFNETMASK: + case SIOCGIFPFLAGS: + case SIOCGIFTXQLEN: + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCGIFNAME: + if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) + err = -EFAULT; + break; + } + } + return err; +} + static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { @@ -3109,8 +3153,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, } set_fs(KERNEL_DS); - ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, - sizeof(struct compat_ifreq)); + ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: @@ -3210,21 +3253,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: + case SIOCGIFNAME: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: - case SIOCSARP: - case SIOCGARP: - case SIOCDARP: - case SIOCATMARK: case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: - case SIOCGIFNAME: - return sock_do_ioctl(net, sock, cmd, arg, - sizeof(struct compat_ifreq)); + return compat_ifreq_ioctl(net, sock, cmd, argp); + + case SIOCSARP: + case SIOCGARP: + case SIOCDARP: + case SIOCATMARK: + return sock_do_ioctl(net, sock, cmd, arg); } return -ENOIOCTLCMD; diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index fb6656295204..507105127095 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c @@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, unsigned char *cksum, unsigned char *buf) { struct crypto_sync_skcipher *cipher; - unsigned char plain[8]; + unsigned char *plain; s32 code; dprintk("RPC: %s:\n", __func__); @@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, if (IS_ERR(cipher)) return PTR_ERR(cipher); + plain = kmalloc(8, GFP_NOFS); + if (!plain) + return -ENOMEM; + plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); @@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, code = krb5_encrypt(cipher, cksum, plain, buf, 8); out: + kfree(plain); crypto_free_sync_skcipher(cipher); return code; } @@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx, u32 seqnum, unsigned char *cksum, unsigned char *buf) { - unsigned char plain[8]; + unsigned char *plain; + s32 code; if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) return krb5_make_rc4_seq_num(kctx, direction, seqnum, cksum, buf); + plain = kmalloc(8, GFP_NOFS); + if (!plain) + return -ENOMEM; + plain[0] = (unsigned char) (seqnum & 0xff); plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); @@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx, plain[6] = direction; plain[7] = direction; - return krb5_encrypt(key, cksum, plain, buf, 8); + code = krb5_encrypt(key, cksum, plain, buf, 8); + kfree(plain); + return code; } static s32 @@ -101,7 +113,7 @@ 
krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, unsigned char *buf, int *direction, s32 *seqnum) { struct crypto_sync_skcipher *cipher; - unsigned char plain[8]; + unsigned char *plain; s32 code; dprintk("RPC: %s:\n", __func__); @@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, if (code) goto out; + plain = kmalloc(8, GFP_NOFS); + if (!plain) { + code = -ENOMEM; + goto out; + } + code = krb5_decrypt(cipher, cksum, buf, plain, 8); if (code) - goto out; + goto out_plain; if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || (plain[4] != plain[7])) { code = (s32)KG_BAD_SEQ; - goto out; + goto out_plain; } *direction = plain[4]; *seqnum = ((plain[0] << 24) | (plain[1] << 16) | (plain[2] << 8) | (plain[3])); +out_plain: + kfree(plain); out: crypto_free_sync_skcipher(cipher); return code; @@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx, int *direction, u32 *seqnum) { s32 code; - unsigned char plain[8]; + unsigned char *plain; struct crypto_sync_skcipher *key = kctx->seq; dprintk("RPC: krb5_get_seq_num:\n"); @@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx, if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) return krb5_get_rc4_seq_num(kctx, cksum, buf, direction, seqnum); + plain = kmalloc(8, GFP_NOFS); + if (!plain) + return -ENOMEM; if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) - return code; + goto out; if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || - (plain[4] != plain[7])) - return (s32)KG_BAD_SEQ; + (plain[4] != plain[7])) { + code = (s32)KG_BAD_SEQ; + goto out; + } *direction = plain[4]; *seqnum = ((plain[0]) | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); - return 0; +out: + kfree(plain); + return code; } diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index 45a033329cd4..19bb356230ed 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c @@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt) rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); /* no "debugfs" dentry? Don't bother with the symlink. */ - if (!xprt->debugfs) { + if (IS_ERR_OR_NULL(xprt->debugfs)) { rcu_read_unlock(); return; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index cf51b8f9b15f..1f200119268c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, DMA_TO_DEVICE); } +/* If the xdr_buf has more elements than the device can + * transmit in a single RDMA Send, then the reply will + * have to be copied into a bounce buffer. + */ +static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, + struct xdr_buf *xdr, + __be32 *wr_lst) +{ + int elements; + + /* xdr->head */ + elements = 1; + + /* xdr->pages */ + if (!wr_lst) { + unsigned int remaining; + unsigned long pageoff; + + pageoff = xdr->page_base & ~PAGE_MASK; + remaining = xdr->page_len; + while (remaining) { + ++elements; + remaining -= min_t(u32, PAGE_SIZE - pageoff, + remaining); + pageoff = 0; + } + } + + /* xdr->tail */ + if (xdr->tail[0].iov_len) + ++elements; + + /* assume 1 SGE is needed for the transport header */ + return elements >= rdma->sc_max_send_sges; +} + +/* The device is not capable of sending the reply directly. + * Assemble the elements of @xdr into the transport header + * buffer. 
+ */ +static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt, + struct xdr_buf *xdr, __be32 *wr_lst) +{ + unsigned char *dst, *tailbase; + unsigned int taillen; + + dst = ctxt->sc_xprt_buf; + dst += ctxt->sc_sges[0].length; + + memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len); + dst += xdr->head[0].iov_len; + + tailbase = xdr->tail[0].iov_base; + taillen = xdr->tail[0].iov_len; + if (wr_lst) { + u32 xdrpad; + + xdrpad = xdr_padsize(xdr->page_len); + if (taillen && xdrpad) { + tailbase += xdrpad; + taillen -= xdrpad; + } + } else { + unsigned int len, remaining; + unsigned long pageoff; + struct page **ppages; + + ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); + pageoff = xdr->page_base & ~PAGE_MASK; + remaining = xdr->page_len; + while (remaining) { + len = min_t(u32, PAGE_SIZE - pageoff, remaining); + + memcpy(dst, page_address(*ppages), len); + remaining -= len; + dst += len; + pageoff = 0; + } + } + + if (taillen) + memcpy(dst, tailbase, taillen); + + ctxt->sc_sges[0].length += xdr->len; + ib_dma_sync_single_for_device(rdma->sc_pd->device, + ctxt->sc_sges[0].addr, + ctxt->sc_sges[0].length, + DMA_TO_DEVICE); + + return 0; +} + /* svc_rdma_map_reply_msg - Map the buffer holding RPC message * @rdma: controlling transport * @ctxt: send_ctxt for the Send WR @@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, u32 xdr_pad; int ret; - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) - return -EIO; + if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst)) + return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst); + + ++ctxt->sc_cur_sge_no; ret = svc_rdma_dma_map_buf(rdma, ctxt, xdr->head[0].iov_base, xdr->head[0].iov_len); @@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, while (remaining) { len = min_t(u32, PAGE_SIZE - page_off, remaining); - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) - return -EIO; + ++ctxt->sc_cur_sge_no; ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, page_off, len); if (ret < 0) @@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, len = xdr->tail[0].iov_len; tail: if (len) { - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) - return -EIO; + ++ctxt->sc_cur_sge_no; ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); if (ret < 0) return ret; diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 924c17d46903..57f86c63a463 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) /* Transport header, head iovec, tail iovec */ newxprt->sc_max_send_sges = 3; /* Add one SGE per page list entry */ - newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE; - if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) { - pr_err("svcrdma: too few Send SGEs available (%d needed)\n", - newxprt->sc_max_send_sges); - goto errout; - } + newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1; + if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) + newxprt->sc_max_send_sges = dev->attrs.max_send_sge; newxprt->sc_max_req_size = svcrdma_max_req_size; newxprt->sc_max_requests = svcrdma_max_requests; newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 4994e75945b8..21113bfd4eca 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -527,7 +527,8 @@ 
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, sendcq = ib_alloc_cq(ia->ri_device, NULL, ep->rep_attr.cap.max_send_wr + 1, - 1, IB_POLL_WORKQUEUE); + ia->ri_device->num_comp_vectors > 1 ? 1 : 0, + IB_POLL_WORKQUEUE); if (IS_ERR(sendcq)) { rc = PTR_ERR(sendcq); goto out1; diff --git a/net/tipc/link.c b/net/tipc/link.c index 2792a3cae682..85ad5c0678d0 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, default: pr_warn("Dropping received illegal msg type\n"); kfree_skb(skb); - return false; + return true; }; } @@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, l->rcv_unacked = 0; } else { /* RESET_MSG or ACTIVATE_MSG */ + if (mtyp == ACTIVATE_MSG) { + msg_set_dest_session_valid(hdr, 1); + msg_set_dest_session(hdr, l->peer_session); + } msg_set_max_pkt(hdr, l->advertised_mtu); strcpy(data, l->if_name); msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); @@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); break; } + + /* If this endpoint was re-created while peer was ESTABLISHING + * it doesn't know current session number. Force re-synch. + */ + if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) && + l->session != msg_dest_session(hdr)) { + if (less(l->session, msg_dest_session(hdr))) + l->session = msg_dest_session(hdr) + 1; + break; + } + /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ if (mtyp == RESET_MSG || !link_is_up(l)) rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a0924956bb61..d7e4b8b93f9d 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n) msg_set_bits(m, 1, 0, 0xffff, n); } +/* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch + * link peer session number + */ +static inline bool msg_dest_session_valid(struct tipc_msg *m) +{ + return msg_bits(m, 1, 16, 0x1); +} + +static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid) +{ + msg_set_bits(m, 1, 16, 0x1, valid); +} + +static inline u16 msg_dest_session(struct tipc_msg *m) +{ + return msg_bits(m, 1, 0, 0xffff); +} + +static inline void msg_set_dest_session(struct tipc_msg *m, u16 n) +{ + msg_set_bits(m, 1, 0, 0xffff, n); +} /* * Word 2 diff --git a/net/tipc/node.c b/net/tipc/node.c index db2a6c3e0be9..2dc4919ab23c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) tipc_node_write_lock(n); if (!tipc_link_is_establishing(l)) { __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); - if (delete) { - kfree(l); - le->link = NULL; - n->link_cnt--; - } } else { /* Defuse pending tipc_node_link_up() */ + tipc_link_reset(l); tipc_link_fsm_evt(l, LINK_RESET_EVT); } + if (delete) { + kfree(l); + le->link = NULL; + n->link_cnt--; + } trace_tipc_node_link_down(n, true, "node link down or deleted!"); tipc_node_write_unlock(n); if (delete) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1217c90a363b..684f2125fc6b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -388,7 +388,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout) rc_ = tipc_sk_sock_err((sock_), timeo_); \ if (rc_) \ break; \ - prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ + add_wait_queue(sk_sleep(sk_), 
&wait_); \ release_sock(sk_); \ *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ sched_annotate_sleep(); \ @@ -1677,7 +1677,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk) static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) { struct sock *sk = sock->sk; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); long timeo = *timeop; int err = sock_error(sk); @@ -1685,15 +1685,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) return err; for (;;) { - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { if (sk->sk_shutdown & RCV_SHUTDOWN) { err = -ENOTCONN; break; } + add_wait_queue(sk_sleep(sk), &wait); release_sock(sk); - timeo = schedule_timeout(timeo); + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + sched_annotate_sleep(); lock_sock(sk); + remove_wait_queue(sk_sleep(sk), &wait); } err = 0; if (!skb_queue_empty(&sk->sk_receive_queue)) @@ -1709,7 +1711,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) if (err) break; } - finish_wait(sk_sleep(sk), &wait); *timeop = timeo; return err; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 11cdc8f7db63..bf5b54b513bc 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -439,6 +439,8 @@ static int tls_do_encryption(struct sock *sk, struct scatterlist *sge = sk_msg_elem(msg_en, start); int rc; + memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data)); + sge->offset += tls_ctx->tx.prepend_size; sge->length -= tls_ctx->tx.prepend_size; @@ -448,7 +450,7 @@ static int tls_do_encryption(struct sock *sk, aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, rec->sg_aead_in, rec->sg_aead_out, - data_len, tls_ctx->tx.iv); + data_len, rec->iv_data); aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, tls_encrypt_done, sk); @@ -1792,7 +1794,9 @@ void tls_sw_free_resources_tx(struct sock *sk) if (atomic_read(&ctx->encrypt_pending)) crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + release_sock(sk); cancel_delayed_work_sync(&ctx->tx_work.work); + lock_sock(sk); /* Tx whatever records we can transmit and abandon the rest */ tls_tx_records(sk, -1); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 74d1eed7cbd4..a95d479caeea 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -890,7 +890,7 @@ retry: addr->hash ^= sk->sk_type; __unix_remove_socket(sk); - u->addr = addr; + smp_store_release(&u->addr, addr); __unix_insert_socket(&unix_socket_table[addr->hash], sk); spin_unlock(&unix_table_lock); err = 0; @@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = 0; __unix_remove_socket(sk); - u->addr = addr; + smp_store_release(&u->addr, addr); __unix_insert_socket(list, sk); out_unlock: @@ -1331,15 +1331,29 @@ restart: RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); otheru = unix_sk(other); - /* copy address information from listening to new sock*/ - if (otheru->addr) { - refcount_inc(&otheru->addr->refcnt); - newu->addr = otheru->addr; - } + /* copy address information from listening to new sock + * + * The contents of *(otheru->addr) and otheru->path + * are seen fully set up here, since we have found + * otheru in hash under unix_table_lock. Insertion + * into the hash chain we'd found it in had been done + * in an earlier critical area protected by unix_table_lock, + * the same one where we'd set *(otheru->addr) contents, + * as well as otheru->path and otheru->addr itself. 
+ * + * Using smp_store_release() here to set newu->addr + * is enough to make those stores, as well as stores + * to newu->path visible to anyone who gets newu->addr + * by smp_load_acquire(). IOW, the same warranties + * as for unix_sock instances bound in unix_bind() or + * in unix_autobind(). + */ if (otheru->path.dentry) { path_get(&otheru->path); newu->path = otheru->path; } + refcount_inc(&otheru->addr->refcnt); + smp_store_release(&newu->addr, otheru->addr); /* Set credentials */ copy_peercred(sk, other); @@ -1453,7 +1467,7 @@ out: static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sock *sk = sock->sk; - struct unix_sock *u; + struct unix_address *addr; DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); int err = 0; @@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) sock_hold(sk); } - u = unix_sk(sk); - unix_state_lock(sk); - if (!u->addr) { + addr = smp_load_acquire(&unix_sk(sk)->addr); + if (!addr) { sunaddr->sun_family = AF_UNIX; sunaddr->sun_path[0] = 0; err = sizeof(short); } else { - struct unix_address *addr = u->addr; - err = addr->len; memcpy(sunaddr, addr->name, addr->len); } - unix_state_unlock(sk); sock_put(sk); out: return err; @@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { - struct unix_sock *u = unix_sk(sk); + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); - if (u->addr) { - msg->msg_namelen = u->addr->len; - memcpy(msg->msg_name, u->addr->name, u->addr->len); + if (addr) { + msg->msg_namelen = addr->len; + memcpy(msg->msg_name, addr->name, addr->len); } } @@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk) if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; - unix_state_lock(sk); + if (!smp_load_acquire(&unix_sk(sk)->addr)) + return -ENOENT; + path = unix_sk(sk)->path; - if (!path.dentry) { - unix_state_unlock(sk); + if (!path.dentry) return -ENOENT; - } path_get(&path); - unix_state_unlock(sk); fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) @@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) (s->sk_state == TCP_ESTABLISHED ? 
SS_CONNECTING : SS_DISCONNECTING), sock_i_ino(s)); - if (u->addr) { + if (u->addr) { // under unix_table_lock here int i, len; seq_putc(seq, ' '); diff --git a/net/unix/diag.c b/net/unix/diag.c index 384c84e83462..3183d9b8ab33 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -10,7 +10,8 @@ static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) { - struct unix_address *addr = unix_sk(sk)->addr; + /* might or might not have unix_table_lock */ + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); if (!addr) return 0; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 5d3cce9e8744..15eb5d3d4750 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void) { struct virtio_vsock *vsock = virtio_vsock_get(); + if (!vsock) + return VMADDR_CID_ANY; + return vsock->guest_cid; } @@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) virtio_vsock_update_guest_cid(vsock); - ret = vsock_core_init(&virtio_transport.transport); - if (ret < 0) - goto out_vqs; - vsock->rx_buf_nr = 0; vsock->rx_buf_max_nr = 0; atomic_set(&vsock->queued_replies, 0); @@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) mutex_unlock(&the_virtio_vsock_mutex); return 0; -out_vqs: - vsock->vdev->config->del_vqs(vsock->vdev); out: kfree(vsock); mutex_unlock(&the_virtio_vsock_mutex); @@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev) flush_work(&vsock->event_work); flush_work(&vsock->send_pkt_work); + /* Reset all connected sockets when the device disappear */ + vsock_for_each_connected_socket(virtio_vsock_reset_sock); + vdev->config->reset(vdev); mutex_lock(&vsock->rx_lock); @@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev) mutex_lock(&the_virtio_vsock_mutex); the_virtio_vsock = NULL; - vsock_core_exit(); mutex_unlock(&the_virtio_vsock_mutex); vdev->config->del_vqs(vdev); @@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void) virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); if (!virtio_vsock_workqueue) return -ENOMEM; + ret = register_virtio_driver(&virtio_vsock_driver); if (ret) - destroy_workqueue(virtio_vsock_workqueue); + goto out_wq; + + ret = vsock_core_init(&virtio_transport.transport); + if (ret) + goto out_vdr; + + return 0; + +out_vdr: + unregister_virtio_driver(&virtio_vsock_driver); +out_wq: + destroy_workqueue(virtio_vsock_workqueue); return ret; + } static void __exit virtio_vsock_exit(void) { + vsock_core_exit(); unregister_virtio_driver(&virtio_vsock_driver); destroy_workqueue(virtio_vsock_workqueue); } diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index c361ce782412..c3d5ab01fba7 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work) static void vmci_transport_destruct(struct vsock_sock *vsk) { + /* transport can be NULL if we hit a failure at init() time */ + if (!vmci_trans(vsk)) + return; + /* Ensure that the detach callback doesn't use the sk/vsk * we are about to destruct. 
*/ diff --git a/net/wireless/ap.c b/net/wireless/ap.c index 882d97bdc6bf..550ac9d827fe 100644 --- a/net/wireless/ap.c +++ b/net/wireless/ap.c @@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, cfg80211_sched_dfs_chan_update(rdev); } + schedule_work(&cfg80211_disconnect_work); + return err; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 623dfe5e211c..b36ad8efb5e5 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) ASSERT_RTNL(); + flush_work(&wdev->pmsr_free_wk); + nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); list_del_rcu(&wdev->list); diff --git a/net/wireless/core.h b/net/wireless/core.h index c5d6f3418601..f6b40563dc63 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev); bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, u32 center_freq_khz, u32 bw_khz); +extern struct work_struct cfg80211_disconnect_work; + /** * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable * @wiphy: the wiphy to validate against diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 74150ad95823..d91a408db113 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -250,7 +250,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = - NLA_POLICY_MAX(NLA_U8, 15), + NLA_POLICY_MAX(NLA_U8, 31), [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index de9286703280..0216ab555249 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c @@ -256,8 +256,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) if (err) goto out_err; } else { - memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]), - ETH_ALEN); + memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN); memset(req->mac_addr_mask, 0xff, ETH_ALEN); } @@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) req->n_peers = count; req->cookie = cfg80211_assign_cookie(rdev); + req->nl_portid = info->snd_portid; err = rdev_start_pmsr(rdev, wdev, req); if (err) @@ -530,14 +530,14 @@ free: } EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); -void cfg80211_pmsr_free_wk(struct work_struct *work) +static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev) { - struct wireless_dev *wdev = container_of(work, struct wireless_dev, - pmsr_free_wk); struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_pmsr_request *req, *tmp; LIST_HEAD(free_list); + lockdep_assert_held(&wdev->mtx); + spin_lock_bh(&wdev->pmsr_lock); list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { if (req->nl_portid) @@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work) spin_unlock_bh(&wdev->pmsr_lock); list_for_each_entry_safe(req, tmp, &free_list, list) { - wdev_lock(wdev); rdev_abort_pmsr(rdev, wdev, req); - wdev_unlock(wdev); kfree(req); } } +void cfg80211_pmsr_free_wk(struct work_struct *work) +{ + struct wireless_dev *wdev = container_of(work, struct wireless_dev, + pmsr_free_wk); + + wdev_lock(wdev); + cfg80211_pmsr_process_abort(wdev); + 
wdev_unlock(wdev); +} + void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) { struct cfg80211_pmsr_request *req; @@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) spin_unlock_bh(&wdev->pmsr_lock); if (found) - schedule_work(&wdev->pmsr_free_wk); - flush_work(&wdev->pmsr_free_wk); + cfg80211_pmsr_process_abort(wdev); + WARN_ON(!list_empty(&wdev->pmsr_list)); } diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f741d8376a46..7d34cb884840 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work) rtnl_unlock(); } -static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); +DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); /* diff --git a/net/wireless/util.c b/net/wireless/util.c index cd48cdd582c0..ec30e3732c7b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -5,7 +5,7 @@ * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018-2019 Intel Corporation */ #include <linux/export.h> #include <linux/bitops.h> @@ -19,6 +19,7 @@ #include <linux/mpls.h> #include <linux/gcd.h> #include <linux/bitfield.h> +#include <linux/nospec.h> #include "core.h" #include "rdev-ops.h" @@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, { unsigned int dscp; unsigned char vlan_priority; + unsigned int ret; /* skb->priority values from 256->263 are magic values to * directly indicate a specific 802.1d priority. This is used * to allow 802.1d priority to be passed directly in from VLAN * tags, etc. */ - if (skb->priority >= 256 && skb->priority <= 263) - return skb->priority - 256; + if (skb->priority >= 256 && skb->priority <= 263) { + ret = skb->priority - 256; + goto out; + } if (skb_vlan_tag_present(skb)) { vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; - if (vlan_priority > 0) - return vlan_priority; + if (vlan_priority > 0) { + ret = vlan_priority; + goto out; + } } switch (skb->protocol) { @@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, if (!mpls) return 0; - return (ntohl(mpls->entry) & MPLS_LS_TC_MASK) + ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; + goto out; } case htons(ETH_P_80221): /* 802.21 is always network control traffic */ @@ -761,18 +768,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, unsigned int i, tmp_dscp = dscp >> 2; for (i = 0; i < qos_map->num_des; i++) { - if (tmp_dscp == qos_map->dscp_exception[i].dscp) - return qos_map->dscp_exception[i].up; + if (tmp_dscp == qos_map->dscp_exception[i].dscp) { + ret = qos_map->dscp_exception[i].up; + goto out; + } } for (i = 0; i < 8; i++) { if (tmp_dscp >= qos_map->up[i].low && - tmp_dscp <= qos_map->up[i].high) - return i; + tmp_dscp <= qos_map->up[i].high) { + ret = i; + goto out; + } } } - return dscp >> 5; + ret = dscp >> 5; +out: + return array_index_nospec(ret, IEEE80211_NUM_TIDS); } EXPORT_SYMBOL(cfg80211_classify8021d); diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5121729b8b63..eff31348e20b 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb) unsigned int lci = 1; struct sock *sk; - read_lock_bh(&x25_list_lock); - - while ((sk = __x25_find_socket(lci, nb)) != NULL) { + while ((sk = x25_find_socket(lci, nb)) != NULL) { sock_put(sk); if (++lci == 
4096) { lci = 0; break; } + cond_resched(); } - read_unlock_bh(&x25_list_lock); return lci; } @@ -681,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; int len, i, rc = 0; - if (!sock_flag(sk, SOCK_ZAPPED) || - addr_len != sizeof(struct sockaddr_x25) || + if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) { rc = -EINVAL; goto out; @@ -701,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } lock_sock(sk); - x25_sk(sk)->source_addr = addr->sx25_addr; - x25_insert_socket(sk); - sock_reset_flag(sk, SOCK_ZAPPED); + if (sock_flag(sk, SOCK_ZAPPED)) { + x25_sk(sk)->source_addr = addr->sx25_addr; + x25_insert_socket(sk); + sock_reset_flag(sk, SOCK_ZAPPED); + } else { + rc = -EINVAL; + } release_sock(sk); SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); out: diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index d4de871e7d4d..37e1fe180769 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, return 0; err_unreg_umem: - xdp_clear_umem_at_qid(dev, queue_id); if (!force_zc) err = 0; /* fallback to copy mode */ + if (err) + xdp_clear_umem_at_qid(dev, queue_id); out_rtnl_unlock: rtnl_unlock(); return err; @@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem) if (!umem->pgs) return -ENOMEM; - down_write(¤t->mm->mmap_sem); - npgs = get_user_pages(umem->address, umem->npgs, - gup_flags, &umem->pgs[0], NULL); - up_write(¤t->mm->mmap_sem); + down_read(¤t->mm->mmap_sem); + npgs = get_user_pages_longterm(umem->address, umem->npgs, + gup_flags, &umem->pgs[0], NULL); + up_read(¤t->mm->mmap_sem); if (npgs != umem->npgs) { if (npgs >= 0) { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index a03268454a27..85e4fe4f18cc 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock) xskq_destroy(xs->rx); xskq_destroy(xs->tx); - xdp_put_umem(xs->umem); sock_orphan(sk); sock->sk = NULL; @@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, if (!umem) return -EINVAL; + /* Matches the smp_wmb() in XDP_UMEM_REG */ + smp_rmb(); if (offset == XDP_UMEM_PGOFF_FILL_RING) q = READ_ONCE(umem->fq); else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) @@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, if (!q) return -EINVAL; + /* Matches the smp_wmb() in xsk_init_queue */ + smp_rmb(); qpg = virt_to_head_page(q->ring); if (size > (PAGE_SIZE << compound_order(qpg))) return -EINVAL; @@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = { .sendpage = sock_no_sendpage, }; +static void xsk_destruct(struct sock *sk) +{ + struct xdp_sock *xs = xdp_sk(sk); + + if (!sock_flag(sk, SOCK_DEAD)) + return; + + xdp_put_umem(xs->umem); + + sk_refcnt_debug_dec(sk); +} + static int xsk_create(struct net *net, struct socket *sock, int protocol, int kern) { @@ -740,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, sk->sk_family = PF_XDP; + sk->sk_destruct = xsk_destruct; + sk_refcnt_debug_inc(sk); + sock_set_flag(sk, SOCK_RCU_FREE); xs = xdp_sk(sk); diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 6be8c7df15bb..dbb3c1945b5c 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb) int ifindex; struct xfrm_if 
*xi; - if (!skb->dev) + if (!secpath_exists(skb) || !skb->dev) return NULL; - xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id); + xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id); ifindex = skb->dev->ifindex; for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 934492bad8e0..8d1a898d0ba5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -680,16 +680,6 @@ static void xfrm_hash_resize(struct work_struct *work) mutex_unlock(&hash_resize_mutex); } -static void xfrm_hash_reset_inexact_table(struct net *net) -{ - struct xfrm_pol_inexact_bin *b; - - lockdep_assert_held(&net->xfrm.xfrm_policy_lock); - - list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins) - INIT_HLIST_HEAD(&b->hhead); -} - /* Make sure *pol can be inserted into fastbin. * Useful to check that later insert requests will be sucessful * (provided xfrm_policy_lock is held throughout). @@ -833,13 +823,13 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net, u16 family) { unsigned int matched_s, matched_d; - struct hlist_node *newpos = NULL; struct xfrm_policy *policy, *p; matched_s = 0; matched_d = 0; list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { + struct hlist_node *newpos = NULL; bool matches_s, matches_d; if (!policy->bydst_reinsert) @@ -849,16 +839,19 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net, policy->bydst_reinsert = false; hlist_for_each_entry(p, &n->hhead, bydst) { - if (policy->priority >= p->priority) + if (policy->priority > p->priority) + newpos = &p->bydst; + else if (policy->priority == p->priority && + policy->pos > p->pos) newpos = &p->bydst; else break; } if (newpos) - hlist_add_behind(&policy->bydst, newpos); + hlist_add_behind_rcu(&policy->bydst, newpos); else - hlist_add_head(&policy->bydst, &n->hhead); + hlist_add_head_rcu(&policy->bydst, &n->hhead); /* paranoia checks follow. 
* Check that the reinserted policy matches at least @@ -893,12 +886,13 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, struct rb_root *new, u16 family) { - struct rb_node **p, *parent = NULL; struct xfrm_pol_inexact_node *node; + struct rb_node **p, *parent; /* we should not have another subtree here */ WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root)); - +restart: + parent = NULL; p = &new->rb_node; while (*p) { u8 prefixlen; @@ -918,12 +912,11 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, } else { struct xfrm_policy *tmp; - hlist_for_each_entry(tmp, &node->hhead, bydst) - tmp->bydst_reinsert = true; - hlist_for_each_entry(tmp, &n->hhead, bydst) + hlist_for_each_entry(tmp, &n->hhead, bydst) { tmp->bydst_reinsert = true; + hlist_del_rcu(&tmp->bydst); + } - INIT_HLIST_HEAD(&node->hhead); xfrm_policy_inexact_list_reinsert(net, node, family); if (node->prefixlen == n->prefixlen) { @@ -935,8 +928,7 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, kfree_rcu(n, rcu); n = node; n->prefixlen = prefixlen; - *p = new->rb_node; - parent = NULL; + goto restart; } } @@ -965,12 +957,11 @@ static void xfrm_policy_inexact_node_merge(struct net *net, family); } - hlist_for_each_entry(tmp, &v->hhead, bydst) - tmp->bydst_reinsert = true; - hlist_for_each_entry(tmp, &n->hhead, bydst) + hlist_for_each_entry(tmp, &v->hhead, bydst) { tmp->bydst_reinsert = true; + hlist_del_rcu(&tmp->bydst); + } - INIT_HLIST_HEAD(&n->hhead); xfrm_policy_inexact_list_reinsert(net, n, family); } @@ -1235,6 +1226,7 @@ static void xfrm_hash_rebuild(struct work_struct *work) } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); spin_lock_bh(&net->xfrm.xfrm_policy_lock); + write_seqcount_begin(&xfrm_policy_hash_generation); /* make sure that we can insert the indirect policies again before * we start with destructive action. 
@@ -1278,10 +1270,14 @@ static void xfrm_hash_rebuild(struct work_struct *work) } /* reset the bydst and inexact table in all directions */ - xfrm_hash_reset_inexact_table(net); - for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { - INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); + struct hlist_node *n; + + hlist_for_each_entry_safe(policy, n, + &net->xfrm.policy_inexact[dir], + bydst_inexact_list) + hlist_del_init(&policy->bydst_inexact_list); + hmask = net->xfrm.policy_bydst[dir].hmask; odst = net->xfrm.policy_bydst[dir].table; for (i = hmask; i >= 0; i--) @@ -1313,6 +1309,9 @@ static void xfrm_hash_rebuild(struct work_struct *work) newpos = NULL; chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); + + hlist_del_rcu(&policy->bydst); + if (!chain) { void *p = xfrm_policy_inexact_insert(policy, dir, 0); @@ -1334,6 +1333,7 @@ static void xfrm_hash_rebuild(struct work_struct *work) out_unlock: __xfrm_policy_inexact_flush(net); + write_seqcount_end(&xfrm_policy_hash_generation); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); mutex_unlock(&hash_resize_mutex); @@ -2600,7 +2600,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, dst_copy_metrics(dst1, dst); if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { - __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); + __u32 mark = 0; + + if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m) + mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); family = xfrm[i]->props.family; dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, @@ -3311,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, if (ifcb) { xi = ifcb->decode_session(skb); - if (xi) + if (xi) { if_id = xi->p.if_id; + net = xi->net; + } } rcu_read_unlock(); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 23c92891758a..1bb971f46fc6 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_free); -static void xfrm_state_gc_destroy(struct xfrm_state *x) +static void ___xfrm_state_destroy(struct xfrm_state *x) { tasklet_hrtimer_cancel(&x->mtimer); del_timer_sync(&x->rtimer); @@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work) synchronize_rcu(); hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) - xfrm_state_gc_destroy(x); + ___xfrm_state_destroy(x); } static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) @@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) } EXPORT_SYMBOL(xfrm_state_alloc); -void __xfrm_state_destroy(struct xfrm_state *x) +void __xfrm_state_destroy(struct xfrm_state *x, bool sync) { WARN_ON(x->km.state != XFRM_STATE_DEAD); - spin_lock_bh(&xfrm_state_gc_lock); - hlist_add_head(&x->gclist, &xfrm_state_gc_list); - spin_unlock_bh(&xfrm_state_gc_lock); - schedule_work(&xfrm_state_gc_work); + if (sync) { + synchronize_rcu(); + ___xfrm_state_destroy(x); + } else { + spin_lock_bh(&xfrm_state_gc_lock); + hlist_add_head(&x->gclist, &xfrm_state_gc_list); + spin_unlock_bh(&xfrm_state_gc_lock); + schedule_work(&xfrm_state_gc_work); + } } EXPORT_SYMBOL(__xfrm_state_destroy); @@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool } #endif -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync) { int i, err = 0, cnt = 0; @@ -730,7 +735,10 @@ restart: err = xfrm_state_delete(x); xfrm_audit_state_delete(x, err ? 
0 : 1, task_valid); - xfrm_state_put(x); + if (sync) + xfrm_state_put_sync(x); + else + xfrm_state_put(x); if (!err) cnt++; @@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) if (atomic_read(&t->tunnel_users) == 2) xfrm_state_delete(t); atomic_dec(&t->tunnel_users); - xfrm_state_put(t); + xfrm_state_put_sync(t); x->tunnel = NULL; } } @@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net) unsigned int sz; flush_work(&net->xfrm.state_hash_work); - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); flush_work(&xfrm_state_gc_work); + xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); WARN_ON(!list_empty(&net->xfrm.state_all)); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 277c1c46fe94..a131f9ff979e 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) if (!ut[i].family) ut[i].family = family; - if ((ut[i].mode == XFRM_MODE_TRANSPORT) && - (ut[i].family != prev_family)) - return -EINVAL; - + switch (ut[i].mode) { + case XFRM_MODE_TUNNEL: + case XFRM_MODE_BEET: + break; + default: + if (ut[i].family != prev_family) + return -EINVAL; + break; + } if (ut[i].mode >= XFRM_MODE_MAX) return -EINVAL; @@ -1927,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct xfrm_usersa_flush *p = nlmsg_data(nlh); int err; - err = xfrm_state_flush(net, p->proto, true); + err = xfrm_state_flush(net, p->proto, true, false); if (err) { if (err == -ESRCH) /* empty table */ return 0; diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c index 33e67bd1dc34..32234481ad7d 100644 --- a/samples/mei/mei-amt-version.c +++ b/samples/mei/mei-amt-version.c @@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid, me->verbose = verbose; - me->fd = open("/dev/mei", O_RDWR); + me->fd = open("/dev/mei0", O_RDWR); if (me->fd == -1) { mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); goto err; diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 77cebad0474e..f75e7bda4889 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s) fprintf(stderr, "Read error or end of file.\n"); return -1; } - if (strlen(sym) > KSYM_NAME_LEN) { - fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n" + if (strlen(sym) >= KSYM_NAME_LEN) { + fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n" "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n", sym, strlen(sym), KSYM_NAME_LEN); return -1; diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 08c88de0ffda..11975ec8d566 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -1444,7 +1444,10 @@ check: new = aa_label_merge(label, target, GFP_KERNEL); if (IS_ERR_OR_NULL(new)) { info = "failed to build target label"; - error = PTR_ERR(new); + if (!new) + error = -ENOMEM; + else + error = PTR_ERR(new); new = NULL; perms.allow = 0; goto audit; diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 2c010874329f..8db1731d046a 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -1599,12 +1599,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv, return apparmor_ip_postroute(priv, skb, state); } +#if IS_ENABLED(CONFIG_IPV6) static unsigned int apparmor_ipv6_postroute(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { return apparmor_ip_postroute(priv, skb, state); } 
+#endif static const struct nf_hook_ops apparmor_nf_ops[] = { { diff --git a/security/keys/internal.h b/security/keys/internal.h index 479909b858c7..8f533c81aa8d 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm) return key_task_permission(key_ref, current_cred(), perm); } -/* - * Authorisation record for request_key(). - */ -struct request_key_auth { - struct key *target_key; - struct key *dest_keyring; - const struct cred *cred; - void *callout_info; - size_t callout_len; - pid_t pid; -} __randomize_layout; - extern struct key_type key_type_request_key_auth; extern struct key *request_key_auth_new(struct key *target, + const char *op, const void *callout_info, size_t callout_len, struct key *dest_keyring); diff --git a/security/keys/key.c b/security/keys/key.c index 44a80d6741a1..696f1c092c50 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { - if (user->qnkeys + 1 >= maxkeys || - user->qnbytes + quotalen >= maxbytes || + if (user->qnkeys + 1 > maxkeys || + user->qnbytes + quotalen > maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } @@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, key->gid = gid; key->perm = perm; key->restrict_link = restrict_link; + key->last_used_at = ktime_get_real_seconds(); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index e8093d025966..7bbe03593e58 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -25,6 +25,7 @@ #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> +#include <keys/request_key_auth-type.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 diff --git a/security/keys/keyring.c b/security/keys/keyring.c index eadebb92986a..f81372f53dd7 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring, BUG_ON((ctx->flags & STATE_CHECKS) == 0 || (ctx->flags & STATE_CHECKS) == STATE_CHECKS); - if (ctx->index_key.description) - ctx->index_key.desc_len = strlen(ctx->index_key.description); - /* Check to see if this top-level keyring is what we are looking for * and whether it is valid or not. 
*/ @@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring, struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, + .index_key.desc_len = strlen(description), .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, diff --git a/security/keys/proc.c b/security/keys/proc.c index d2b802072693..78ac305d715e 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v) int rc; struct keyring_search_context ctx = { - .index_key.type = key->type, - .index_key.description = key->description, + .index_key = key->index_key, .cred = m->file->f_cred, .match_data.cmp = lookup_user_key_possessed, .match_data.raw_data = key, diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 02c77e928f68..0e0b9ccad2f8 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -19,6 +19,7 @@ #include <linux/security.h> #include <linux/user_namespace.h> #include <linux/uaccess.h> +#include <keys/request_key_auth-type.h> #include "internal.h" /* Session keyring create vs join semaphore */ diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 301f0e300dbd..7a0c6b666ff0 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -18,31 +18,30 @@ #include <linux/keyctl.h> #include <linux/slab.h> #include "internal.h" +#include <keys/request_key_auth-type.h> #define key_negative_timeout 60 /* default timeout on a negative key's existence */ /** * complete_request_key - Complete the construction of a key. - * @cons: The key construction record. + * @auth_key: The authorisation key. * @error: The success or failute of the construction. * * Complete the attempt to construct a key. The key will be negated * if an error is indicated. The authorisation key will be revoked * unconditionally. 
*/ -void complete_request_key(struct key_construction *cons, int error) +void complete_request_key(struct key *authkey, int error) { - kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); + struct request_key_auth *rka = get_request_key_auth(authkey); + struct key *key = rka->target_key; + + kenter("%d{%d},%d", authkey->serial, key->serial, error); if (error < 0) - key_negate_and_link(cons->key, key_negative_timeout, NULL, - cons->authkey); + key_negate_and_link(key, key_negative_timeout, NULL, authkey); else - key_revoke(cons->authkey); - - key_put(cons->key); - key_put(cons->authkey); - kfree(cons); + key_revoke(authkey); } EXPORT_SYMBOL(complete_request_key); @@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp, * Request userspace finish the construction of a key * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" */ -static int call_sbin_request_key(struct key_construction *cons, - const char *op, - void *aux) +static int call_sbin_request_key(struct key *authkey, void *aux) { static char const request_key[] = "/sbin/request-key"; + struct request_key_auth *rka = get_request_key_auth(authkey); const struct cred *cred = current_cred(); key_serial_t prkey, sskey; - struct key *key = cons->key, *authkey = cons->authkey, *keyring, - *session; + struct key *key = rka->target_key, *keyring, *session; char *argv[9], *envp[3], uid_str[12], gid_str[12]; char key_str[12], keyring_str[3][12]; char desc[20]; int ret, i; - kenter("{%d},{%d},%s", key->serial, authkey->serial, op); + kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op); ret = install_user_keyrings(); if (ret < 0) @@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons, /* set up the argument list */ i = 0; argv[i++] = (char *)request_key; - argv[i++] = (char *) op; + argv[i++] = (char *)rka->op; argv[i++] = key_str; argv[i++] = uid_str; argv[i++] = gid_str; @@ -191,7 +188,7 @@ error_link: key_put(keyring); error_alloc: - complete_request_key(cons, ret); + complete_request_key(authkey, ret); kleave(" = %d", ret); return ret; } @@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info, size_t callout_len, void *aux, struct key *dest_keyring) { - struct key_construction *cons; request_key_actor_t actor; struct key *authkey; int ret; kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); - cons = kmalloc(sizeof(*cons), GFP_KERNEL); - if (!cons) - return -ENOMEM; - /* allocate an authorisation key */ - authkey = request_key_auth_new(key, callout_info, callout_len, + authkey = request_key_auth_new(key, "create", callout_info, callout_len, dest_keyring); - if (IS_ERR(authkey)) { - kfree(cons); - ret = PTR_ERR(authkey); - authkey = NULL; - } else { - cons->authkey = key_get(authkey); - cons->key = key_get(key); + if (IS_ERR(authkey)) + return PTR_ERR(authkey); - /* make the call */ - actor = call_sbin_request_key; - if (key->type->request_key) - actor = key->type->request_key; + /* Make the call */ + actor = call_sbin_request_key; + if (key->type->request_key) + actor = key->type->request_key; - ret = actor(cons, "create", aux); + ret = actor(authkey, aux); - /* check that the actor called complete_request_key() prior to - * returning an error */ - WARN_ON(ret < 0 && - !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); - key_put(authkey); - } + /* check that the actor called complete_request_key() prior to + * returning an error */ + WARN_ON(ret < 0 && + 
!test_bit(KEY_FLAG_REVOKED, &authkey->flags)); + key_put(authkey); kleave(" = %d", ret); return ret; } @@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring) if (cred->request_key_auth) { authkey = cred->request_key_auth; down_read(&authkey->sem); - rka = authkey->payload.data[0]; + rka = get_request_key_auth(authkey); if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags)) dest_keyring = @@ -545,6 +531,7 @@ struct key *request_key_and_link(struct key_type *type, struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, + .index_key.desc_len = strlen(description), .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 87ea2f54dedc..bda6201c6c45 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -17,7 +17,7 @@ #include <linux/slab.h> #include <linux/uaccess.h> #include "internal.h" -#include <keys/user-type.h> +#include <keys/request_key_auth-type.h> static int request_key_auth_preparse(struct key_preparsed_payload *); static void request_key_auth_free_preparse(struct key_preparsed_payload *); @@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key, static void request_key_auth_describe(const struct key *key, struct seq_file *m) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); seq_puts(m, "key:"); seq_puts(m, key->description); @@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key, static long request_key_auth_read(const struct key *key, char __user *buffer, size_t buflen) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); size_t datalen; long ret; @@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key, */ static void request_key_auth_revoke(struct key *key) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); kenter("{%d}", key->serial); @@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka) */ static void request_key_auth_destroy(struct key *key) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); kenter("{%d}", key->serial); @@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key) * Create an authorisation token for /sbin/request-key or whoever to gain * access to the caller's security data. 
*/ -struct key *request_key_auth_new(struct key *target, const void *callout_info, - size_t callout_len, struct key *dest_keyring) +struct key *request_key_auth_new(struct key *target, const char *op, + const void *callout_info, size_t callout_len, + struct key *dest_keyring) { struct request_key_auth *rka, *irka; const struct cred *cred = current->cred; @@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, if (!rka->callout_info) goto error_free_rka; rka->callout_len = callout_len; + strlcpy(rka->op, op, sizeof(rka->op)); /* see if the calling process is already servicing the key request of * another process */ @@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) struct key *authkey; key_ref_t authkey_ref; - sprintf(description, "%x", target_id); + ctx.index_key.desc_len = sprintf(description, "%x", target_id); authkey_ref = search_process_keyrings(&ctx); diff --git a/security/lsm_audit.c b/security/lsm_audit.c index f84001019356..33028c098ef3 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, if (a->u.net->sk) { struct sock *sk = a->u.net->sk; struct unix_sock *u; + struct unix_address *addr; int len = 0; char *p = NULL; @@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab, #endif case AF_UNIX: u = unix_sk(sk); + addr = smp_load_acquire(&u->addr); + if (!addr) + break; if (u->path.dentry) { audit_log_d_path(ab, " path=", &u->path); break; } - if (!u->addr) - break; - len = u->addr->len-sizeof(short); - p = &u->addr->name->sun_path[0]; + len = addr->len-sizeof(short); + p = &addr->name->sun_path[0]; audit_log_format(ab, " path="); if (*p) audit_log_untrustedstring(ab, p); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3bd1286eb5a8..c8413d44973c 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1864,6 +1864,8 @@ enum { ALC887_FIXUP_BASS_CHMAP, ALC1220_FIXUP_GB_DUAL_CODECS, ALC1220_FIXUP_CLEVO_P950, + ALC1220_FIXUP_SYSTEM76_ORYP5, + ALC1220_FIXUP_SYSTEM76_ORYP5_PINS, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -2065,6 +2067,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, snd_hda_override_conn_list(codec, 0x1b, 1, conn1); } +static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, + const struct hda_fixup *fix, int action); + +static void alc1220_fixup_system76_oryp5(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + alc1220_fixup_clevo_p950(codec, fix, action); + alc_fixup_headset_mode_no_hp_mic(codec, fix, action); +} + static const struct hda_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { .type = HDA_FIXUP_PINS, @@ -2309,6 +2322,19 @@ static const struct hda_fixup alc882_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_clevo_p950, }, + [ALC1220_FIXUP_SYSTEM76_ORYP5] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc1220_fixup_system76_oryp5, + }, + [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + {} + }, + .chained = true, + .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5, + }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -2385,6 +2411,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 
0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), + SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), @@ -5644,6 +5672,7 @@ enum { ALC294_FIXUP_ASUS_SPK, ALC225_FIXUP_HEADSET_JACK, ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, + ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, }; static const struct hda_fixup alc269_fixups[] = { @@ -6599,6 +6628,17 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, + [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* Disable PCBEEP-IN passthrough */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 }, + { } + }, + .chained = true, + .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7284,7 +7324,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60130}, {0x19, 0x03a11020}, {0x21, 0x0321101f}), - SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, {0x12, 0x90a60130}, {0x14, 0x90170110}, {0x19, 0x04a11040}, diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index e6ce18c21b98..419114edfd57 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -55,6 +55,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_CS35L33 if I2C select SND_SOC_CS35L34 if I2C select SND_SOC_CS35L35 if I2C + select SND_SOC_CS35L36 if I2C select SND_SOC_CS42L42 if I2C select SND_SOC_CS42L51_I2C if I2C select SND_SOC_CS42L52 if I2C && INPUT @@ -484,6 +485,10 @@ config SND_SOC_CS35L35 tristate "Cirrus Logic CS35L35 CODEC" depends on I2C +config SND_SOC_CS35L36 + tristate "Cirrus Logic CS35L36 CODEC" + depends on I2C + config SND_SOC_CS42L42 tristate "Cirrus Logic CS42L42 CODEC" depends on I2C diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index b07dfb5fa700..aab2ad95a137 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -47,6 +47,7 @@ snd-soc-cs35l32-objs := cs35l32.o snd-soc-cs35l33-objs := cs35l33.o snd-soc-cs35l34-objs := cs35l34.o snd-soc-cs35l35-objs := cs35l35.o +snd-soc-cs35l36-objs := cs35l36.o snd-soc-cs42l42-objs := cs42l42.o snd-soc-cs42l51-objs := cs42l51.o snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o @@ -319,6 +320,7 @@ obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o obj-$(CONFIG_SND_SOC_CS35L33) += snd-soc-cs35l33.o obj-$(CONFIG_SND_SOC_CS35L34) += snd-soc-cs35l34.o obj-$(CONFIG_SND_SOC_CS35L35) += snd-soc-cs35l35.o +obj-$(CONFIG_SND_SOC_CS35L36) += snd-soc-cs35l36.o obj-$(CONFIG_SND_SOC_CS42L42) += snd-soc-cs42l42.o obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c index 4b60ebee491d..96d7cb2e4a56 100644 --- a/sound/soc/codecs/ad193x.c +++ b/sound/soc/codecs/ad193x.c @@ -37,6 +37,13 @@ static SOC_ENUM_SINGLE_DECL(ad193x_deemp_enum, AD193X_DAC_CTRL2, 1, static const DECLARE_TLV_DB_MINMAX(adau193x_tlv, -9563, 0); 
+static const unsigned int ad193x_sb[] = {32}; + +static struct snd_pcm_hw_constraint_list constr = { + .list = ad193x_sb, + .count = ARRAY_SIZE(ad193x_sb), +}; + static const struct snd_kcontrol_new ad193x_snd_controls[] = { /* DAC volume control */ SOC_DOUBLE_R_TLV("DAC1 Volume", AD193X_DAC_L1_VOL, @@ -93,6 +100,15 @@ static const struct snd_soc_dapm_widget ad193x_adc_widgets[] = { SND_SOC_DAPM_INPUT("ADC2IN"), }; +static int ad193x_check_pll(struct snd_soc_dapm_widget *source, + struct snd_soc_dapm_widget *sink) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(source->dapm); + struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(component); + + return !!ad193x->sysclk; +} + static const struct snd_soc_dapm_route audio_paths[] = { { "DAC", NULL, "SYSCLK" }, { "DAC Output", NULL, "DAC" }, @@ -101,7 +117,7 @@ static const struct snd_soc_dapm_route audio_paths[] = { { "DAC2OUT", NULL, "DAC Output" }, { "DAC3OUT", NULL, "DAC Output" }, { "DAC4OUT", NULL, "DAC Output" }, - { "SYSCLK", NULL, "PLL_PWR" }, + { "SYSCLK", NULL, "PLL_PWR", &ad193x_check_pll }, }; static const struct snd_soc_dapm_route ad193x_adc_audio_paths[] = { @@ -181,23 +197,26 @@ static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai, { struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(codec_dai->component); unsigned int adc_serfmt = 0; + unsigned int dac_serfmt = 0; unsigned int adc_fmt = 0; unsigned int dac_fmt = 0; /* At present, the driver only support AUX ADC mode(SND_SOC_DAIFMT_I2S - * with TDM) and ADC&DAC TDM mode(SND_SOC_DAIFMT_DSP_A) + * with TDM), ADC&DAC TDM mode(SND_SOC_DAIFMT_DSP_A) and DAC I2S mode + * (SND_SOC_DAIFMT_I2S) */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: adc_serfmt |= AD193X_ADC_SERFMT_TDM; + dac_serfmt |= AD193X_DAC_SERFMT_STEREO; break; case SND_SOC_DAIFMT_DSP_A: adc_serfmt |= AD193X_ADC_SERFMT_AUX; + dac_serfmt |= AD193X_DAC_SERFMT_TDM; break; default: if (ad193x_has_adc(ad193x)) return -EINVAL; - break; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { @@ -221,6 +240,12 @@ static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai, return -EINVAL; } + /* For DSP_*, LRCLK's polarity must be inverted */ + if (fmt & SND_SOC_DAIFMT_DSP_A) { + change_bit(ffs(AD193X_DAC_LEFT_HIGH) - 1, + (unsigned long *)&dac_fmt); + } + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & frm master */ adc_fmt |= AD193X_ADC_LCR_MASTER; @@ -248,6 +273,8 @@ static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai, regmap_update_bits(ad193x->regmap, AD193X_ADC_CTRL2, AD193X_ADC_FMT_MASK, adc_fmt); } + regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL0, + AD193X_DAC_SERFMT_MASK, dac_serfmt); regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL1, AD193X_DAC_FMT_MASK, dac_fmt); @@ -258,7 +285,22 @@ static int ad193x_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_component *component = codec_dai->component; + struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(component); + + if (clk_id == AD193X_SYSCLK_MCLK) { + /* MCLK must be 512 x fs */ + if (dir == SND_SOC_CLOCK_OUT || freq != 24576000) + return -EINVAL; + + regmap_update_bits(ad193x->regmap, AD193X_PLL_CLK_CTRL1, + AD193X_PLL_SRC_MASK, + AD193X_PLL_DAC_SRC_MCLK | + AD193X_PLL_CLK_SRC_MCLK); + + snd_soc_dapm_sync(dapm); + return 0; + } switch (freq) { case 12288000: case 18432000: @@ -321,7 +363,16 @@ static int ad193x_hw_params(struct 
snd_pcm_substream *substream, return 0; } +static int ad193x_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + return snd_pcm_hw_constraint_list(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_SAMPLE_BITS, + &constr); +} + static const struct snd_soc_dai_ops ad193x_dai_ops = { + .startup = ad193x_startup, .hw_params = ad193x_hw_params, .digital_mute = ad193x_mute, .set_tdm_slot = ad193x_set_tdm_slot, @@ -351,6 +402,20 @@ static struct snd_soc_dai_driver ad193x_dai = { .ops = &ad193x_dai_ops, }; +/* codec DAI instance for DAC only */ +static struct snd_soc_dai_driver ad193x_no_adc_dai = { + .name = "ad193x-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_48000, + .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE, + }, + .ops = &ad193x_dai_ops, +}; + static int ad193x_component_probe(struct snd_soc_component *component) { struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(component); @@ -444,8 +509,11 @@ int ad193x_probe(struct device *dev, struct regmap *regmap, dev_set_drvdata(dev, ad193x); + if (ad193x_has_adc(ad193x)) + return devm_snd_soc_register_component(dev, &soc_component_dev_ad193x, + &ad193x_dai, 1); return devm_snd_soc_register_component(dev, &soc_component_dev_ad193x, - &ad193x_dai, 1); + &ad193x_no_adc_dai, 1); } EXPORT_SYMBOL_GPL(ad193x_probe); diff --git a/sound/soc/codecs/ad193x.h b/sound/soc/codecs/ad193x.h index 8b1e65f928d2..27d6afbd7dfb 100644 --- a/sound/soc/codecs/ad193x.h +++ b/sound/soc/codecs/ad193x.h @@ -31,6 +31,11 @@ int ad193x_probe(struct device *dev, struct regmap *regmap, #define AD193X_PLL_INPUT_512 (2 << 1) #define AD193X_PLL_INPUT_768 (3 << 1) #define AD193X_PLL_CLK_CTRL1 0x01 +#define AD193X_PLL_SRC_MASK 0x03 +#define AD193X_PLL_DAC_SRC_PLL 0 +#define AD193X_PLL_DAC_SRC_MCLK 1 +#define AD193X_PLL_CLK_SRC_PLL (0 << 1) +#define AD193X_PLL_CLK_SRC_MCLK (1 << 1) #define AD193X_DAC_CTRL0 0x02 #define AD193X_DAC_POWERDOWN 0x01 #define AD193X_DAC_SERFMT_MASK 0xC0 @@ -96,4 +101,7 @@ int ad193x_probe(struct device *dev, struct regmap *regmap, #define AD193X_NUM_REGS 17 +#define AD193X_SYSCLK_PLL 0 +#define AD193X_SYSCLK_MCLK 1 + #endif diff --git a/sound/soc/codecs/adau1977.c b/sound/soc/codecs/adau1977.c index 116af6a9ce3b..11c53bcb71dd 100644 --- a/sound/soc/codecs/adau1977.c +++ b/sound/soc/codecs/adau1977.c @@ -885,13 +885,15 @@ static int adau1977_setup_micbias(struct adau1977 *adau1977) struct adau1977_platform_data *pdata = adau1977->dev->platform_data; unsigned int micbias; - if (pdata) { + if (pdata) micbias = pdata->micbias; - if (micbias > ADAU1977_MICBIAS_9V0) - return -EINVAL; - - } else { + else if (device_property_read_u32(adau1977->dev, "adi,micbias", + &micbias)) micbias = ADAU1977_MICBIAS_8V5; + + if (micbias > ADAU1977_MICBIAS_9V0) { + dev_err(adau1977->dev, "Invalid value for 'adi,micbias'\n"); + return -EINVAL; } return regmap_update_bits(adau1977->regmap, ADAU1977_REG_MICBIAS, diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c index b14100b6a939..99a3af8a15ff 100644 --- a/sound/soc/codecs/cros_ec_codec.c +++ b/sound/soc/codecs/cros_ec_codec.c @@ -206,7 +206,7 @@ static const struct snd_soc_dai_ops cros_ec_i2s_dai_ops = { .set_fmt = cros_ec_i2s_set_dai_fmt, }; -struct snd_soc_dai_driver cros_ec_dai[] = { +static struct snd_soc_dai_driver cros_ec_dai[] = { { .name = "cros_ec_codec I2S", .id = 0, diff --git a/sound/soc/codecs/cs35l36.c 
b/sound/soc/codecs/cs35l36.c new file mode 100644 index 000000000000..e9b5f76f27a8 --- /dev/null +++ b/sound/soc/codecs/cs35l36.c @@ -0,0 +1,1957 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// cs35l36.c -- CS35L36 ALSA SoC audio driver +// +// Copyright 2018 Cirrus Logic, Inc. +// +// Author: James Schulman <james.schulman@cirrus.com> + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/gpio/consumer.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/regmap.h> +#include <sound/core.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> +#include <sound/soc-dapm.h> +#include <linux/gpio.h> +#include <sound/initval.h> +#include <sound/tlv.h> +#include <sound/cs35l36.h> +#include <linux/of_irq.h> +#include <linux/completion.h> + +#include "cs35l36.h" + +/* + * Some fields take zero as a valid value so use a high bit flag that won't + * get written to the device to mark those. + */ +#define CS35L36_VALID_PDATA 0x80000000 + +static const char * const cs35l36_supplies[] = { + "VA", + "VP", +}; + +struct cs35l36_private { + struct device *dev; + struct cs35l36_platform_data pdata; + struct regmap *regmap; + struct regulator_bulk_data supplies[2]; + int num_supplies; + int clksrc; + int chip_version; + int rev_id; + int ldm_mode_sel; + struct gpio_desc *reset_gpio; +}; + +struct cs35l36_pll_config { + int freq; + int clk_cfg; + int fll_igain; +}; + +static const struct cs35l36_pll_config cs35l36_pll_sysclk[] = { + {32768, 0x00, 0x05}, + {8000, 0x01, 0x03}, + {11025, 0x02, 0x03}, + {12000, 0x03, 0x03}, + {16000, 0x04, 0x04}, + {22050, 0x05, 0x04}, + {24000, 0x06, 0x04}, + {32000, 0x07, 0x05}, + {44100, 0x08, 0x05}, + {48000, 0x09, 0x05}, + {88200, 0x0A, 0x06}, + {96000, 0x0B, 0x06}, + {128000, 0x0C, 0x07}, + {176400, 0x0D, 0x07}, + {192000, 0x0E, 0x07}, + {256000, 0x0F, 0x08}, + {352800, 0x10, 0x08}, + {384000, 0x11, 0x08}, + {512000, 0x12, 0x09}, + {705600, 0x13, 0x09}, + {750000, 0x14, 0x09}, + {768000, 0x15, 0x09}, + {1000000, 0x16, 0x0A}, + {1024000, 0x17, 0x0A}, + {1200000, 0x18, 0x0A}, + {1411200, 0x19, 0x0A}, + {1500000, 0x1A, 0x0A}, + {1536000, 0x1B, 0x0A}, + {2000000, 0x1C, 0x0A}, + {2048000, 0x1D, 0x0A}, + {2400000, 0x1E, 0x0A}, + {2822400, 0x1F, 0x0A}, + {3000000, 0x20, 0x0A}, + {3072000, 0x21, 0x0A}, + {3200000, 0x22, 0x0A}, + {4000000, 0x23, 0x0A}, + {4096000, 0x24, 0x0A}, + {4800000, 0x25, 0x0A}, + {5644800, 0x26, 0x0A}, + {6000000, 0x27, 0x0A}, + {6144000, 0x28, 0x0A}, + {6250000, 0x29, 0x08}, + {6400000, 0x2A, 0x0A}, + {6500000, 0x2B, 0x08}, + {6750000, 0x2C, 0x09}, + {7526400, 0x2D, 0x0A}, + {8000000, 0x2E, 0x0A}, + {8192000, 0x2F, 0x0A}, + {9600000, 0x30, 0x0A}, + {11289600, 0x31, 0x0A}, + {12000000, 0x32, 0x0A}, + {12288000, 0x33, 0x0A}, + {12500000, 0x34, 0x08}, + {12800000, 0x35, 0x0A}, + {13000000, 0x36, 0x0A}, + {13500000, 0x37, 0x0A}, + {19200000, 0x38, 0x0A}, + {22579200, 0x39, 0x0A}, + {24000000, 0x3A, 0x0A}, + {24576000, 0x3B, 0x0A}, + {25000000, 0x3C, 0x0A}, + {25600000, 0x3D, 0x0A}, + {26000000, 0x3E, 0x0A}, + {27000000, 0x3F, 0x0A}, +}; + +static struct reg_default cs35l36_reg[] = { + {CS35L36_TESTKEY_CTRL, 0x00000000}, + {CS35L36_USERKEY_CTL, 0x00000000}, + {CS35L36_OTP_CTRL1, 0x00002460}, + {CS35L36_OTP_CTRL2, 0x00000000}, + {CS35L36_OTP_CTRL3, 0x00000000}, + 
{CS35L36_OTP_CTRL4, 0x00000000}, + {CS35L36_OTP_CTRL5, 0x00000000}, + {CS35L36_PAC_CTL1, 0x00000004}, + {CS35L36_PAC_CTL2, 0x00000000}, + {CS35L36_PAC_CTL3, 0x00000000}, + {CS35L36_PWR_CTRL1, 0x00000000}, + {CS35L36_PWR_CTRL2, 0x00003321}, + {CS35L36_PWR_CTRL3, 0x01000010}, + {CS35L36_CTRL_OVRRIDE, 0x00000002}, + {CS35L36_AMP_OUT_MUTE, 0x00000000}, + {CS35L36_OTP_TRIM_STATUS, 0x00000000}, + {CS35L36_DISCH_FILT, 0x00000000}, + {CS35L36_PROTECT_REL_ERR, 0x00000000}, + {CS35L36_PAD_INTERFACE, 0x00000038}, + {CS35L36_PLL_CLK_CTRL, 0x00000010}, + {CS35L36_GLOBAL_CLK_CTRL, 0x00000003}, + {CS35L36_ADC_CLK_CTRL, 0x00000000}, + {CS35L36_SWIRE_CLK_CTRL, 0x00000000}, + {CS35L36_SP_SCLK_CLK_CTRL, 0x00000000}, + {CS35L36_MDSYNC_EN, 0x00000000}, + {CS35L36_MDSYNC_TX_ID, 0x00000000}, + {CS35L36_MDSYNC_PWR_CTRL, 0x00000000}, + {CS35L36_MDSYNC_DATA_TX, 0x00000000}, + {CS35L36_MDSYNC_TX_STATUS, 0x00000002}, + {CS35L36_MDSYNC_RX_STATUS, 0x00000000}, + {CS35L36_MDSYNC_ERR_STATUS, 0x00000000}, + {CS35L36_BSTCVRT_VCTRL1, 0x00000000}, + {CS35L36_BSTCVRT_VCTRL2, 0x00000001}, + {CS35L36_BSTCVRT_PEAK_CUR, 0x0000004A}, + {CS35L36_BSTCVRT_SFT_RAMP, 0x00000003}, + {CS35L36_BSTCVRT_COEFF, 0x00002424}, + {CS35L36_BSTCVRT_SLOPE_LBST, 0x00005800}, + {CS35L36_BSTCVRT_SW_FREQ, 0x00010000}, + {CS35L36_BSTCVRT_DCM_CTRL, 0x00002001}, + {CS35L36_BSTCVRT_DCM_MODE_FORCE, 0x00000000}, + {CS35L36_BSTCVRT_OVERVOLT_CTRL, 0x00000130}, + {CS35L36_VPI_LIMIT_MODE, 0x00000000}, + {CS35L36_VPI_LIMIT_MINMAX, 0x00003000}, + {CS35L36_VPI_VP_THLD, 0x00101010}, + {CS35L36_VPI_TRACK_CTRL, 0x00000000}, + {CS35L36_VPI_TRIG_MODE_CTRL, 0x00000000}, + {CS35L36_VPI_TRIG_STEPS, 0x00000000}, + {CS35L36_VI_SPKMON_FILT, 0x00000003}, + {CS35L36_VI_SPKMON_GAIN, 0x00000909}, + {CS35L36_VI_SPKMON_IP_SEL, 0x00000000}, + {CS35L36_DTEMP_WARN_THLD, 0x00000002}, + {CS35L36_DTEMP_STATUS, 0x00000000}, + {CS35L36_VPVBST_FS_SEL, 0x00000001}, + {CS35L36_VPVBST_VP_CTRL, 0x000001C0}, + {CS35L36_VPVBST_VBST_CTRL, 0x000001C0}, + {CS35L36_ASP_TX_PIN_CTRL, 0x00000028}, + {CS35L36_ASP_RATE_CTRL, 0x00090000}, + {CS35L36_ASP_FORMAT, 0x00000002}, + {CS35L36_ASP_FRAME_CTRL, 0x00180018}, + {CS35L36_ASP_TX1_TX2_SLOT, 0x00010000}, + {CS35L36_ASP_TX3_TX4_SLOT, 0x00030002}, + {CS35L36_ASP_TX5_TX6_SLOT, 0x00050004}, + {CS35L36_ASP_TX7_TX8_SLOT, 0x00070006}, + {CS35L36_ASP_RX1_SLOT, 0x00000000}, + {CS35L36_ASP_RX_TX_EN, 0x00000000}, + {CS35L36_ASP_RX1_SEL, 0x00000008}, + {CS35L36_ASP_TX1_SEL, 0x00000018}, + {CS35L36_ASP_TX2_SEL, 0x00000019}, + {CS35L36_ASP_TX3_SEL, 0x00000028}, + {CS35L36_ASP_TX4_SEL, 0x00000029}, + {CS35L36_ASP_TX5_SEL, 0x00000020}, + {CS35L36_ASP_TX6_SEL, 0x00000000}, + {CS35L36_SWIRE_P1_TX1_SEL, 0x00000018}, + {CS35L36_SWIRE_P1_TX2_SEL, 0x00000019}, + {CS35L36_SWIRE_P2_TX1_SEL, 0x00000028}, + {CS35L36_SWIRE_P2_TX2_SEL, 0x00000029}, + {CS35L36_SWIRE_P2_TX3_SEL, 0x00000020}, + {CS35L36_SWIRE_DP1_FIFO_CFG, 0x0000001B}, + {CS35L36_SWIRE_DP2_FIFO_CFG, 0x0000001B}, + {CS35L36_SWIRE_DP3_FIFO_CFG, 0x0000001B}, + {CS35L36_SWIRE_PCM_RX_DATA, 0x00000000}, + {CS35L36_SWIRE_FS_SEL, 0x00000001}, + {CS35L36_AMP_DIG_VOL_CTRL, 0x00008000}, + {CS35L36_VPBR_CFG, 0x02AA1905}, + {CS35L36_VBBR_CFG, 0x02AA1905}, + {CS35L36_VPBR_STATUS, 0x00000000}, + {CS35L36_VBBR_STATUS, 0x00000000}, + {CS35L36_OVERTEMP_CFG, 0x00000001}, + {CS35L36_AMP_ERR_VOL, 0x00000000}, + {CS35L36_CLASSH_CFG, 0x000B0405}, + {CS35L36_CLASSH_FET_DRV_CFG, 0x00000111}, + {CS35L36_NG_CFG, 0x00000033}, + {CS35L36_AMP_GAIN_CTRL, 0x00000273}, + {CS35L36_PWM_MOD_IO_CTRL, 0x00000000}, + {CS35L36_PWM_MOD_STATUS, 0x00000000}, + 
{CS35L36_DAC_MSM_CFG, 0x00000000}, + {CS35L36_AMP_SLOPE_CTRL, 0x00000B00}, + {CS35L36_AMP_PDM_VOLUME, 0x00000000}, + {CS35L36_AMP_PDM_RATE_CTRL, 0x00000000}, + {CS35L36_PDM_CH_SEL, 0x00000000}, + {CS35L36_AMP_NG_CTRL, 0x0000212F}, + {CS35L36_PDM_HIGHFILT_CTRL, 0x00000000}, + {CS35L36_PAC_INT0_CTRL, 0x00000001}, + {CS35L36_PAC_INT1_CTRL, 0x00000001}, + {CS35L36_PAC_INT2_CTRL, 0x00000001}, + {CS35L36_PAC_INT3_CTRL, 0x00000001}, + {CS35L36_PAC_INT4_CTRL, 0x00000001}, + {CS35L36_PAC_INT5_CTRL, 0x00000001}, + {CS35L36_PAC_INT6_CTRL, 0x00000001}, + {CS35L36_PAC_INT7_CTRL, 0x00000001}, +}; + +static bool cs35l36_readable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case CS35L36_SW_RESET: + case CS35L36_SW_REV: + case CS35L36_HW_REV: + case CS35L36_TESTKEY_CTRL: + case CS35L36_USERKEY_CTL: + case CS35L36_OTP_MEM30: + case CS35L36_OTP_CTRL1: + case CS35L36_OTP_CTRL2: + case CS35L36_OTP_CTRL3: + case CS35L36_OTP_CTRL4: + case CS35L36_OTP_CTRL5: + case CS35L36_PAC_CTL1: + case CS35L36_PAC_CTL2: + case CS35L36_PAC_CTL3: + case CS35L36_DEVICE_ID: + case CS35L36_FAB_ID: + case CS35L36_REV_ID: + case CS35L36_PWR_CTRL1: + case CS35L36_PWR_CTRL2: + case CS35L36_PWR_CTRL3: + case CS35L36_CTRL_OVRRIDE: + case CS35L36_AMP_OUT_MUTE: + case CS35L36_OTP_TRIM_STATUS: + case CS35L36_DISCH_FILT: + case CS35L36_PROTECT_REL_ERR: + case CS35L36_PAD_INTERFACE: + case CS35L36_PLL_CLK_CTRL: + case CS35L36_GLOBAL_CLK_CTRL: + case CS35L36_ADC_CLK_CTRL: + case CS35L36_SWIRE_CLK_CTRL: + case CS35L36_SP_SCLK_CLK_CTRL: + case CS35L36_TST_FS_MON0: + case CS35L36_MDSYNC_EN: + case CS35L36_MDSYNC_TX_ID: + case CS35L36_MDSYNC_PWR_CTRL: + case CS35L36_MDSYNC_DATA_TX: + case CS35L36_MDSYNC_TX_STATUS: + case CS35L36_MDSYNC_RX_STATUS: + case CS35L36_MDSYNC_ERR_STATUS: + case CS35L36_BSTCVRT_VCTRL1: + case CS35L36_BSTCVRT_VCTRL2: + case CS35L36_BSTCVRT_PEAK_CUR: + case CS35L36_BSTCVRT_SFT_RAMP: + case CS35L36_BSTCVRT_COEFF: + case CS35L36_BSTCVRT_SLOPE_LBST: + case CS35L36_BSTCVRT_SW_FREQ: + case CS35L36_BSTCVRT_DCM_CTRL: + case CS35L36_BSTCVRT_DCM_MODE_FORCE: + case CS35L36_BSTCVRT_OVERVOLT_CTRL: + case CS35L36_BST_TST_MANUAL: + case CS35L36_BST_ANA2_TEST: + case CS35L36_VPI_LIMIT_MODE: + case CS35L36_VPI_LIMIT_MINMAX: + case CS35L36_VPI_VP_THLD: + case CS35L36_VPI_TRACK_CTRL: + case CS35L36_VPI_TRIG_MODE_CTRL: + case CS35L36_VPI_TRIG_STEPS: + case CS35L36_VI_SPKMON_FILT: + case CS35L36_VI_SPKMON_GAIN: + case CS35L36_VI_SPKMON_IP_SEL: + case CS35L36_DTEMP_WARN_THLD: + case CS35L36_DTEMP_STATUS: + case CS35L36_VPVBST_FS_SEL: + case CS35L36_VPVBST_VP_CTRL: + case CS35L36_VPVBST_VBST_CTRL: + case CS35L36_ASP_TX_PIN_CTRL: + case CS35L36_ASP_RATE_CTRL: + case CS35L36_ASP_FORMAT: + case CS35L36_ASP_FRAME_CTRL: + case CS35L36_ASP_TX1_TX2_SLOT: + case CS35L36_ASP_TX3_TX4_SLOT: + case CS35L36_ASP_TX5_TX6_SLOT: + case CS35L36_ASP_TX7_TX8_SLOT: + case CS35L36_ASP_RX1_SLOT: + case CS35L36_ASP_RX_TX_EN: + case CS35L36_ASP_RX1_SEL: + case CS35L36_ASP_TX1_SEL: + case CS35L36_ASP_TX2_SEL: + case CS35L36_ASP_TX3_SEL: + case CS35L36_ASP_TX4_SEL: + case CS35L36_ASP_TX5_SEL: + case CS35L36_ASP_TX6_SEL: + case CS35L36_SWIRE_P1_TX1_SEL: + case CS35L36_SWIRE_P1_TX2_SEL: + case CS35L36_SWIRE_P2_TX1_SEL: + case CS35L36_SWIRE_P2_TX2_SEL: + case CS35L36_SWIRE_P2_TX3_SEL: + case CS35L36_SWIRE_DP1_FIFO_CFG: + case CS35L36_SWIRE_DP2_FIFO_CFG: + case CS35L36_SWIRE_DP3_FIFO_CFG: + case CS35L36_SWIRE_PCM_RX_DATA: + case CS35L36_SWIRE_FS_SEL: + case CS35L36_AMP_DIG_VOL_CTRL: + case CS35L36_VPBR_CFG: + case CS35L36_VBBR_CFG: + case 
CS35L36_VPBR_STATUS: + case CS35L36_VBBR_STATUS: + case CS35L36_OVERTEMP_CFG: + case CS35L36_AMP_ERR_VOL: + case CS35L36_CLASSH_CFG: + case CS35L36_CLASSH_FET_DRV_CFG: + case CS35L36_NG_CFG: + case CS35L36_AMP_GAIN_CTRL: + case CS35L36_PWM_MOD_IO_CTRL: + case CS35L36_PWM_MOD_STATUS: + case CS35L36_DAC_MSM_CFG: + case CS35L36_AMP_SLOPE_CTRL: + case CS35L36_AMP_PDM_VOLUME: + case CS35L36_AMP_PDM_RATE_CTRL: + case CS35L36_PDM_CH_SEL: + case CS35L36_AMP_NG_CTRL: + case CS35L36_PDM_HIGHFILT_CTRL: + case CS35L36_INT1_STATUS: + case CS35L36_INT2_STATUS: + case CS35L36_INT3_STATUS: + case CS35L36_INT4_STATUS: + case CS35L36_INT1_RAW_STATUS: + case CS35L36_INT2_RAW_STATUS: + case CS35L36_INT3_RAW_STATUS: + case CS35L36_INT4_RAW_STATUS: + case CS35L36_INT1_MASK: + case CS35L36_INT2_MASK: + case CS35L36_INT3_MASK: + case CS35L36_INT4_MASK: + case CS35L36_INT1_EDGE_LVL_CTRL: + case CS35L36_INT3_EDGE_LVL_CTRL: + case CS35L36_PAC_INT_STATUS: + case CS35L36_PAC_INT_RAW_STATUS: + case CS35L36_PAC_INT_FLUSH_CTRL: + case CS35L36_PAC_INT0_CTRL: + case CS35L36_PAC_INT1_CTRL: + case CS35L36_PAC_INT2_CTRL: + case CS35L36_PAC_INT3_CTRL: + case CS35L36_PAC_INT4_CTRL: + case CS35L36_PAC_INT5_CTRL: + case CS35L36_PAC_INT6_CTRL: + case CS35L36_PAC_INT7_CTRL: + return true; + default: + if (reg >= CS35L36_PAC_PMEM_WORD0 && + reg <= CS35L36_PAC_PMEM_WORD1023) + return true; + else + return false; + } +} + +static bool cs35l36_precious_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case CS35L36_TESTKEY_CTRL: + case CS35L36_USERKEY_CTL: + case CS35L36_TST_FS_MON0: + return true; + default: + return false; + } +} + +static bool cs35l36_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case CS35L36_SW_RESET: + case CS35L36_SW_REV: + case CS35L36_HW_REV: + case CS35L36_TESTKEY_CTRL: + case CS35L36_USERKEY_CTL: + case CS35L36_DEVICE_ID: + case CS35L36_FAB_ID: + case CS35L36_REV_ID: + case CS35L36_INT1_STATUS: + case CS35L36_INT2_STATUS: + case CS35L36_INT3_STATUS: + case CS35L36_INT4_STATUS: + case CS35L36_INT1_RAW_STATUS: + case CS35L36_INT2_RAW_STATUS: + case CS35L36_INT3_RAW_STATUS: + case CS35L36_INT4_RAW_STATUS: + case CS35L36_INT1_MASK: + case CS35L36_INT2_MASK: + case CS35L36_INT3_MASK: + case CS35L36_INT4_MASK: + case CS35L36_INT1_EDGE_LVL_CTRL: + case CS35L36_INT3_EDGE_LVL_CTRL: + case CS35L36_PAC_INT_STATUS: + case CS35L36_PAC_INT_RAW_STATUS: + case CS35L36_PAC_INT_FLUSH_CTRL: + return true; + default: + if (reg >= CS35L36_PAC_PMEM_WORD0 && + reg <= CS35L36_PAC_PMEM_WORD1023) + return true; + else + return false; + } +} + +static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 25, 0); +static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1); + +static const char * const cs35l36_pcm_sftramp_text[] = { + "Off", ".5ms", "1ms", "2ms", "4ms", "8ms", "15ms", "30ms"}; + +static SOC_ENUM_SINGLE_DECL(pcm_sft_ramp, CS35L36_AMP_DIG_VOL_CTRL, 0, + cs35l36_pcm_sftramp_text); + +static int cs35l36_ldm_sel_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = + snd_soc_kcontrol_component(kcontrol); + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component); + + ucontrol->value.integer.value[0] = cs35l36->ldm_mode_sel; + + return 0; +} + +static int cs35l36_ldm_sel_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = + snd_soc_kcontrol_component(kcontrol); + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component); + int val = 
(ucontrol->value.integer.value[0]) ? CS35L36_NG_AMP_EN_MASK : + 0; + + cs35l36->ldm_mode_sel = val; + + regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG, + CS35L36_NG_AMP_EN_MASK, val); + + return 0; +} + +static const struct snd_kcontrol_new cs35l36_aud_controls[] = { + SOC_SINGLE_SX_TLV("Digital PCM Volume", CS35L36_AMP_DIG_VOL_CTRL, + 3, 0x4D0, 0x390, dig_vol_tlv), + SOC_SINGLE_TLV("Analog PCM Volume", CS35L36_AMP_GAIN_CTRL, 5, 0x13, 0, + amp_gain_tlv), + SOC_ENUM("PCM Soft Ramp", pcm_sft_ramp), + SOC_SINGLE("Amp Gain Zero-Cross Switch", CS35L36_AMP_GAIN_CTRL, + CS35L36_AMP_ZC_SHIFT, 1, 0), + SOC_SINGLE("PDM LDM Enter Ramp Switch", CS35L36_DAC_MSM_CFG, + CS35L36_PDM_LDM_ENTER_SHIFT, 1, 0), + SOC_SINGLE("PDM LDM Exit Ramp Switch", CS35L36_DAC_MSM_CFG, + CS35L36_PDM_LDM_EXIT_SHIFT, 1, 0), + SOC_SINGLE_BOOL_EXT("LDM Select Switch", 0, cs35l36_ldm_sel_get, + cs35l36_ldm_sel_put), +}; + +static int cs35l36_main_amp_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = + snd_soc_dapm_to_component(w->dapm); + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component); + u32 reg; + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL1, + CS35L36_GLOBAL_EN_MASK, + 1 << CS35L36_GLOBAL_EN_SHIFT); + + usleep_range(2000, 2100); + + regmap_read(cs35l36->regmap, CS35L36_INT4_RAW_STATUS, &reg); + + if (WARN_ON_ONCE(reg & CS35L36_PLL_UNLOCK_MASK)) + dev_crit(cs35l36->dev, "PLL Unlocked\n"); + + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RX1_SEL, + CS35L36_PCM_RX_SEL_MASK, + CS35L36_PCM_RX_SEL_PCM); + regmap_update_bits(cs35l36->regmap, CS35L36_AMP_OUT_MUTE, + CS35L36_AMP_MUTE_MASK, + 0 << CS35L36_AMP_MUTE_SHIFT); + break; + case SND_SOC_DAPM_PRE_PMD: + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RX1_SEL, + CS35L36_PCM_RX_SEL_MASK, + CS35L36_PCM_RX_SEL_ZERO); + regmap_update_bits(cs35l36->regmap, CS35L36_AMP_OUT_MUTE, + CS35L36_AMP_MUTE_MASK, + 1 << CS35L36_AMP_MUTE_SHIFT); + break; + case SND_SOC_DAPM_POST_PMD: + regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL1, + CS35L36_GLOBAL_EN_MASK, + 0 << CS35L36_GLOBAL_EN_SHIFT); + + usleep_range(2000, 2100); + break; + default: + dev_dbg(component->dev, "Invalid event = 0x%x\n", event); + return -EINVAL; + } + + return 0; +} + +static int cs35l36_boost_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = + snd_soc_dapm_to_component(w->dapm); + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component); + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + if (!cs35l36->pdata.extern_boost) + regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL2, + CS35L36_BST_EN_MASK, + CS35L36_BST_EN << + CS35L36_BST_EN_SHIFT); + break; + case SND_SOC_DAPM_POST_PMD: + if (!cs35l36->pdata.extern_boost) + regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL2, + CS35L36_BST_EN_MASK, + CS35L36_BST_DIS_VP << + CS35L36_BST_EN_SHIFT); + break; + default: + dev_dbg(component->dev, "Invalid event = 0x%x\n", event); + return -EINVAL; + } + + return 0; +} + +static const char * const cs35l36_chan_text[] = { + "RX1", + "RX2", +}; + +static SOC_ENUM_SINGLE_DECL(chansel_enum, CS35L36_ASP_RX1_SLOT, 0, + cs35l36_chan_text); + +static const struct snd_kcontrol_new cs35l36_chan_mux = + SOC_DAPM_ENUM("Input Mux", chansel_enum); + +static const struct snd_kcontrol_new amp_enable_ctrl = + SOC_DAPM_SINGLE_AUTODISABLE("Switch", CS35L36_AMP_OUT_MUTE,
CS35L36_AMP_MUTE_SHIFT, 1, 1); + +static const struct snd_kcontrol_new boost_ctrl = + SOC_DAPM_SINGLE_VIRT("Switch", 1); + +static const char * const asp_tx_src_text[] = { + "Zero Fill", "ASPRX1", "VMON", "IMON", "ERRVOL", "VPMON", "VBSTMON" +}; + +static const unsigned int asp_tx_src_values[] = { + 0x00, 0x08, 0x18, 0x19, 0x20, 0x28, 0x29 +}; + +static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx1_src_enum, CS35L36_ASP_TX1_SEL, 0, + CS35L36_APS_TX_SEL_MASK, asp_tx_src_text, + asp_tx_src_values); + +static const struct snd_kcontrol_new asp_tx1_src = + SOC_DAPM_ENUM("ASPTX1SRC", asp_tx1_src_enum); + +static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx2_src_enum, CS35L36_ASP_TX2_SEL, 0, + CS35L36_APS_TX_SEL_MASK, asp_tx_src_text, + asp_tx_src_values); + +static const struct snd_kcontrol_new asp_tx2_src = + SOC_DAPM_ENUM("ASPTX2SRC", asp_tx2_src_enum); + +static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx3_src_enum, CS35L36_ASP_TX3_SEL, 0, + CS35L36_APS_TX_SEL_MASK, asp_tx_src_text, + asp_tx_src_values); + +static const struct snd_kcontrol_new asp_tx3_src = + SOC_DAPM_ENUM("ASPTX3SRC", asp_tx3_src_enum); + +static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx4_src_enum, CS35L36_ASP_TX4_SEL, 0, + CS35L36_APS_TX_SEL_MASK, asp_tx_src_text, + asp_tx_src_values); + +static const struct snd_kcontrol_new asp_tx4_src = + SOC_DAPM_ENUM("ASPTX4SRC", asp_tx4_src_enum); + +static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx5_src_enum, CS35L36_ASP_TX5_SEL, 0, + CS35L36_APS_TX_SEL_MASK, asp_tx_src_text, + asp_tx_src_values); + +static const struct snd_kcontrol_new asp_tx5_src = + SOC_DAPM_ENUM("ASPTX5SRC", asp_tx5_src_enum); + +static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx6_src_enum, CS35L36_ASP_TX6_SEL, 0, + CS35L36_APS_TX_SEL_MASK, asp_tx_src_text, + asp_tx_src_values); + +static const struct snd_kcontrol_new asp_tx6_src = + SOC_DAPM_ENUM("ASPTX6SRC", asp_tx6_src_enum); + +static const struct snd_soc_dapm_widget cs35l36_dapm_widgets[] = { + SND_SOC_DAPM_MUX("Channel Mux", SND_SOC_NOPM, 0, 0, &cs35l36_chan_mux), + SND_SOC_DAPM_AIF_IN("SDIN", NULL, 0, CS35L36_ASP_RX_TX_EN, 16, 0), + + SND_SOC_DAPM_OUT_DRV_E("Main AMP", CS35L36_PWR_CTRL2, 0, 0, NULL, 0, + cs35l36_main_amp_event, SND_SOC_DAPM_POST_PMD | + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + + SND_SOC_DAPM_OUTPUT("SPK"), + SND_SOC_DAPM_SWITCH("AMP Enable", SND_SOC_NOPM, 0, 1, &amp_enable_ctrl), + SND_SOC_DAPM_MIXER("CLASS H", CS35L36_PWR_CTRL3, 4, 0, NULL, 0), + SND_SOC_DAPM_SWITCH_E("BOOST Enable", SND_SOC_NOPM, 0, 0, &boost_ctrl, + cs35l36_boost_event, SND_SOC_DAPM_POST_PMD | + SND_SOC_DAPM_POST_PMU), + + SND_SOC_DAPM_AIF_OUT("ASPTX1", NULL, 0, CS35L36_ASP_RX_TX_EN, 0, 0), + SND_SOC_DAPM_AIF_OUT("ASPTX2", NULL, 1, CS35L36_ASP_RX_TX_EN, 1, 0), + SND_SOC_DAPM_AIF_OUT("ASPTX3", NULL, 2, CS35L36_ASP_RX_TX_EN, 2, 0), + SND_SOC_DAPM_AIF_OUT("ASPTX4", NULL, 3, CS35L36_ASP_RX_TX_EN, 3, 0), + SND_SOC_DAPM_AIF_OUT("ASPTX5", NULL, 4, CS35L36_ASP_RX_TX_EN, 4, 0), + SND_SOC_DAPM_AIF_OUT("ASPTX6", NULL, 5, CS35L36_ASP_RX_TX_EN, 5, 0), + + SND_SOC_DAPM_MUX("ASPTX1SRC", SND_SOC_NOPM, 0, 0, &asp_tx1_src), + SND_SOC_DAPM_MUX("ASPTX2SRC", SND_SOC_NOPM, 0, 0, &asp_tx2_src), + SND_SOC_DAPM_MUX("ASPTX3SRC", SND_SOC_NOPM, 0, 0, &asp_tx3_src), + SND_SOC_DAPM_MUX("ASPTX4SRC", SND_SOC_NOPM, 0, 0, &asp_tx4_src), + SND_SOC_DAPM_MUX("ASPTX5SRC", SND_SOC_NOPM, 0, 0, &asp_tx5_src), + SND_SOC_DAPM_MUX("ASPTX6SRC", SND_SOC_NOPM, 0, 0, &asp_tx6_src), + + SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L36_PWR_CTRL2, 12, 0), + SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L36_PWR_CTRL2, 13, 0), + SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L36_PWR_CTRL2, 8,
0), + SND_SOC_DAPM_ADC("VBSTMON ADC", NULL, CS35L36_PWR_CTRL2, 9, 0), + + SND_SOC_DAPM_INPUT("VP"), + SND_SOC_DAPM_INPUT("VBST"), + SND_SOC_DAPM_INPUT("VSENSE"), +}; + +static const struct snd_soc_dapm_route cs35l36_audio_map[] = { + {"VPMON ADC", NULL, "VP"}, + {"VBSTMON ADC", NULL, "VBST"}, + {"IMON ADC", NULL, "VSENSE"}, + {"VMON ADC", NULL, "VSENSE"}, + + {"ASPTX1SRC", "IMON", "IMON ADC"}, + {"ASPTX1SRC", "VMON", "VMON ADC"}, + {"ASPTX1SRC", "VBSTMON", "VBSTMON ADC"}, + {"ASPTX1SRC", "VPMON", "VPMON ADC"}, + + {"ASPTX2SRC", "IMON", "IMON ADC"}, + {"ASPTX2SRC", "VMON", "VMON ADC"}, + {"ASPTX2SRC", "VBSTMON", "VBSTMON ADC"}, + {"ASPTX2SRC", "VPMON", "VPMON ADC"}, + + {"ASPTX3SRC", "IMON", "IMON ADC"}, + {"ASPTX3SRC", "VMON", "VMON ADC"}, + {"ASPTX3SRC", "VBSTMON", "VBSTMON ADC"}, + {"ASPTX3SRC", "VPMON", "VPMON ADC"}, + + {"ASPTX4SRC", "IMON", "IMON ADC"}, + {"ASPTX4SRC", "VMON", "VMON ADC"}, + {"ASPTX4SRC", "VBSTMON", "VBSTMON ADC"}, + {"ASPTX4SRC", "VPMON", "VPMON ADC"}, + + {"ASPTX5SRC", "IMON", "IMON ADC"}, + {"ASPTX5SRC", "VMON", "VMON ADC"}, + {"ASPTX5SRC", "VBSTMON", "VBSTMON ADC"}, + {"ASPTX5SRC", "VPMON", "VPMON ADC"}, + + {"ASPTX6SRC", "IMON", "IMON ADC"}, + {"ASPTX6SRC", "VMON", "VMON ADC"}, + {"ASPTX6SRC", "VBSTMON", "VBSTMON ADC"}, + {"ASPTX6SRC", "VPMON", "VPMON ADC"}, + + {"ASPTX1", NULL, "ASPTX1SRC"}, + {"ASPTX2", NULL, "ASPTX2SRC"}, + {"ASPTX3", NULL, "ASPTX3SRC"}, + {"ASPTX4", NULL, "ASPTX4SRC"}, + {"ASPTX5", NULL, "ASPTX5SRC"}, + {"ASPTX6", NULL, "ASPTX6SRC"}, + + {"AMP Capture", NULL, "ASPTX1"}, + {"AMP Capture", NULL, "ASPTX2"}, + {"AMP Capture", NULL, "ASPTX3"}, + {"AMP Capture", NULL, "ASPTX4"}, + {"AMP Capture", NULL, "ASPTX5"}, + {"AMP Capture", NULL, "ASPTX6"}, + + {"AMP Enable", "Switch", "AMP Playback"}, + {"SDIN", NULL, "AMP Enable"}, + {"Channel Mux", "RX1", "SDIN"}, + {"Channel Mux", "RX2", "SDIN"}, + {"BOOST Enable", "Switch", "Channel Mux"}, + {"CLASS H", NULL, "BOOST Enable"}, + {"Main AMP", NULL, "Channel Mux"}, + {"Main AMP", NULL, "CLASS H"}, + {"SPK", NULL, "Main AMP"}, +}; + +static int cs35l36_set_dai_fmt(struct snd_soc_dai *component_dai, + unsigned int fmt) +{ + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component_dai->component); + unsigned int asp_fmt, lrclk_fmt, sclk_fmt, slave_mode, clk_frc; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + slave_mode = 1; + break; + case SND_SOC_DAIFMT_CBS_CFS: + slave_mode = 0; + break; + default: + return -EINVAL; + } + + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL, + CS35L36_SCLK_MSTR_MASK, + slave_mode << CS35L36_SCLK_MSTR_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RATE_CTRL, + CS35L36_LRCLK_MSTR_MASK, + slave_mode << CS35L36_LRCLK_MSTR_SHIFT); + + switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) { + case SND_SOC_DAIFMT_CONT: + clk_frc = 1; + break; + case SND_SOC_DAIFMT_GATED: + clk_frc = 0; + break; + default: + return -EINVAL; + } + + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL, + CS35L36_SCLK_FRC_MASK, clk_frc << + CS35L36_SCLK_FRC_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RATE_CTRL, + CS35L36_LRCLK_FRC_MASK, clk_frc << + CS35L36_LRCLK_FRC_SHIFT); + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_A: + asp_fmt = 0; + break; + case SND_SOC_DAIFMT_I2S: + asp_fmt = 2; + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_IF: + lrclk_fmt = 1; + sclk_fmt = 0; + break; + case SND_SOC_DAIFMT_IB_NF: + lrclk_fmt = 0; + 
sclk_fmt = 1; + break; + case SND_SOC_DAIFMT_IB_IF: + lrclk_fmt = 1; + sclk_fmt = 1; + break; + case SND_SOC_DAIFMT_NB_NF: + lrclk_fmt = 0; + sclk_fmt = 0; + break; + default: + return -EINVAL; + } + + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RATE_CTRL, + CS35L36_LRCLK_INV_MASK, + lrclk_fmt << CS35L36_LRCLK_INV_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL, + CS35L36_SCLK_INV_MASK, + sclk_fmt << CS35L36_SCLK_INV_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_FORMAT, + CS35L36_ASP_FMT_MASK, asp_fmt); + + return 0; +} + +struct cs35l36_global_fs_config { + int rate; + int fs_cfg; +}; + +static const struct cs35l36_global_fs_config cs35l36_fs_rates[] = { + {12000, 0x01}, + {24000, 0x02}, + {48000, 0x03}, + {96000, 0x04}, + {192000, 0x05}, + {384000, 0x06}, + {11025, 0x09}, + {22050, 0x0A}, + {44100, 0x0B}, + {88200, 0x0C}, + {176400, 0x0D}, + {8000, 0x11}, + {16000, 0x12}, + {32000, 0x13}, +}; + +static int cs35l36_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(dai->component); + unsigned int asp_width, global_fs = params_rate(params); + int i; + + for (i = 0; i < ARRAY_SIZE(cs35l36_fs_rates); i++) { + if (global_fs == cs35l36_fs_rates[i].rate) + regmap_update_bits(cs35l36->regmap, + CS35L36_GLOBAL_CLK_CTRL, + CS35L36_GLOBAL_FS_MASK, + cs35l36_fs_rates[i].fs_cfg << + CS35L36_GLOBAL_FS_SHIFT); + } + + switch (params_width(params)) { + case 16: + asp_width = CS35L36_ASP_WIDTH_16; + break; + case 24: + asp_width = CS35L36_ASP_WIDTH_24; + break; + case 32: + asp_width = CS35L36_ASP_WIDTH_32; + break; + default: + return -EINVAL; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_FRAME_CTRL, + CS35L36_ASP_RX_WIDTH_MASK, + asp_width << CS35L36_ASP_RX_WIDTH_SHIFT); + } else { + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_FRAME_CTRL, + CS35L36_ASP_TX_WIDTH_MASK, + asp_width << CS35L36_ASP_TX_WIDTH_SHIFT); + } + + return 0; +} + +static int cs35l36_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, + unsigned int freq, int dir) +{ + struct snd_soc_component *component = dai->component; + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component); + int fs1, fs2; + + if (freq > CS35L36_FS_NOM_6MHZ) { + fs1 = CS35L36_FS1_DEFAULT_VAL; + fs2 = CS35L36_FS2_DEFAULT_VAL; + } else { + fs1 = 3 * ((CS35L36_FS_NOM_6MHZ * 4 + freq - 1) / freq) + 4; + fs2 = 5 * ((CS35L36_FS_NOM_6MHZ * 4 + freq - 1) / freq) + 4; + } + + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK2); + + regmap_update_bits(cs35l36->regmap, CS35L36_TST_FS_MON0, + CS35L36_FS1_WINDOW_MASK | CS35L36_FS2_WINDOW_MASK, + fs1 | (fs2 << CS35L36_FS2_WINDOW_SHIFT)); + + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK2); + return 0; +} + +static const struct cs35l36_pll_config *cs35l36_get_clk_config( + struct cs35l36_private *cs35l36, int freq) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cs35l36_pll_sysclk); i++) { + if (cs35l36_pll_sysclk[i].freq == freq) + return &cs35l36_pll_sysclk[i]; + } + + return NULL; +} + +static const unsigned int cs35l36_src_rates[] = { + 8000, 12000, 11025, 16000, 22050, 24000, 32000, + 44100, 48000, 88200, 96000, 176400, 192000, 384000 +}; + +static const struct 
snd_pcm_hw_constraint_list cs35l36_constraints = { + .count = ARRAY_SIZE(cs35l36_src_rates), + .list = cs35l36_src_rates, +}; + +static int cs35l36_pcm_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + snd_pcm_hw_constraint_list(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, &cs35l36_constraints); + + return 0; +} + +static const struct snd_soc_dai_ops cs35l36_ops = { + .startup = cs35l36_pcm_startup, + .set_fmt = cs35l36_set_dai_fmt, + .hw_params = cs35l36_pcm_hw_params, + .set_sysclk = cs35l36_dai_set_sysclk, +}; + +static struct snd_soc_dai_driver cs35l36_dai[] = { + { + .name = "cs35l36-pcm", + .id = 0, + .playback = { + .stream_name = "AMP Playback", + .channels_min = 1, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_KNOT, + .formats = CS35L36_RX_FORMATS, + }, + .capture = { + .stream_name = "AMP Capture", + .channels_min = 1, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_KNOT, + .formats = CS35L36_TX_FORMATS, + }, + .ops = &cs35l36_ops, + .symmetric_rates = 1, + }, +}; + +static int cs35l36_component_set_sysclk(struct snd_soc_component *component, + int clk_id, int source, unsigned int freq, + int dir) +{ + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component); + const struct cs35l36_pll_config *clk_cfg; + int prev_clksrc; + bool pdm_switch; + + prev_clksrc = cs35l36->clksrc; + + switch (clk_id) { + case 0: + cs35l36->clksrc = CS35L36_PLLSRC_SCLK; + break; + case 1: + cs35l36->clksrc = CS35L36_PLLSRC_LRCLK; + break; + case 2: + cs35l36->clksrc = CS35L36_PLLSRC_PDMCLK; + break; + case 3: + cs35l36->clksrc = CS35L36_PLLSRC_SELF; + break; + case 4: + cs35l36->clksrc = CS35L36_PLLSRC_MCLK; + break; + default: + return -EINVAL; + } + + clk_cfg = cs35l36_get_clk_config(cs35l36, freq); + if (clk_cfg == NULL) { + dev_err(component->dev, "Invalid CLK Config Freq: %d\n", freq); + return -EINVAL; + } + + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL, + CS35L36_PLL_OPENLOOP_MASK, + 1 << CS35L36_PLL_OPENLOOP_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL, + CS35L36_REFCLK_FREQ_MASK, + clk_cfg->clk_cfg << CS35L36_REFCLK_FREQ_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL, + CS35L36_PLL_REFCLK_EN_MASK, + 0 << CS35L36_PLL_REFCLK_EN_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL, + CS35L36_PLL_CLK_SEL_MASK, + cs35l36->clksrc); + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL, + CS35L36_PLL_OPENLOOP_MASK, + 0 << CS35L36_PLL_OPENLOOP_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL, + CS35L36_PLL_REFCLK_EN_MASK, + 1 << CS35L36_PLL_REFCLK_EN_SHIFT); + + if (cs35l36->rev_id == CS35L36_REV_A0) { + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK2); + + regmap_write(cs35l36->regmap, CS35L36_DCO_CTRL, 0x00036DA8); + regmap_write(cs35l36->regmap, CS35L36_MISC_CTRL, 0x0100EE0E); + + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_LOOP_PARAMS, + CS35L36_PLL_IGAIN_MASK, + CS35L36_PLL_IGAIN << + CS35L36_PLL_IGAIN_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_PLL_LOOP_PARAMS, + CS35L36_PLL_FFL_IGAIN_MASK, + clk_cfg->fll_igain); + + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK2); + } + + if (cs35l36->clksrc == CS35L36_PLLSRC_PDMCLK) { + pdm_switch = cs35l36->ldm_mode_sel && + (prev_clksrc != CS35L36_PLLSRC_PDMCLK); + + if (pdm_switch) + 
regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG, + CS35L36_NG_DELAY_MASK, + 0 << CS35L36_NG_DELAY_SHIFT); + + regmap_update_bits(cs35l36->regmap, CS35L36_DAC_MSM_CFG, + CS35L36_PDM_MODE_MASK, + 1 << CS35L36_PDM_MODE_SHIFT); + + if (pdm_switch) + regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG, + CS35L36_NG_DELAY_MASK, + 3 << CS35L36_NG_DELAY_SHIFT); + } else { + pdm_switch = cs35l36->ldm_mode_sel && + (prev_clksrc == CS35L36_PLLSRC_PDMCLK); + + if (pdm_switch) + regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG, + CS35L36_NG_DELAY_MASK, + 0 << CS35L36_NG_DELAY_SHIFT); + + regmap_update_bits(cs35l36->regmap, CS35L36_DAC_MSM_CFG, + CS35L36_PDM_MODE_MASK, + 0 << CS35L36_PDM_MODE_SHIFT); + + if (pdm_switch) + regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG, + CS35L36_NG_DELAY_MASK, + 3 << CS35L36_NG_DELAY_SHIFT); + } + + return 0; +} + +static int cs35l36_boost_inductor(struct cs35l36_private *cs35l36, int inductor) +{ + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_COEFF, + CS35L36_BSTCVRT_K1_MASK, 0x3C); + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_COEFF, + CS35L36_BSTCVRT_K2_MASK, + 0x3C << CS35L36_BSTCVRT_K2_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SW_FREQ, + CS35L36_BSTCVRT_CCMFREQ_MASK, 0x00); + + switch (inductor) { + case 1000: /* 1 uH */ + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST, + CS35L36_BSTCVRT_SLOPE_MASK, + 0x75 << CS35L36_BSTCVRT_SLOPE_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST, + CS35L36_BSTCVRT_LBSTVAL_MASK, 0x00); + break; + case 1200: /* 1.2 uH */ + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST, + CS35L36_BSTCVRT_SLOPE_MASK, + 0x6B << CS35L36_BSTCVRT_SLOPE_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST, + CS35L36_BSTCVRT_LBSTVAL_MASK, 0x01); + break; + default: + dev_err(cs35l36->dev, "%s Invalid Inductor Value %d uH\n", + __func__, inductor); + return -EINVAL; + } + + return 0; +} + +static int cs35l36_component_probe(struct snd_soc_component *component) +{ + struct cs35l36_private *cs35l36 = + snd_soc_component_get_drvdata(component); + int ret = 0; + + if ((cs35l36->rev_id == CS35L36_REV_A0) && cs35l36->pdata.dcm_mode) { + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_DCM_CTRL, + CS35L36_DCM_AUTO_MASK, + CS35L36_DCM_AUTO_MASK); + + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK2); + + regmap_update_bits(cs35l36->regmap, CS35L36_BST_TST_MANUAL, + CS35L36_BST_MAN_IPKCOMP_MASK, + 0 << CS35L36_BST_MAN_IPKCOMP_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_BST_TST_MANUAL, + CS35L36_BST_MAN_IPKCOMP_EN_MASK, + CS35L36_BST_MAN_IPKCOMP_EN_MASK); + + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK2); + } + + if (cs35l36->pdata.amp_pcm_inv) + regmap_update_bits(cs35l36->regmap, CS35L36_AMP_DIG_VOL_CTRL, + CS35L36_AMP_PCM_INV_MASK, + CS35L36_AMP_PCM_INV_MASK); + + if (cs35l36->pdata.multi_amp_mode) + regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL, + CS35L36_ASP_TX_HIZ_MASK, + CS35L36_ASP_TX_HIZ_MASK); + + if (cs35l36->pdata.imon_pol_inv) + regmap_update_bits(cs35l36->regmap, CS35L36_VI_SPKMON_FILT, + CS35L36_IMON_POL_MASK, 0); + + if (cs35l36->pdata.vmon_pol_inv) + regmap_update_bits(cs35l36->regmap, CS35L36_VI_SPKMON_FILT, + CS35L36_VMON_POL_MASK, 0); + + if (cs35l36->pdata.bst_vctl) + 
regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL1, + CS35L35_BSTCVRT_CTL_MASK, + cs35l36->pdata.bst_vctl); + + if (cs35l36->pdata.bst_vctl_sel) + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL2, + CS35L35_BSTCVRT_CTL_SEL_MASK, + cs35l36->pdata.bst_vctl_sel); + + if (cs35l36->pdata.bst_ipk) + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_PEAK_CUR, + CS35L36_BST_IPK_MASK, + cs35l36->pdata.bst_ipk); + + if (cs35l36->pdata.boost_ind) { + ret = cs35l36_boost_inductor(cs35l36, cs35l36->pdata.boost_ind); + if (ret < 0) { + dev_err(cs35l36->dev, + "Boost inductor config failed(%d)\n", ret); + return ret; + } + } + + if (cs35l36->pdata.temp_warn_thld) + regmap_update_bits(cs35l36->regmap, CS35L36_DTEMP_WARN_THLD, + CS35L36_TEMP_THLD_MASK, + cs35l36->pdata.temp_warn_thld); + + if (cs35l36->pdata.irq_drv_sel) + regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE, + CS35L36_INT_DRV_SEL_MASK, + cs35l36->pdata.irq_drv_sel << + CS35L36_INT_DRV_SEL_SHIFT); + + if (cs35l36->pdata.irq_gpio_sel) + regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE, + CS35L36_INT_GPIO_SEL_MASK, + cs35l36->pdata.irq_gpio_sel << + CS35L36_INT_GPIO_SEL_SHIFT); + + /* + * Rev B0 has 2 versions + * L36 is 10V + * L37 is 12V + * If L36 we need to clamp some values for safety + * after probe has setup dt values. We want to make + * sure we dont miss any values set in probe + */ + if (cs35l36->chip_version == CS35L36_10V_L36) { + regmap_update_bits(cs35l36->regmap, + CS35L36_BSTCVRT_OVERVOLT_CTRL, + CS35L36_BST_OVP_THLD_MASK, + CS35L36_BST_OVP_THLD_11V); + + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK2); + + regmap_update_bits(cs35l36->regmap, CS35L36_BST_ANA2_TEST, + CS35L36_BST_OVP_TRIM_MASK, + CS35L36_BST_OVP_TRIM_11V << + CS35L36_BST_OVP_TRIM_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL2, + CS35L36_BST_CTRL_LIM_MASK, + 1 << CS35L36_BST_CTRL_LIM_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL1, + CS35L35_BSTCVRT_CTL_MASK, + CS35L36_BST_CTRL_10V_CLAMP); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK2); + } + + /* + * RevA and B require the disabling of + * SYNC_GLOBAL_OVR when GLOBAL_EN = 0. 
+ * Just turn it off from default + */ + regmap_update_bits(cs35l36->regmap, CS35L36_CTRL_OVRRIDE, + CS35L36_SYNC_GLOBAL_OVR_MASK, + 0 << CS35L36_SYNC_GLOBAL_OVR_SHIFT); + + return 0; +} + +static const struct snd_soc_component_driver soc_component_dev_cs35l36 = { + .probe = &cs35l36_component_probe, + .set_sysclk = cs35l36_component_set_sysclk, + .dapm_widgets = cs35l36_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(cs35l36_dapm_widgets), + .dapm_routes = cs35l36_audio_map, + .num_dapm_routes = ARRAY_SIZE(cs35l36_audio_map), + .controls = cs35l36_aud_controls, + .num_controls = ARRAY_SIZE(cs35l36_aud_controls), + .idle_bias_on = 1, + .use_pmdown_time = 1, + .endianness = 1, + .non_legacy_dai_naming = 1, +}; + +static struct regmap_config cs35l36_regmap = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = CS35L36_PAC_PMEM_WORD1023, + .reg_defaults = cs35l36_reg, + .num_reg_defaults = ARRAY_SIZE(cs35l36_reg), + .precious_reg = cs35l36_precious_reg, + .volatile_reg = cs35l36_volatile_reg, + .readable_reg = cs35l36_readable_reg, + .cache_type = REGCACHE_RBTREE, +}; + +static irqreturn_t cs35l36_irq(int irq, void *data) +{ + struct cs35l36_private *cs35l36 = data; + unsigned int status[4]; + unsigned int masks[4]; + int ret = IRQ_NONE; + + /* ack the irq by reading all status registers */ + regmap_bulk_read(cs35l36->regmap, CS35L36_INT1_STATUS, status, + ARRAY_SIZE(status)); + + regmap_bulk_read(cs35l36->regmap, CS35L36_INT1_MASK, masks, + ARRAY_SIZE(masks)); + + /* Check to see if unmasked bits are active */ + if (!(status[0] & ~masks[0]) && !(status[1] & ~masks[1]) && + !(status[2] & ~masks[2]) && !(status[3] & ~masks[3])) { + return IRQ_NONE; + } + + /* + * The following interrupts require a + * protection release cycle to get the + * speaker out of Safe-Mode. 
+ */ + if (status[2] & CS35L36_AMP_SHORT_ERR) { + dev_crit(cs35l36->dev, "Amp short error\n"); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_AMP_SHORT_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_AMP_SHORT_ERR_RLS, + CS35L36_AMP_SHORT_ERR_RLS); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_AMP_SHORT_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_INT3_STATUS, + CS35L36_AMP_SHORT_ERR, + CS35L36_AMP_SHORT_ERR); + ret = IRQ_HANDLED; + } + + if (status[0] & CS35L36_TEMP_WARN) { + dev_crit(cs35l36->dev, "Over temperature warning\n"); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_WARN_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_WARN_ERR_RLS, + CS35L36_TEMP_WARN_ERR_RLS); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_WARN_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS, + CS35L36_TEMP_WARN, CS35L36_TEMP_WARN); + ret = IRQ_HANDLED; + } + + if (status[0] & CS35L36_TEMP_ERR) { + dev_crit(cs35l36->dev, "Over temperature error\n"); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_ERR_RLS, CS35L36_TEMP_ERR_RLS); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS, + CS35L36_TEMP_ERR, CS35L36_TEMP_ERR); + ret = IRQ_HANDLED; + } + + if (status[0] & CS35L36_BST_OVP_ERR) { + dev_crit(cs35l36->dev, "VBST Over Voltage error\n"); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_ERR_RLS, CS35L36_TEMP_ERR_RLS); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_TEMP_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS, + CS35L36_BST_OVP_ERR, CS35L36_BST_OVP_ERR); + ret = IRQ_HANDLED; + } + + if (status[0] & CS35L36_BST_DCM_UVP_ERR) { + dev_crit(cs35l36->dev, "DCM VBST Under Voltage Error\n"); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_BST_UVP_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_BST_UVP_ERR_RLS, + CS35L36_BST_UVP_ERR_RLS); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_BST_UVP_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS, + CS35L36_BST_DCM_UVP_ERR, + CS35L36_BST_DCM_UVP_ERR); + ret = IRQ_HANDLED; + } + + if (status[0] & CS35L36_BST_SHORT_ERR) { + dev_crit(cs35l36->dev, "LBST SHORT error!\n"); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_BST_SHORT_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_BST_SHORT_ERR_RLS, + CS35L36_BST_SHORT_ERR_RLS); + regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR, + CS35L36_BST_SHORT_ERR_RLS, 0); + regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS, + CS35L36_BST_SHORT_ERR, + CS35L36_BST_SHORT_ERR); + ret = IRQ_HANDLED; + } + + return ret; +} + +static int cs35l36_handle_of_data(struct i2c_client *i2c_client, + struct cs35l36_platform_data *pdata) +{ + struct device_node *np = i2c_client->dev.of_node; + struct cs35l36_vpbr_cfg *vpbr_config = &pdata->vpbr_config; + struct device_node *vpbr_node; + unsigned int val; + int ret; + + if (!np) + return 0; + + ret = 
of_property_read_u32(np, "cirrus,boost-ctl-millivolt", &val); + if (!ret) { + if (val < 2550 || val > 12000) { + dev_err(&i2c_client->dev, + "Invalid Boost Voltage %d mV\n", val); + return -EINVAL; + } + pdata->bst_vctl = (((val - 2550) / 100) + 1) << 1; + } else { + dev_err(&i2c_client->dev, + "Unable to find required parameter 'cirrus,boost-ctl-millivolt'"); + return -EINVAL; + } + + ret = of_property_read_u32(np, "cirrus,boost-ctl-select", &val); + if (!ret) + pdata->bst_vctl_sel = val | CS35L36_VALID_PDATA; + + ret = of_property_read_u32(np, "cirrus,boost-peak-milliamp", &val); + if (!ret) { + if (val < 1600 || val > 4500) { + dev_err(&i2c_client->dev, + "Invalid Boost Peak Current %u mA\n", val); + return -EINVAL; + } + + pdata->bst_ipk = (val - 1600) / 50; + } else { + dev_err(&i2c_client->dev, + "Unable to find required parameter 'cirrus,boost-peak-milliamp'"); + return -EINVAL; + } + + pdata->multi_amp_mode = of_property_read_bool(np, + "cirrus,multi-amp-mode"); + + pdata->dcm_mode = of_property_read_bool(np, + "cirrus,dcm-mode-enable"); + + pdata->amp_pcm_inv = of_property_read_bool(np, + "cirrus,amp-pcm-inv"); + + pdata->imon_pol_inv = of_property_read_bool(np, + "cirrus,imon-pol-inv"); + + pdata->vmon_pol_inv = of_property_read_bool(np, + "cirrus,vmon-pol-inv"); + + if (of_property_read_u32(np, "cirrus,temp-warn-threshold", &val) >= 0) + pdata->temp_warn_thld = val | CS35L36_VALID_PDATA; + + if (of_property_read_u32(np, "cirrus,boost-ind-nanohenry", &val) >= 0) { + pdata->boost_ind = val; + } else { + dev_err(&i2c_client->dev, "Inductor not specified.\n"); + return -EINVAL; + } + + if (of_property_read_u32(np, "cirrus,irq-drive-select", &val) >= 0) + pdata->irq_drv_sel = val | CS35L36_VALID_PDATA; + + if (of_property_read_u32(np, "cirrus,irq-gpio-select", &val) >= 0) + pdata->irq_gpio_sel = val | CS35L36_VALID_PDATA; + + /* VPBR Config */ + vpbr_node = of_get_child_by_name(np, "cirrus,vpbr-config"); + vpbr_config->is_present = vpbr_node ? 
true : false; + if (vpbr_config->is_present) { + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-en", + &val) >= 0) + vpbr_config->vpbr_en = val; + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-thld", + &val) >= 0) + vpbr_config->vpbr_thld = val; + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-atk-rate", + &val) >= 0) + vpbr_config->vpbr_atk_rate = val; + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-atk-vol", + &val) >= 0) + vpbr_config->vpbr_atk_vol = val; + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-max-attn", + &val) >= 0) + vpbr_config->vpbr_max_attn = val; + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-wait", + &val) >= 0) + vpbr_config->vpbr_wait = val; + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-rel-rate", + &val) >= 0) + vpbr_config->vpbr_rel_rate = val; + if (of_property_read_u32(vpbr_node, "cirrus,vpbr-mute-en", + &val) >= 0) + vpbr_config->vpbr_mute_en = val; + } + of_node_put(vpbr_node); + + return 0; +} + +static int cs35l36_pac(struct cs35l36_private *cs35l36) +{ + int ret, count; + unsigned int val; + + if (cs35l36->rev_id != CS35L36_REV_B0) + return 0; + + /* + * Magic code for internal PAC + */ + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_UNLOCK2); + + usleep_range(9500, 10500); + + regmap_write(cs35l36->regmap, CS35L36_PAC_CTL1, + CS35L36_PAC_RESET); + regmap_write(cs35l36->regmap, CS35L36_PAC_CTL3, + CS35L36_PAC_MEM_ACCESS); + regmap_write(cs35l36->regmap, CS35L36_PAC_PMEM_WORD0, + CS35L36_B0_PAC_PATCH); + + regmap_write(cs35l36->regmap, CS35L36_PAC_CTL3, + CS35L36_PAC_MEM_ACCESS_CLR); + regmap_write(cs35l36->regmap, CS35L36_PAC_CTL1, + CS35L36_PAC_ENABLE_MASK); + + usleep_range(9500, 10500); + + ret = regmap_read(cs35l36->regmap, CS35L36_INT4_STATUS, &val); + if (ret < 0) { + dev_err(cs35l36->dev, "Failed to read int4_status %d\n", ret); + return ret; + } + + count = 0; + while (!(val & CS35L36_MCU_CONFIG_CLR)) { + usleep_range(100, 200); + count++; + + ret = regmap_read(cs35l36->regmap, CS35L36_INT4_STATUS, + &val); + if (ret < 0) { + dev_err(cs35l36->dev, "Failed to read int4_status %d\n", + ret); + return ret; + } + + if (count >= 100) + return -EINVAL; + } + + regmap_write(cs35l36->regmap, CS35L36_INT4_STATUS, + CS35L36_MCU_CONFIG_CLR); + regmap_update_bits(cs35l36->regmap, CS35L36_PAC_CTL1, + CS35L36_PAC_ENABLE_MASK, 0); + + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK1); + regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL, + CS35L36_TEST_LOCK2); + + return 0; +} + +static void cs35l36_apply_vpbr_config(struct cs35l36_private *cs35l36) +{ + struct cs35l36_platform_data *pdata = &cs35l36->pdata; + struct cs35l36_vpbr_cfg *vpbr_config = &pdata->vpbr_config; + + regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL3, + CS35L36_VPBR_EN_MASK, + vpbr_config->vpbr_en << + CS35L36_VPBR_EN_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG, + CS35L36_VPBR_THLD_MASK, + vpbr_config->vpbr_thld << + CS35L36_VPBR_THLD_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG, + CS35L36_VPBR_MAX_ATTN_MASK, + vpbr_config->vpbr_max_attn << + CS35L36_VPBR_MAX_ATTN_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG, + CS35L36_VPBR_ATK_VOL_MASK, + vpbr_config->vpbr_atk_vol << + CS35L36_VPBR_ATK_VOL_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG, + CS35L36_VPBR_ATK_RATE_MASK, + vpbr_config->vpbr_atk_rate << + CS35L36_VPBR_ATK_RATE_SHIFT); + regmap_update_bits(cs35l36->regmap, 
CS35L36_VPBR_CFG, + CS35L36_VPBR_WAIT_MASK, + vpbr_config->vpbr_wait << + CS35L36_VPBR_WAIT_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG, + CS35L36_VPBR_REL_RATE_MASK, + vpbr_config->vpbr_rel_rate << + CS35L36_VPBR_REL_RATE_SHIFT); + regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG, + CS35L36_VPBR_MUTE_EN_MASK, + vpbr_config->vpbr_mute_en << + CS35L36_VPBR_MUTE_EN_SHIFT); +} + +static const struct reg_sequence cs35l36_reva0_errata_patch[] = { + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK1 }, + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK2 }, + /* Errata Writes */ + { CS35L36_OTP_CTRL1, 0x00002060 }, + { CS35L36_OTP_CTRL2, 0x00000001 }, + { CS35L36_OTP_CTRL1, 0x00002460 }, + { CS35L36_OTP_CTRL2, 0x00000001 }, + { 0x00002088, 0x012A1838 }, + { 0x00003014, 0x0100EE0E }, + { 0x00003008, 0x0008184A }, + { 0x00007418, 0x509001C8 }, + { 0x00007064, 0x0929A800 }, + { 0x00002D10, 0x0002C01C }, + { 0x0000410C, 0x00000A11 }, + { 0x00006E08, 0x8B19140C }, + { 0x00006454, 0x0300000A }, + { CS35L36_AMP_NG_CTRL, 0x000020EF }, + { 0x00007E34, 0x0000000E }, + { 0x0000410C, 0x00000A11 }, + { 0x00007410, 0x20514B00 }, + /* PAC Config */ + { CS35L36_CTRL_OVRRIDE, 0x00000000 }, + { CS35L36_PAC_INT0_CTRL, 0x00860001 }, + { CS35L36_PAC_INT1_CTRL, 0x00860001 }, + { CS35L36_PAC_INT2_CTRL, 0x00860001 }, + { CS35L36_PAC_INT3_CTRL, 0x00860001 }, + { CS35L36_PAC_INT4_CTRL, 0x00860001 }, + { CS35L36_PAC_INT5_CTRL, 0x00860001 }, + { CS35L36_PAC_INT6_CTRL, 0x00860001 }, + { CS35L36_PAC_INT7_CTRL, 0x00860001 }, + { CS35L36_PAC_INT_FLUSH_CTRL, 0x000000FF }, + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK1 }, + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK2 }, +}; + +static const struct reg_sequence cs35l36_revb0_errata_patch[] = { + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK1 }, + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK2 }, + { 0x00007064, 0x0929A800 }, + { 0x00007850, 0x00002FA9 }, + { 0x00007854, 0x0003F1D5 }, + { 0x00007858, 0x0003F5E3 }, + { 0x0000785C, 0x00001137 }, + { 0x00007860, 0x0001A7A5 }, + { 0x00007864, 0x0002F16A }, + { 0x00007868, 0x00003E21 }, + { 0x00007848, 0x00000001 }, + { 0x00003854, 0x05180240 }, + { 0x00007418, 0x509001C8 }, + { 0x0000394C, 0x028764BD }, + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK1 }, + { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK2 }, +}; + +static int cs35l36_i2c_probe(struct i2c_client *i2c_client, + const struct i2c_device_id *id) +{ + struct cs35l36_private *cs35l36; + struct device *dev = &i2c_client->dev; + struct cs35l36_platform_data *pdata = dev_get_platdata(dev); + struct irq_data *irq_d; + int ret, irq_pol, chip_irq_pol, i; + u32 reg_id, reg_revid, l37_id_reg; + + cs35l36 = devm_kzalloc(dev, sizeof(struct cs35l36_private), GFP_KERNEL); + if (!cs35l36) + return -ENOMEM; + + cs35l36->dev = dev; + + i2c_set_clientdata(i2c_client, cs35l36); + cs35l36->regmap = devm_regmap_init_i2c(i2c_client, &cs35l36_regmap); + if (IS_ERR(cs35l36->regmap)) { + ret = PTR_ERR(cs35l36->regmap); + dev_err(dev, "regmap_init() failed: %d\n", ret); + goto err; + } + + cs35l36->num_supplies = ARRAY_SIZE(cs35l36_supplies); + for (i = 0; i < ARRAY_SIZE(cs35l36_supplies); i++) + cs35l36->supplies[i].supply = cs35l36_supplies[i]; + + ret = devm_regulator_bulk_get(dev, cs35l36->num_supplies, + cs35l36->supplies); + if (ret != 0) { + dev_err(dev, "Failed to request core supplies: %d\n", ret); + return ret; + } + + if (pdata) { + cs35l36->pdata = *pdata; + } else { + pdata = devm_kzalloc(dev, sizeof(struct cs35l36_platform_data), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + if 
(i2c_client->dev.of_node) { + ret = cs35l36_handle_of_data(i2c_client, pdata); + if (ret != 0) + return ret; + + } + + cs35l36->pdata = *pdata; + } + + ret = regulator_bulk_enable(cs35l36->num_supplies, cs35l36->supplies); + if (ret != 0) { + dev_err(dev, "Failed to enable core supplies: %d\n", ret); + return ret; + } + + /* returning NULL can be an option if in stereo mode */ + cs35l36->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(cs35l36->reset_gpio)) { + ret = PTR_ERR(cs35l36->reset_gpio); + cs35l36->reset_gpio = NULL; + if (ret == -EBUSY) { + dev_info(dev, "Reset line busy, assuming shared reset\n"); + } else { + dev_err(dev, "Failed to get reset GPIO: %d\n", ret); + goto err_disable_regs; + } + } + + if (cs35l36->reset_gpio) + gpiod_set_value_cansleep(cs35l36->reset_gpio, 1); + + usleep_range(2000, 2100); + + /* initialize amplifier */ + ret = regmap_read(cs35l36->regmap, CS35L36_SW_RESET, ®_id); + if (ret < 0) { + dev_err(dev, "Get Device ID failed %d\n", ret); + goto err; + } + + if (reg_id != CS35L36_CHIP_ID) { + dev_err(dev, "Device ID (%X). Expected ID %X\n", reg_id, + CS35L36_CHIP_ID); + ret = -ENODEV; + goto err; + } + + ret = regmap_read(cs35l36->regmap, CS35L36_REV_ID, ®_revid); + if (ret < 0) { + dev_err(&i2c_client->dev, "Get Revision ID failed %d\n", ret); + goto err; + } + + cs35l36->rev_id = reg_revid >> 8; + + ret = regmap_read(cs35l36->regmap, CS35L36_OTP_MEM30, &l37_id_reg); + if (ret < 0) { + dev_err(&i2c_client->dev, "Failed to read otp_id Register %d\n", + ret); + return ret; + } + + if ((l37_id_reg & CS35L36_OTP_REV_MASK) == CS35L36_OTP_REV_L37) + cs35l36->chip_version = CS35L36_12V_L37; + else + cs35l36->chip_version = CS35L36_10V_L36; + + switch (cs35l36->rev_id) { + case CS35L36_REV_A0: + ret = regmap_register_patch(cs35l36->regmap, + cs35l36_reva0_errata_patch, + ARRAY_SIZE(cs35l36_reva0_errata_patch)); + if (ret < 0) { + dev_err(dev, "Failed to apply A0 errata patch %d\n", + ret); + goto err; + } + break; + case CS35L36_REV_B0: + ret = cs35l36_pac(cs35l36); + if (ret < 0) { + dev_err(dev, "Failed to Trim OTP %d\n", ret); + goto err; + } + + ret = regmap_register_patch(cs35l36->regmap, + cs35l36_revb0_errata_patch, + ARRAY_SIZE(cs35l36_revb0_errata_patch)); + if (ret < 0) { + dev_err(dev, "Failed to apply B0 errata patch %d\n", + ret); + goto err; + } + break; + } + + if (pdata->vpbr_config.is_present) + cs35l36_apply_vpbr_config(cs35l36); + + irq_d = irq_get_irq_data(i2c_client->irq); + if (!irq_d) { + dev_err(&i2c_client->dev, "Invalid IRQ: %d\n", i2c_client->irq); + ret = -ENODEV; + goto err; + } + + irq_pol = irqd_get_trigger_type(irq_d); + + switch (irq_pol) { + case IRQF_TRIGGER_FALLING: + case IRQF_TRIGGER_LOW: + chip_irq_pol = 0; + break; + case IRQF_TRIGGER_RISING: + case IRQF_TRIGGER_HIGH: + chip_irq_pol = 1; + break; + default: + dev_err(cs35l36->dev, "Invalid IRQ polarity: %d\n", irq_pol); + ret = -EINVAL; + goto err; + } + + regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE, + CS35L36_INT_POL_SEL_MASK, + chip_irq_pol << CS35L36_INT_POL_SEL_SHIFT); + + ret = devm_request_threaded_irq(dev, i2c_client->irq, NULL, cs35l36_irq, + IRQF_ONESHOT | irq_pol, "cs35l36", + cs35l36); + if (ret != 0) { + dev_err(dev, "Failed to request IRQ: %d\n", ret); + goto err; + } + + regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE, + CS35L36_INT_OUTPUT_EN_MASK, 1); + + /* Set interrupt masks for critical errors */ + regmap_write(cs35l36->regmap, CS35L36_INT1_MASK, + CS35L36_INT1_MASK_DEFAULT); + 
regmap_write(cs35l36->regmap, CS35L36_INT3_MASK, + CS35L36_INT3_MASK_DEFAULT); + + dev_info(&i2c_client->dev, "Cirrus Logic CS35L%d, Revision: %02X\n", + cs35l36->chip_version, reg_revid >> 8); + + ret = devm_snd_soc_register_component(dev, &soc_component_dev_cs35l36, + cs35l36_dai, + ARRAY_SIZE(cs35l36_dai)); + if (ret < 0) { + dev_err(dev, "%s: Register component failed %d\n", __func__, + ret); + goto err; + } + + return 0; + +err: + gpiod_set_value_cansleep(cs35l36->reset_gpio, 0); + +err_disable_regs: + regulator_bulk_disable(cs35l36->num_supplies, cs35l36->supplies); + return ret; +} + +static int cs35l36_i2c_remove(struct i2c_client *client) +{ + struct cs35l36_private *cs35l36 = i2c_get_clientdata(client); + + /* Reset interrupt masks for device removal */ + regmap_write(cs35l36->regmap, CS35L36_INT1_MASK, + CS35L36_INT1_MASK_RESET); + regmap_write(cs35l36->regmap, CS35L36_INT3_MASK, + CS35L36_INT3_MASK_RESET); + + if (cs35l36->reset_gpio) + gpiod_set_value_cansleep(cs35l36->reset_gpio, 0); + + regulator_bulk_disable(cs35l36->num_supplies, cs35l36->supplies); + + return 0; +} +static const struct of_device_id cs35l36_of_match[] = { + {.compatible = "cirrus,cs35l36"}, + {}, +}; +MODULE_DEVICE_TABLE(of, cs35l36_of_match); + +static const struct i2c_device_id cs35l36_id[] = { + {"cs35l36", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, cs35l36_id); + +static struct i2c_driver cs35l36_i2c_driver = { + .driver = { + .name = "cs35l36", + .of_match_table = cs35l36_of_match, + }, + .id_table = cs35l36_id, + .probe = cs35l36_i2c_probe, + .remove = cs35l36_i2c_remove, +}; +module_i2c_driver(cs35l36_i2c_driver); + +MODULE_DESCRIPTION("ASoC CS35L36 driver"); +MODULE_AUTHOR("James Schulman, Cirrus Logic Inc, <james.schulman@cirrus.com>"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/cs35l36.h b/sound/soc/codecs/cs35l36.h new file mode 100644 index 000000000000..f6e38c633b93 --- /dev/null +++ b/sound/soc/codecs/cs35l36.h @@ -0,0 +1,446 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * cs35l36.h -- CS35L36 ALSA SoC audio driver + * + * Copyright 2018 Cirrus Logic, Inc. 
+ * + * Author: James Schulman <james.schulman@cirrus.com> + * + */ + +#ifndef __CS35L36_H__ +#define __CS35L36_H__ + +#include <linux/regmap.h> + +#define CS35L36_FIRSTREG 0x00000000 +#define CS35L36_LASTREG 0x00E037FC +#define CS35L36_SW_RESET 0x00000000 +#define CS35L36_SW_REV 0x00000004 +#define CS35L36_HW_REV 0x00000008 +#define CS35L36_TESTKEY_CTRL 0x00000020 +#define CS35L36_USERKEY_CTL 0x00000024 +#define CS35L36_OTP_MEM30 0x00000478 +#define CS35L36_OTP_CTRL1 0x00000500 +#define CS35L36_OTP_CTRL2 0x00000504 +#define CS35L36_OTP_CTRL3 0x00000508 +#define CS35L36_OTP_CTRL4 0x0000050C +#define CS35L36_OTP_CTRL5 0x00000510 +#define CS35L36_PAC_CTL1 0x00000C00 +#define CS35L36_PAC_CTL2 0x00000C04 +#define CS35L36_PAC_CTL3 0x00000C08 +#define CS35L36_DEVICE_ID 0x00002004 +#define CS35L36_FAB_ID 0x00002008 +#define CS35L36_REV_ID 0x0000200C +#define CS35L36_PWR_CTRL1 0x00002014 +#define CS35L36_PWR_CTRL2 0x00002018 +#define CS35L36_PWR_CTRL3 0x0000201C +#define CS35L36_CTRL_OVRRIDE 0x00002020 +#define CS35L36_AMP_OUT_MUTE 0x00002024 +#define CS35L36_OTP_TRIM_STATUS 0x00002028 +#define CS35L36_DISCH_FILT 0x0000202C +#define CS35L36_OSC_TRIM 0x00002030 +#define CS35L36_PROTECT_REL_ERR 0x00002034 +#define CS35L36_PAD_INTERFACE 0x00002400 +#define CS35L36_PLL_CLK_CTRL 0x00002C04 +#define CS35L36_GLOBAL_CLK_CTRL 0x00002C0C +#define CS35L36_ADC_CLK_CTRL 0x00002C10 +#define CS35L36_SWIRE_CLK_CTRL 0x00002C14 +#define CS35L36_SP_SCLK_CLK_CTRL 0x00002D00 +#define CS35L36_TST_FS_MON0 0x00002D10 +#define CS35L36_PLL_LOOP_PARAMS 0x00003008 +#define CS35L36_DCO_CTRL 0x00003010 +#define CS35L36_MISC_CTRL 0x00003014 +#define CS35L36_MDSYNC_EN 0x00003404 +#define CS35L36_MDSYNC_TX_ID 0x00003408 +#define CS35L36_MDSYNC_PWR_CTRL 0x0000340C +#define CS35L36_MDSYNC_DATA_TX 0x00003410 +#define CS35L36_MDSYNC_TX_STATUS 0x0000341C +#define CS35L36_MDSYNC_RX_STATUS 0x00003420 +#define CS35L36_MDSYNC_ERR_STATUS 0x00003424 +#define CS35L36_BSTCVRT_VCTRL1 0x00003800 +#define CS35L36_BSTCVRT_VCTRL2 0x00003804 +#define CS35L36_BSTCVRT_PEAK_CUR 0x00003808 +#define CS35L36_BSTCVRT_SFT_RAMP 0x0000380C +#define CS35L36_BSTCVRT_COEFF 0x00003810 +#define CS35L36_BSTCVRT_SLOPE_LBST 0x00003814 +#define CS35L36_BSTCVRT_SW_FREQ 0x00003818 +#define CS35L36_BSTCVRT_DCM_CTRL 0x0000381C +#define CS35L36_BSTCVRT_DCM_MODE_FORCE 0x00003820 +#define CS35L36_BSTCVRT_OVERVOLT_CTRL 0x00003830 +#define CS35L36_BST_TST_MANUAL 0x0000393C +#define CS35L36_BST_ANA2_TEST 0x0000394C +#define CS35L36_VPI_LIMIT_MODE 0x00003C04 +#define CS35L36_VPI_LIMIT_MINMAX 0x00003C08 +#define CS35L36_VPI_VP_THLD 0x00003C0C +#define CS35L36_VPI_TRACK_CTRL 0x00003C10 +#define CS35L36_VPI_TRIG_MODE_CTRL 0x00003C14 +#define CS35L36_VPI_TRIG_STEPS 0x00003C18 +#define CS35L36_VI_SPKMON_FILT 0x00004004 +#define CS35L36_VI_SPKMON_GAIN 0x00004008 +#define CS35L36_VI_SPKMON_IP_SEL 0x00004100 +#define CS35L36_DTEMP_WARN_THLD 0x00004220 +#define CS35L36_DTEMP_STATUS 0x00004300 +#define CS35L36_VPVBST_FS_SEL 0x00004400 +#define CS35L36_VPVBST_VP_CTRL 0x00004440 +#define CS35L36_VPVBST_VBST_CTRL 0x00004444 +#define CS35L36_ASP_TX_PIN_CTRL 0x00004800 +#define CS35L36_ASP_RATE_CTRL 0x00004804 +#define CS35L36_ASP_FORMAT 0x00004808 +#define CS35L36_ASP_FRAME_CTRL 0x00004818 +#define CS35L36_ASP_TX1_TX2_SLOT 0x0000481C +#define CS35L36_ASP_TX3_TX4_SLOT 0x00004820 +#define CS35L36_ASP_TX5_TX6_SLOT 0x00004824 +#define CS35L36_ASP_TX7_TX8_SLOT 0x00004828 +#define CS35L36_ASP_RX1_SLOT 0x0000482C +#define CS35L36_ASP_RX_TX_EN 0x0000483C +#define CS35L36_ASP_RX1_SEL 0x00004C00 
+#define CS35L36_ASP_TX1_SEL 0x00004C20 +#define CS35L36_ASP_TX2_SEL 0x00004C24 +#define CS35L36_ASP_TX3_SEL 0x00004C28 +#define CS35L36_ASP_TX4_SEL 0x00004C2C +#define CS35L36_ASP_TX5_SEL 0x00004C30 +#define CS35L36_ASP_TX6_SEL 0x00004C34 +#define CS35L36_SWIRE_P1_TX1_SEL 0x00004C40 +#define CS35L36_SWIRE_P1_TX2_SEL 0x00004C44 +#define CS35L36_SWIRE_P2_TX1_SEL 0x00004C60 +#define CS35L36_SWIRE_P2_TX2_SEL 0x00004C64 +#define CS35L36_SWIRE_P2_TX3_SEL 0x00004C68 +#define CS35L36_SWIRE_DP1_FIFO_CFG 0x00005000 +#define CS35L36_SWIRE_DP2_FIFO_CFG 0x00005004 +#define CS35L36_SWIRE_DP3_FIFO_CFG 0x00005008 +#define CS35L36_SWIRE_PCM_RX_DATA 0x0000500C +#define CS35L36_SWIRE_FS_SEL 0x00005010 +#define CS35L36_SPARE_CP_BITS 0x00005C00 +#define CS35L36_AMP_DIG_VOL_CTRL 0x00006000 +#define CS35L36_VPBR_CFG 0x00006404 +#define CS35L36_VBBR_CFG 0x00006408 +#define CS35L36_VPBR_STATUS 0x0000640C +#define CS35L36_VBBR_STATUS 0x00006410 +#define CS35L36_OVERTEMP_CFG 0x00006414 +#define CS35L36_AMP_ERR_VOL 0x00006418 +#define CS35L36_CLASSH_CFG 0x00006800 +#define CS35L36_CLASSH_FET_DRV_CFG 0x00006804 +#define CS35L36_NG_CFG 0x00006808 +#define CS35L36_AMP_GAIN_CTRL 0x00006C04 +#define CS35L36_PWM_MOD_IO_CTRL 0x0000706C +#define CS35L36_PWM_MOD_STATUS 0x00007070 +#define CS35L36_DAC_MSM_CFG 0x00007400 +#define CS35L36_AMP_SLOPE_CTRL 0x00007410 +#define CS35L36_AMP_PDM_VOLUME 0x00007E04 +#define CS35L36_AMP_PDM_RATE_CTRL 0x00007E08 +#define CS35L36_PDM_CH_SEL 0x00007E10 +#define CS35L36_AMP_NG_CTRL 0x00007E14 +#define CS35L36_PDM_HIGHFILT_CTRL 0x00007E3C +#define CS35L36_INT1_STATUS 0x00D00000 +#define CS35L36_INT2_STATUS 0x00D00004 +#define CS35L36_INT3_STATUS 0x00D00008 +#define CS35L36_INT4_STATUS 0x00D0000C +#define CS35L36_INT1_RAW_STATUS 0x00D00020 +#define CS35L36_INT2_RAW_STATUS 0x00D00024 +#define CS35L36_INT3_RAW_STATUS 0x00D00028 +#define CS35L36_INT4_RAW_STATUS 0x00D0002C +#define CS35L36_INT1_MASK 0x00D00040 +#define CS35L36_INT2_MASK 0x00D00044 +#define CS35L36_INT3_MASK 0x00D00048 +#define CS35L36_INT4_MASK 0x00D0004C +#define CS35L36_INT1_EDGE_LVL_CTRL 0x00D00060 +#define CS35L36_INT3_EDGE_LVL_CTRL 0x00D00068 +#define CS35L36_PAC_INT_STATUS 0x00D00200 +#define CS35L36_PAC_INT_RAW_STATUS 0x00D00210 +#define CS35L36_PAC_INT_FLUSH_CTRL 0x00D00218 +#define CS35L36_PAC_INT0_CTRL 0x00D00220 +#define CS35L36_PAC_INT1_CTRL 0x00D00224 +#define CS35L36_PAC_INT2_CTRL 0x00D00228 +#define CS35L36_PAC_INT3_CTRL 0x00D0022C +#define CS35L36_PAC_INT4_CTRL 0x00D00230 +#define CS35L36_PAC_INT5_CTRL 0x00D00234 +#define CS35L36_PAC_INT6_CTRL 0x00D00238 +#define CS35L36_PAC_INT7_CTRL 0x00D0023C +#define CS35L36_PAC_PMEM_WORD0 0x00E02800 +#define CS35L36_PAC_PMEM_WORD1 0x00E02804 +#define CS35L36_PAC_PMEM_WORD1023 0x00E037FC + +#define CS35L36_INTPAC_REG_COUNT 25 +#define CS35L36_CHIP_ID 0x00035A36 + +#define CS35L36_INT_OUTPUT_EN_MASK 0x01 +#define CS35L36_INT_GPIO_SEL_MASK 0x02 +#define CS35L36_INT_GPIO_SEL_SHIFT 1 +#define CS35L36_INT_POL_SEL_MASK 0x04 +#define CS35L36_INT_POL_SEL_SHIFT 2 +#define CS35L36_INT_DRV_SEL_MASK 0x20 +#define CS35L36_INT_DRV_SEL_SHIFT 5 +#define CS35L36_IRQ_SRC_MASK 0x08 +#define CS35L36_IRQ_SRC_SHIFT 3 + +#define CS35L36_SCLK_MSTR_MASK 0x40 +#define CS35L36_SCLK_MSTR_SHIFT 6 +#define CS35L36_LRCLK_MSTR_MASK 0x01 +#define CS35L36_LRCLK_MSTR_SHIFT 0 +#define CS35L36_SCLK_INV_MASK 0x100 +#define CS35L36_SCLK_INV_SHIFT 8 +#define CS35L36_LRCLK_INV_MASK 0x04 +#define CS35L36_LRCLK_INV_SHIFT 2 +#define CS35L36_SCLK_FRC_MASK 0x80 +#define CS35L36_SCLK_FRC_SHIFT 7 +#define 
CS35L36_LRCLK_FRC_MASK 0x02 +#define CS35L36_LRCLK_FRC_SHIFT 1 + +#define CS35L36_PDM_MODE_MASK 0x01 +#define CS35L36_PDM_MODE_SHIFT 0 + +#define CS35L36_ASP_FMT_MASK 0x07 +#define CS35L36_ASP_FMT_SHIFT 0 + +#define CS35L36_ASP_RX_WIDTH_MASK 0xFF0000 +#define CS35L36_ASP_RX_WIDTH_SHIFT 16 +#define CS35L36_ASP_TX_WIDTH_MASK 0xFF +#define CS35L36_ASP_TX_WIDTH_SHIFT 0 +#define CS35L36_ASP_WIDTH_16 0x10 +#define CS35L36_ASP_WIDTH_24 0x18 +#define CS35L36_ASP_WIDTH_32 0x20 + +#define CS35L36_ASP_RX1_SLOT_MASK 0x3F +#define CS35L36_ASP_RX1_EN_MASK 0x00010000 +#define CS35L36_ASP_RX1_EN_SHIFT 16 + +#define CS35L36_ASP_TX1_SLOT_MASK 0x3F +#define CS35L36_ASP_TX2_SLOT_MASK 0x3F0000 +#define CS35L36_ASP_TX2_SLOT_SHIFT 16 +#define CS35L36_ASP_TX3_SLOT_MASK 0x3F +#define CS35L36_ASP_TX4_SLOT_MASK 0x3F0000 +#define CS35L36_ASP_TX4_SLOT_SHIFT 16 +#define CS35L36_ASP_TX5_SLOT_MASK 0x3F +#define CS35L36_ASP_TX6_SLOT_MASK 0x3F0000 +#define CS35L36_ASP_TX6_SLOT_SHIFT 16 +#define CS35L36_ASP_TX7_SLOT_MASK 0x3F +#define CS35L36_ASP_TX8_SLOT_MASK 0x3F0000 +#define CS35L36_ASP_TX8_SLOT_SHIFT 16 +#define CS35L36_ASP_TX_HIZ_MASK 0x200000 + +#define CS35L36_APS_TX_SEL_MASK 0x7F + +#define CS35L36_ASP_TX1_EN_MASK 0x01 +#define CS35L36_ASP_TX2_EN_MASK 0x02 +#define CS35L36_ASP_TX2_EN_SHIFT 1 +#define CS35L36_ASP_TX3_EN_MASK 0x04 +#define CS35L36_ASP_TX3_EN_SHIFT 2 +#define CS35L36_ASP_TX4_EN_MASK 0x08 +#define CS35L36_ASP_TX4_EN_SHIFT 3 +#define CS35L36_ASP_TX5_EN_MASK 0x10 +#define CS35L36_ASP_TX5_EN_SHIFT 4 +#define CS35L36_ASP_TX6_EN_MASK 0x20 +#define CS35L36_ASP_TX6_EN_SHIFT 5 +#define CS35L36_ASP_TX7_EN_MASK 0x40 +#define CS35L36_ASP_TX7_EN_SHIFT 6 +#define CS35L36_ASP_TX8_EN_MASK 0x80 +#define CS35L36_ASP_TX8_EN_SHIFT 7 + + +#define CS35L36_PLL_CLK_SEL_MASK 0x07 +#define CS35L36_PLL_CLK_SEL_SHIFT 0 +#define CS35L36_PLLSRC_SCLK 0 +#define CS35L36_PLLSRC_LRCLK 1 +#define CS35L36_PLLSRC_SELF 3 +#define CS35L36_PLLSRC_PDMCLK 4 +#define CS35L36_PLLSRC_MCLK 5 +#define CS35L36_PLLSRC_SWIRE 7 +#define CS35L36_REFCLK_FREQ_MASK 0x7E0 +#define CS35L36_REFCLK_FREQ_SHIFT 5 +#define CS35L36_PLL_OPENLOOP_MASK 0x800 +#define CS35L36_PLL_OPENLOOP_SHIFT 11 +#define CS35L36_PLL_REFCLK_EN_MASK 0x10 +#define CS35L36_PLL_REFCLK_EN_SHIFT 4 + + +#define CS35L36_GLOBAL_FS_MASK 0x1F +#define CS35L36_GLOBAL_FS_SHIFT 0 + +#define CS35L36_HPF_PCM_EN_MASK 0x800 +#define CS35L36_HPF_PCM_EN_SHIFT 15 +#define CS35L36_PCM_RX_SEL_MASK 0x7F +#define CS35L36_PCM_RX_SEL_SHIFT 0 + +#define CS35L36_PCM_RX_SEL_ZERO 0x00 +#define CS35L36_PCM_RX_SEL_PCM 0x08 +#define CS35L36_PCM_RX_SEL_SWIRE 0x10 +#define CS35L36_PCM_RX_SEL_DIAG 0x04 + +#define CS35L36_GLOBAL_EN_MASK 0x01 +#define CS35L36_GLOBAL_EN_SHIFT 0x00 + +#define CS35L36_AMP_PCM_INV_MASK 0x4000 +#define CS35L36_AMP_PCM_INV_SHIFT 14 + +#define CS35L36_AMP_VOL_PCM_MASK 0x3FF8 +#define CS35L36_AMP_VOL_PCM_SHIFT 3 +#define CS35L36_DIGITAL_MUTE 0x04CF + +#define CS35L36_AMP_RAMP_MASK 0x0007 +#define CS35L36_AMP_RAMP_SHIFT 0 + +#define CS35L36_AMP_MUTE_MASK 0x0010 +#define CS35L36_AMP_MUTE_SHIFT 4 + +#define CS35L36_GLOBAL_RESYNC_FS1_MASK 0x00000200 +#define CS35L36_GLOBAL_RESYNC_FS2_MASK 0x00000400 +#define CS35L36_SYNC_GLOBAL_OVR_MASK 0x00000002 +#define CS35L36_SYNC_GLOBAL_OVR_SHIFT 1 + +#define CS35L36_REFCLK_IN_MASK 0x00100000 +#define CS35L36_PLL_UNLOCK_MASK 0x00002000 + +#define CS35L36_ASP_RX_UDF_MASK 0x00000040 +#define CS35L36_ASP_RX_OVF_MASK 0x00000080 + +#define CS35L36_IMON_POL_MASK 0x02 +#define CS35L36_IMON_POL_SHIFT 1 + +#define CS35L36_VMON_POL_MASK 0x01 +#define 
CS35L36_VMON_POL_SHIFT 0 + +#define CS35L36_PDN_DONE 0x40 +#define CS35L36_PDN_DONE_SHIFT 6 +#define CS35L36_PUP_DONE 0x80 +#define CS35L36_PUP_DONE_SHIFT 7 +#define CS35L36_GLOBAL_EN_ASSRT 0x20 +#define CS35L36_PUP_DONE_IRQ_UNMASK 0x7F +#define CS35L36_PUP_DONE_IRQ_MASK 0xBF + +#define CS35L36_FS1_WINDOW_MASK 0x000007FF +#define CS35L36_FS2_WINDOW_MASK 0x00FFF800 +#define CS35L36_FS2_WINDOW_SHIFT 12 + +#define CS35L36_PLL_FFL_IGAIN_MASK 0x0F +#define CS35L36_PLL_IGAIN_MASK 0x3F0 +#define CS35L36_PLL_IGAIN_SHIFT 4 +#define CS35L36_PLL_IGAIN 0x04 + +#define CS35L36_BST_EN_MASK 0x30 +#define CS35L36_BST_EN 0x02 +#define CS35L36_BST_DIS_VP 0x01 +#define CS35L36_BST_DIS_EXTN 0x00 +#define CS35L36_BST_EN_SHIFT 4 +#define CS35L36_BST_MAN_IPKCOMP_MASK 0x200 +#define CS35L36_BST_MAN_IPKCOMP_SHIFT 9 + +#define CS35L36_BST_MAN_IPKCOMP_EN_MASK 0x100 +#define CS35L36_BST_MAN_IPKCOMP_EN_SHIFT 8 + +#define CS35L36_BST_IPK_MASK 0x7F +#define CS35L36_BST_OVP_THLD_MASK 0x3F +#define CS35L36_BST_OVP_THLD_11V 0x10 +#define CS35L36_BST_OVP_TRIM_MASK 0x00078000 +#define CS35L36_BST_OVP_TRIM_SHIFT 15 +#define CS35L36_BST_OVP_TRIM_11V 0x0C +#define CS35L36_BST_CTRL_LIM_MASK 0x04 +#define CS35L36_BST_CTRL_LIM_SHIFT 2 +#define CS35L36_BST_CTRL_10V_CLAMP 0x96 + +#define CS35L36_NG_AMP_EN_MASK 0x3F00 +#define CS35L36_NG_DELAY_MASK 0x70 +#define CS35L36_NG_DELAY_SHIFT 4 +#define CS35L36_AMP_ZC_SHIFT 10 +#define CS35L36_PDM_LDM_ENTER_SHIFT 3 +#define CS35L36_PDM_LDM_EXIT_SHIFT 4 + +#define CS35L36_BSTCVRT_K1_MASK 0xFF +#define CS35L36_BSTCVRT_K2_MASK 0xFF00 +#define CS35L36_BSTCVRT_K2_SHIFT 8 +#define CS35L36_BSTCVRT_SLOPE_MASK 0xFF00 +#define CS35L36_BSTCVRT_SLOPE_SHIFT 8 +#define CS35L36_BSTCVRT_CCMFREQ_MASK 0x0F +#define CS35L36_BSTCVRT_LBSTVAL_MASK 0x03 +#define CS35L35_BSTCVRT_CTL_MASK 0xFF +#define CS35L35_BSTCVRT_CTL_SEL_MASK 0x03 +#define CS35L36_DCM_AUTO_MASK 0x01 + +#define CS35L36_INT1_MASK_DEFAULT 0xF9BA7FFF +#define CS35L36_INT1_MASK_RESET 0xFFFFFFFF +#define CS35L36_INT3_MASK_DEFAULT 0xFFFFEFFF +#define CS35L36_INT3_MASK_RESET 0xFFFFFFFF + + +#define CS35L36_AMP_SHORT_ERR 0x1000 +#define CS35L36_BST_SHORT_ERR 0x40000 +#define CS35L36_TEMP_WARN 0x2000000 +#define CS35L36_TEMP_ERR 0x4000000 +#define CS35L36_BST_OVP_ERR 0x10000 +#define CS35L36_BST_DCM_UVP_ERR 0x20000 + +#define CS35L36_AMP_SHORT_ERR_RLS 0x02 +#define CS35L36_BST_SHORT_ERR_RLS 0x04 +#define CS35L36_BST_OVP_ERR_RLS 0x08 +#define CS35L36_BST_UVP_ERR_RLS 0x10 +#define CS35L36_TEMP_WARN_ERR_RLS 0x20 +#define CS35L36_TEMP_ERR_RLS 0x40 +#define CS35L36_TEMP_THLD_MASK 0x03 + +#define CS35L36_REV_B0 0xb0 +#define CS35L36_REV_A0 0xa0 +#define CS35L36_B0_PAC_PATCH 0x00DD0102 + +#define CS35L36_OTP_ECC_EN_MASK 0x400 +#define CS35L36_OTP_ECC_EN_SHIFT 10 +#define CS35L36_OTP_RUN_BOOT_MASK 0x01 +#define CS35L36_OTP_BOOT_DONE 0x2000000 +#define CS35L36_PAC_RESET_MASK 0x04 +#define CS35L36_PAC_RESET_SHIFT 2 +#define CS35L36_PAC_STALL_MASK 0x02 +#define CS35L36_PAC_STALL_SHIFT 1 +#define CS35L36_PAC_ENABLE_MASK 0x00000001 +#define CS35L36_PAC_MEM_ACCESS 0x01 +#define CS35L36_PAC_MEM_ACCESS_CLR 0 +#define CS35L36_SOFT_RESET 0x5AAA +#define CS35L36_MCU_BOOT_COMPLETE 0x02 +#define CS35L36_MCU_CONFIG_UNMASK 0x00FEFFFF +#define CS35L36_MCU_CONFIG_CLR 0x00010000 +#define CS35L36_MCU_CONFIG_MASK 0x00FFFFFF +#define CS35L36_GPIO_INT_SEL_MASK 0x0000003B +#define CS35L36_GPIO_INT_SEL_UNMASK 0x0000003A +#define CS35L36_PAC_RESET 0x00000000 +#define CS35L36_OTP_REV_MASK 0x00FF0000 +#define CS35L36_OTP_REV_L37 0x00CC0000 +#define CS35L36_12V_L37 37 +#define 
CS35L36_10V_L36 36 + +#define CS35L36_VPBR_EN_MASK 0x00001000 +#define CS35L36_VPBR_EN_SHIFT 12 + +#define CS35L36_VPBR_THLD_MASK 0x0000001F +#define CS35L36_VPBR_THLD_SHIFT 0 +#define CS35L36_VPBR_MAX_ATTN_MASK 0x00000F00 +#define CS35L36_VPBR_MAX_ATTN_SHIFT 8 +#define CS35L36_VPBR_ATK_VOL_MASK 0x0000F000 +#define CS35L36_VPBR_ATK_VOL_SHIFT 12 +#define CS35L36_VPBR_ATK_RATE_MASK 0x00070000 +#define CS35L36_VPBR_ATK_RATE_SHIFT 16 +#define CS35L36_VPBR_WAIT_MASK 0x00180000 +#define CS35L36_VPBR_WAIT_SHIFT 19 +#define CS35L36_VPBR_REL_RATE_MASK 0x00E00000 +#define CS35L36_VPBR_REL_RATE_SHIFT 21 +#define CS35L36_VPBR_MUTE_EN_MASK 0x01000000 +#define CS35L36_VPBR_MUTE_EN_SHIFT 24 + +#define CS35L36_OSC_FREQ_TRIM_MASK 0x070 +#define CS35L36_OSC_TRIM_DONE 0x08 + +#define CS35L36_FS1_DEFAULT_VAL 16 +#define CS35L36_FS2_DEFAULT_VAL 36 +#define CS35L36_FS_NOM_6MHZ 6000000 + +#define CS35L36_TEST_UNLOCK1 0x00005555 +#define CS35L36_TEST_UNLOCK2 0x0000AAAA +#define CS35L36_TEST_LOCK1 0x0000CCCC +#define CS35L36_TEST_LOCK2 0x00003333 + +#define CS35L36_PAC_PROG_MEM 512 + +#define CS35L36_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) +#define CS35L36_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE \ + | SNDRV_PCM_FMTBIT_S32_LE) + +extern const int cs35l36_a0_pac_patch[CS35L36_PAC_PROG_MEM]; + +#endif diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c index b1df4bb36105..121a8190f93e 100644 --- a/sound/soc/codecs/da7219.c +++ b/sound/soc/codecs/da7219.c @@ -1376,11 +1376,7 @@ static int da7219_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) return -EINVAL; } - /* By default 64 BCLKs per WCLK is supported */ - dai_clk_mode |= DA7219_DAI_BCLKS_PER_WCLK_64; - snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE, - DA7219_DAI_BCLKS_PER_WCLK_MASK | DA7219_DAI_CLK_POL_MASK | DA7219_DAI_WCLK_POL_MASK, dai_clk_mode); snd_soc_component_update_bits(component, DA7219_DAI_CTRL, DA7219_DAI_FORMAT_MASK, @@ -1395,69 +1391,83 @@ static int da7219_set_dai_tdm_slot(struct snd_soc_dai *dai, { struct snd_soc_component *component = dai->component; struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component); - u8 dai_bclks_per_wclk; - __le16 offset; + unsigned int ch_mask; + u8 dai_bclks_per_wclk, slot_offset; + u16 offset; + __le16 dai_offset; u32 frame_size; - /* No channels enabled so disable TDM, revert to 64-bit frames */ + /* No channels enabled so disable TDM */ if (!tx_mask) { snd_soc_component_update_bits(component, DA7219_DAI_TDM_CTRL, DA7219_DAI_TDM_CH_EN_MASK | DA7219_DAI_TDM_MODE_EN_MASK, 0); - snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE, - DA7219_DAI_BCLKS_PER_WCLK_MASK, - DA7219_DAI_BCLKS_PER_WCLK_64); + da7219->tdm_en = false; return 0; } /* Check we have valid slots */ - if (fls(tx_mask) > DA7219_DAI_TDM_MAX_SLOTS) { - dev_err(component->dev, "Invalid number of slots, max = %d\n", + slot_offset = ffs(tx_mask) - 1; + ch_mask = (tx_mask >> slot_offset); + if (fls(ch_mask) > DA7219_DAI_TDM_MAX_SLOTS) { + dev_err(component->dev, + "Invalid number of slots, max = %d\n", DA7219_DAI_TDM_MAX_SLOTS); return -EINVAL; } - /* Check we have a valid offset given */ - if (rx_mask > DA7219_DAI_OFFSET_MAX) { - dev_err(component->dev, "Invalid slot offset, max = %d\n", - DA7219_DAI_OFFSET_MAX); + /* + * Ensure we have a valid offset into the frame, based on slot width + * and slot offset of first slot we're interested in. 
+ */ + offset = slot_offset * slot_width; + if (offset > DA7219_DAI_OFFSET_MAX) { + dev_err(component->dev, "Invalid frame offset %d\n", offset); return -EINVAL; } - /* Calculate & validate frame size based on slot info provided. */ - frame_size = slots * slot_width; - switch (frame_size) { - case 32: - dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_32; - break; - case 64: - dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_64; - break; - case 128: - dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_128; - break; - case 256: - dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_256; - break; - default: - dev_err(component->dev, "Invalid frame size %d\n", frame_size); - return -EINVAL; - } + /* + * If we're master, calculate & validate frame size based on slot info + * provided as we have a limited set of rates available. + */ + if (da7219->master) { + frame_size = slots * slot_width; + switch (frame_size) { + case 32: + dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_32; + break; + case 64: + dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_64; + break; + case 128: + dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_128; + break; + case 256: + dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_256; + break; + default: + dev_err(component->dev, "Invalid frame size %d\n", + frame_size); + return -EINVAL; + } - snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE, - DA7219_DAI_BCLKS_PER_WCLK_MASK, - dai_bclks_per_wclk); + snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE, + DA7219_DAI_BCLKS_PER_WCLK_MASK, + dai_bclks_per_wclk); + } - offset = cpu_to_le16(rx_mask); + dai_offset = cpu_to_le16(offset); regmap_bulk_write(da7219->regmap, DA7219_DAI_OFFSET_LOWER, - &offset, sizeof(offset)); + &dai_offset, sizeof(dai_offset)); snd_soc_component_update_bits(component, DA7219_DAI_TDM_CTRL, DA7219_DAI_TDM_CH_EN_MASK | DA7219_DAI_TDM_MODE_EN_MASK, - (tx_mask << DA7219_DAI_TDM_CH_EN_SHIFT) | + (ch_mask << DA7219_DAI_TDM_CH_EN_SHIFT) | DA7219_DAI_TDM_MODE_EN_MASK); + da7219->tdm_en = true; + return 0; } @@ -1466,10 +1476,13 @@ static int da7219_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; - u8 dai_ctrl = 0, fs; + struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component); + u8 dai_ctrl = 0, dai_bclks_per_wclk = 0, fs; unsigned int channels; + int word_len = params_width(params); + int frame_size; - switch (params_width(params)) { + switch (word_len) { case 16: dai_ctrl |= DA7219_DAI_WORD_LENGTH_S16_LE; break; @@ -1533,6 +1546,23 @@ static int da7219_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } + /* + * If we're master, then we have a limited set of BCLK rates we + * support. For slave mode this isn't the case and the codec can detect + * the BCLK rate automatically. 
+ */ + if (da7219->master && !da7219->tdm_en) { + frame_size = word_len * 2; + if (frame_size <= 32) + dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_32; + else + dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_64; + + snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE, + DA7219_DAI_BCLKS_PER_WCLK_MASK, + dai_bclks_per_wclk); + } + snd_soc_component_update_bits(component, DA7219_DAI_CTRL, DA7219_DAI_WORD_LENGTH_MASK | DA7219_DAI_CH_NUM_MASK, diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h index 366cf46118a0..018819c631fb 100644 --- a/sound/soc/codecs/da7219.h +++ b/sound/soc/codecs/da7219.h @@ -830,6 +830,7 @@ struct da7219_priv { int clk_src; bool master; + bool tdm_en; bool alc_en; bool micbias_on_event; unsigned int mic_pga_delay; diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index b19d7a3e7a2c..5eeb0fe836a9 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -1176,13 +1176,15 @@ static int hdac_hdmi_add_cvt(struct hdac_device *hdev, hda_nid_t nid) struct hdac_hdmi_cvt *cvt; char name[NAME_SIZE]; - cvt = kzalloc(sizeof(*cvt), GFP_KERNEL); + cvt = devm_kzalloc(&hdev->dev, sizeof(*cvt), GFP_KERNEL); if (!cvt) return -ENOMEM; cvt->nid = nid; sprintf(name, "cvt %d", cvt->nid); - cvt->name = kstrdup(name, GFP_KERNEL); + cvt->name = devm_kstrdup(&hdev->dev, name, GFP_KERNEL); + if (!cvt->name) + return -ENOMEM; list_add_tail(&cvt->head, &hdmi->cvt_list); hdmi->num_cvt++; @@ -1287,8 +1289,8 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin, mutex_unlock(&hdmi->pin_mutex); } -static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi, - struct hdac_hdmi_pin *pin) +static int hdac_hdmi_add_ports(struct hdac_device *hdev, + struct hdac_hdmi_pin *pin) { struct hdac_hdmi_port *ports; int max_ports = HDA_MAX_PORTS; @@ -1300,7 +1302,7 @@ static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi, * implemented. 
*/ - ports = kcalloc(max_ports, sizeof(*ports), GFP_KERNEL); + ports = devm_kcalloc(&hdev->dev, max_ports, sizeof(*ports), GFP_KERNEL); if (!ports) return -ENOMEM; @@ -1319,14 +1321,14 @@ static int hdac_hdmi_add_pin(struct hdac_device *hdev, hda_nid_t nid) struct hdac_hdmi_pin *pin; int ret; - pin = kzalloc(sizeof(*pin), GFP_KERNEL); + pin = devm_kzalloc(&hdev->dev, sizeof(*pin), GFP_KERNEL); if (!pin) return -ENOMEM; pin->nid = nid; pin->mst_capable = false; pin->hdev = hdev; - ret = hdac_hdmi_add_ports(hdmi, pin); + ret = hdac_hdmi_add_ports(hdev, pin); if (ret < 0) return ret; @@ -1468,8 +1470,6 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_device *hdev, { hda_nid_t nid; int i, num_nodes; - struct hdac_hdmi_cvt *temp_cvt, *cvt_next; - struct hdac_hdmi_pin *temp_pin, *pin_next; struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev); int ret; @@ -1497,51 +1497,35 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_device *hdev, case AC_WID_AUD_OUT: ret = hdac_hdmi_add_cvt(hdev, nid); if (ret < 0) - goto free_widgets; + return ret; break; case AC_WID_PIN: ret = hdac_hdmi_add_pin(hdev, nid); if (ret < 0) - goto free_widgets; + return ret; break; } } if (!hdmi->num_pin || !hdmi->num_cvt) { ret = -EIO; - goto free_widgets; + dev_err(&hdev->dev, "Bad pin/cvt setup in %s\n", __func__); + return ret; } ret = hdac_hdmi_create_dais(hdev, dais, hdmi, hdmi->num_cvt); if (ret) { dev_err(&hdev->dev, "Failed to create dais with err: %d\n", - ret); - goto free_widgets; + ret); + return ret; } *num_dais = hdmi->num_cvt; ret = hdac_hdmi_init_dai_map(hdev); if (ret < 0) - goto free_widgets; - - return ret; - -free_widgets: - list_for_each_entry_safe(temp_cvt, cvt_next, &hdmi->cvt_list, head) { - list_del(&temp_cvt->head); - kfree(temp_cvt->name); - kfree(temp_cvt); - } - - list_for_each_entry_safe(temp_pin, pin_next, &hdmi->pin_list, head) { - for (i = 0; i < temp_pin->num_ports; i++) - temp_pin->ports[i].pin = NULL; - kfree(temp_pin->ports); - list_del(&temp_pin->head); - kfree(temp_pin); - } - + dev_err(&hdev->dev, "Failed to init DAI map with err: %d\n", + ret); return ret; } @@ -1782,7 +1766,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device, * this is a new PCM device, create new pcm and * add to the pcm list */ - pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); + pcm = devm_kzalloc(&hdev->dev, sizeof(*pcm), GFP_KERNEL); if (!pcm) return -ENOMEM; pcm->pcm_id = device; @@ -1798,7 +1782,6 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device, dev_err(&hdev->dev, "chmap control add failed with err: %d for pcm: %d\n", err, device); - kfree(pcm); return err; } } @@ -2075,42 +2058,8 @@ static int hdac_hdmi_dev_probe(struct hdac_device *hdev) static int hdac_hdmi_dev_remove(struct hdac_device *hdev) { - struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev); - struct hdac_hdmi_pin *pin, *pin_next; - struct hdac_hdmi_cvt *cvt, *cvt_next; - struct hdac_hdmi_pcm *pcm, *pcm_next; - struct hdac_hdmi_port *port, *port_next; - int i; - snd_hdac_display_power(hdev->bus, hdev->addr, false); - list_for_each_entry_safe(pcm, pcm_next, &hdmi->pcm_list, head) { - pcm->cvt = NULL; - if (list_empty(&pcm->port_list)) - continue; - - list_for_each_entry_safe(port, port_next, - &pcm->port_list, head) - list_del(&port->head); - - list_del(&pcm->head); - kfree(pcm); - } - - list_for_each_entry_safe(cvt, cvt_next, &hdmi->cvt_list, head) { - list_del(&cvt->head); - kfree(cvt->name); - kfree(cvt); - } - - list_for_each_entry_safe(pin, pin_next, &hdmi->pin_list, head) { - for (i = 0; i < pin->num_ports; i++) 
- pin->ports[i].pin = NULL; - kfree(pin->ports); - list_del(&pin->head); - kfree(pin); - } - return 0; } diff --git a/sound/soc/codecs/jz4725b.c b/sound/soc/codecs/jz4725b.c index 103ccbc5d55c..766354c73076 100644 --- a/sound/soc/codecs/jz4725b.c +++ b/sound/soc/codecs/jz4725b.c @@ -173,12 +173,12 @@ static const char * const jz4725b_codec_adc_src_texts[] = { "Mic 1", "Mic 2", "Line In", "Mixer", }; static const unsigned int jz4725b_codec_adc_src_values[] = { 0, 1, 2, 3, }; -static const SOC_VALUE_ENUM_SINGLE_DECL(jz4725b_codec_adc_src_enum, - JZ4725B_CODEC_REG_CR3, - REG_CR3_INSEL_OFFSET, - REG_CR3_INSEL_MASK, - jz4725b_codec_adc_src_texts, - jz4725b_codec_adc_src_values); +static SOC_VALUE_ENUM_SINGLE_DECL(jz4725b_codec_adc_src_enum, + JZ4725B_CODEC_REG_CR3, + REG_CR3_INSEL_OFFSET, + REG_CR3_INSEL_MASK, + jz4725b_codec_adc_src_texts, + jz4725b_codec_adc_src_values); static const struct snd_kcontrol_new jz4725b_codec_adc_src_ctrl = SOC_DAPM_ENUM("Route", jz4725b_codec_adc_src_enum); diff --git a/sound/soc/codecs/max9860.c b/sound/soc/codecs/max9860.c index de3d44e9199b..8be636fe6552 100644 --- a/sound/soc/codecs/max9860.c +++ b/sound/soc/codecs/max9860.c @@ -615,7 +615,8 @@ static int max9860_probe(struct i2c_client *i2c) max9860->dvddio_nb.notifier_call = max9860_dvddio_event; - ret = regulator_register_notifier(max9860->dvddio, &max9860->dvddio_nb); + ret = devm_regulator_register_notifier(max9860->dvddio, + &max9860->dvddio_nb); if (ret) dev_err(dev, "Failed to register DVDDIO notifier: %d\n", ret); diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index b7cf7cce95fe..368b6c09474b 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -1,3 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2016, The Linux Foundation. All rights reserved. + #include <linux/module.h> #include <linux/err.h> #include <linux/kernel.h> diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index 423bfebabed4..a63961861e55 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -1,14 +1,5 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2016, The Linux Foundation. All rights reserved. 
#include <linux/module.h> #include <linux/err.h> diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c index 809b7e9f03ca..c5fcc632f670 100644 --- a/sound/soc/codecs/pcm186x.c +++ b/sound/soc/codecs/pcm186x.c @@ -42,7 +42,7 @@ struct pcm186x_priv { bool is_master_mode; }; -static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 4000, 50); +static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0); static const struct snd_kcontrol_new pcm1863_snd_controls[] = { SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L, @@ -158,7 +158,7 @@ static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = { * Put the codec into SLEEP mode when not in use, allowing the * Energysense mechanism to operate. */ - SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0), + SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 1), }; static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = { @@ -184,8 +184,8 @@ static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = { * Put the codec into SLEEP mode when not in use, allowing the * Energysense mechanism to operate. */ - SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0), - SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0), + SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 1), + SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 1), }; static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = { diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c index 6714aa8d9026..32b26f1c2282 100644 --- a/sound/soc/codecs/pcm3060.c +++ b/sound/soc/codecs/pcm3060.c @@ -18,12 +18,39 @@ static int pcm3060_set_sysclk(struct snd_soc_dai *dai, int clk_id, { struct snd_soc_component *comp = dai->component; struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp); + unsigned int reg; + unsigned int val; if (dir != SND_SOC_CLOCK_IN) { dev_err(comp->dev, "unsupported sysclock dir: %d\n", dir); return -EINVAL; } + switch (clk_id) { + case PCM3060_CLK_DEF: + val = 0; + break; + + case PCM3060_CLK1: + val = (dai->id == PCM3060_DAI_ID_DAC ? PCM3060_REG_CSEL : 0); + break; + + case PCM3060_CLK2: + val = (dai->id == PCM3060_DAI_ID_DAC ? 
0 : PCM3060_REG_CSEL); + break; + + default: + dev_err(comp->dev, "unsupported sysclock id: %d\n", clk_id); + return -EINVAL; + } + + if (dai->id == PCM3060_DAI_ID_DAC) + reg = PCM3060_REG67; + else + reg = PCM3060_REG72; + + regmap_update_bits(priv->regmap, reg, PCM3060_REG_CSEL, val); + priv->dai[dai->id].sclk_freq = freq; return 0; @@ -287,6 +314,14 @@ int pcm3060_probe(struct device *dev) int rc; struct pcm3060_priv *priv = dev_get_drvdata(dev); + /* soft reset */ + rc = regmap_update_bits(priv->regmap, PCM3060_REG64, + PCM3060_REG_MRST, 0); + if (rc) { + dev_err(dev, "failed to reset component, rc=%d\n", rc); + return rc; + } + if (dev->of_node) pcm3060_parse_dt(dev->of_node, priv); diff --git a/sound/soc/codecs/pcm3060.h b/sound/soc/codecs/pcm3060.h index 6a027b4a845d..75931c9a9d85 100644 --- a/sound/soc/codecs/pcm3060.h +++ b/sound/soc/codecs/pcm3060.h @@ -17,6 +17,11 @@ extern const struct regmap_config pcm3060_regmap; #define PCM3060_DAI_ID_ADC 1 #define PCM3060_DAI_IDS_NUM 2 +/* ADC and DAC can be clocked from separate or same sources CLK1 and CLK2 */ +#define PCM3060_CLK_DEF 0 /* default: CLK1->ADC, CLK2->DAC */ +#define PCM3060_CLK1 1 +#define PCM3060_CLK2 2 + struct pcm3060_priv_dai { bool is_master; unsigned int sclk_freq; diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index ae3bd533eadb..62d05b01711f 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c @@ -1540,8 +1540,9 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) pcm512x->supply_nb[2].notifier_call = pcm512x_regulator_event_2; for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) { - ret = regulator_register_notifier(pcm512x->supplies[i].consumer, - &pcm512x->supply_nb[i]); + ret = devm_regulator_register_notifier( + pcm512x->supplies[i].consumer, + &pcm512x->supply_nb[i]); if (ret != 0) { dev_err(dev, "Failed to register regulator notifier: %d\n", diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c index c6048d95c6d3..c544a1e35f5e 100644 --- a/sound/soc/codecs/tlv320aic31xx.c +++ b/sound/soc/codecs/tlv320aic31xx.c @@ -1274,8 +1274,9 @@ static int aic31xx_codec_probe(struct snd_soc_component *component) aic31xx->disable_nb[i].nb.notifier_call = aic31xx_regulator_event; aic31xx->disable_nb[i].aic31xx = aic31xx; - ret = regulator_register_notifier(aic31xx->supplies[i].consumer, - &aic31xx->disable_nb[i].nb); + ret = devm_regulator_register_notifier( + aic31xx->supplies[i].consumer, + &aic31xx->disable_nb[i].nb); if (ret) { dev_err(component->dev, "Failed to request regulator notifier: %d\n", @@ -1298,19 +1299,8 @@ static int aic31xx_codec_probe(struct snd_soc_component *component) return 0; } -static void aic31xx_codec_remove(struct snd_soc_component *component) -{ - struct aic31xx_priv *aic31xx = snd_soc_component_get_drvdata(component); - int i; - - for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++) - regulator_unregister_notifier(aic31xx->supplies[i].consumer, - &aic31xx->disable_nb[i].nb); -} - static const struct snd_soc_component_driver soc_codec_driver_aic31xx = { .probe = aic31xx_codec_probe, - .remove = aic31xx_codec_remove, .set_bias_level = aic31xx_set_bias_level, .controls = common31xx_snd_controls, .num_controls = ARRAY_SIZE(common31xx_snd_controls), diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index e1bfba62fc08..96f1526cb258 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c @@ -970,6 +970,7 @@ static int aic32x4_component_probe(struct snd_soc_component 
*component) if (gpio_is_valid(aic32x4->rstn_gpio)) { ndelay(10); gpio_set_value(aic32x4->rstn_gpio, 1); + mdelay(1); } snd_soc_component_write(component, AIC32X4_RESET, 0x01); diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 6aa0edf8c5ef..283583d1db60 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -1615,13 +1615,14 @@ static int aic3x_probe(struct snd_soc_component *component) for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) { aic3x->disable_nb[i].nb.notifier_call = aic3x_regulator_event; aic3x->disable_nb[i].aic3x = aic3x; - ret = regulator_register_notifier(aic3x->supplies[i].consumer, - &aic3x->disable_nb[i].nb); + ret = devm_regulator_register_notifier( + aic3x->supplies[i].consumer, + &aic3x->disable_nb[i].nb); if (ret) { dev_err(component->dev, "Failed to request regulator notifier: %d\n", ret); - goto err_notif; + return ret; } } @@ -1679,29 +1680,11 @@ static int aic3x_probe(struct snd_soc_component *component) aic3x_add_widgets(component); return 0; - -err_notif: - while (i--) - regulator_unregister_notifier(aic3x->supplies[i].consumer, - &aic3x->disable_nb[i].nb); - return ret; -} - -static void aic3x_remove(struct snd_soc_component *component) -{ - struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component); - int i; - - list_del(&aic3x->list); - for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) - regulator_unregister_notifier(aic3x->supplies[i].consumer, - &aic3x->disable_nb[i].nb); } static const struct snd_soc_component_driver soc_component_dev_aic3x = { .set_bias_level = aic3x_set_bias_level, .probe = aic3x_probe, - .remove = aic3x_remove, .controls = aic3x_snd_controls, .num_controls = ARRAY_SIZE(aic3x_snd_controls), .dapm_widgets = aic3x_dapm_widgets, diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c index 1fedf74da705..546ea735f534 100644 --- a/sound/soc/codecs/wm8741.c +++ b/sound/soc/codecs/wm8741.c @@ -196,7 +196,7 @@ static int wm8741_hw_params(struct snd_pcm_substream *substream, { struct snd_soc_component *component = dai->component; struct wm8741_priv *wm8741 = snd_soc_component_get_drvdata(component); - unsigned int iface; + unsigned int iface, mode; int i; /* The set of sample rates that can be supported depends on the @@ -240,11 +240,21 @@ static int wm8741_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } + /* oversampling rate */ + if (params_rate(params) > 96000) + mode = 0x40; + else if (params_rate(params) > 48000) + mode = 0x20; + else + mode = 0x00; + dev_dbg(component->dev, "wm8741_hw_params: bit size param = %d, rate param = %d", params_width(params), params_rate(params)); snd_soc_component_update_bits(component, WM8741_FORMAT_CONTROL, WM8741_IWL_MASK, iface); + snd_soc_component_update_bits(component, WM8741_MODE_CONTROL_1, WM8741_OSR_MASK, + mode); return 0; } @@ -358,6 +368,15 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai, return 0; } +static int wm8741_mute(struct snd_soc_dai *codec_dai, int mute) +{ + struct snd_soc_component *component = codec_dai->component; + + snd_soc_component_update_bits(component, WM8741_VOLUME_CONTROL, + WM8741_SOFT_MASK, !!mute << WM8741_SOFT_SHIFT); + return 0; +} + #define WM8741_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \ @@ -371,6 +390,7 @@ static const struct snd_soc_dai_ops wm8741_dai_ops = { .hw_params = wm8741_hw_params, .set_sysclk = wm8741_set_dai_sysclk, .set_fmt = 
wm8741_set_dai_fmt, + .digital_mute = wm8741_mute, }; static struct snd_soc_dai_driver wm8741_dai = { diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c index 806245c70f8b..37467c512597 100644 --- a/sound/soc/codecs/wm8770.c +++ b/sound/soc/codecs/wm8770.c @@ -666,8 +666,9 @@ static int wm8770_spi_probe(struct spi_device *spi) /* This should really be moved into the regulator core */ for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) { - ret = regulator_register_notifier(wm8770->supplies[i].consumer, - &wm8770->disable_nb[i]); + ret = devm_regulator_register_notifier( + wm8770->supplies[i].consumer, + &wm8770->disable_nb[i]); if (ret) { dev_err(&spi->dev, "Failed to register regulator notifier: %d\n", @@ -687,25 +688,12 @@ static int wm8770_spi_probe(struct spi_device *spi) return ret; } -static int wm8770_spi_remove(struct spi_device *spi) -{ - struct wm8770_priv *wm8770 = spi_get_drvdata(spi); - int i; - - for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i) - regulator_unregister_notifier(wm8770->supplies[i].consumer, - &wm8770->disable_nb[i]); - - return 0; -} - static struct spi_driver wm8770_spi_driver = { .driver = { .name = "wm8770", .of_match_table = wm8770_of_match, }, .probe = wm8770_spi_probe, - .remove = wm8770_spi_remove }; module_spi_driver(wm8770_spi_driver); diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index efd8910b1ff7..467ed78dd2df 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -3424,8 +3424,9 @@ static int wm8962_probe(struct snd_soc_component *component) /* This should really be moved into the regulator core */ for (i = 0; i < ARRAY_SIZE(wm8962->supplies); i++) { - ret = regulator_register_notifier(wm8962->supplies[i].consumer, - &wm8962->disable_nb[i]); + ret = devm_regulator_register_notifier( + wm8962->supplies[i].consumer, + &wm8962->disable_nb[i]); if (ret != 0) { dev_err(component->dev, "Failed to register regulator notifier: %d\n", @@ -3467,15 +3468,11 @@ static int wm8962_probe(struct snd_soc_component *component) static void wm8962_remove(struct snd_soc_component *component) { struct wm8962_priv *wm8962 = snd_soc_component_get_drvdata(component); - int i; cancel_delayed_work_sync(&wm8962->mic_work); wm8962_free_gpio(component); wm8962_free_beep(component); - for (i = 0; i < ARRAY_SIZE(wm8962->supplies); i++) - regulator_unregister_notifier(wm8962->supplies[i].consumer, - &wm8962->disable_nb[i]); } static const struct snd_soc_component_driver soc_component_dev_wm8962 = { diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c index 68c99fe37097..79ee91906bb9 100644 --- a/sound/soc/codecs/wm8995.c +++ b/sound/soc/codecs/wm8995.c @@ -1995,20 +1995,6 @@ static int wm8995_set_bias_level(struct snd_soc_component *component, return 0; } -static void wm8995_remove(struct snd_soc_component *component) -{ - struct wm8995_priv *wm8995; - int i; - - wm8995 = snd_soc_component_get_drvdata(component); - - for (i = 0; i < ARRAY_SIZE(wm8995->supplies); ++i) - regulator_unregister_notifier(wm8995->supplies[i].consumer, - &wm8995->disable_nb[i]); - - regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); -} - static int wm8995_probe(struct snd_soc_component *component) { struct wm8995_priv *wm8995; @@ -2021,8 +2007,9 @@ static int wm8995_probe(struct snd_soc_component *component) for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++) wm8995->supplies[i].supply = wm8995_supply_names[i]; - ret = regulator_bulk_get(component->dev, ARRAY_SIZE(wm8995->supplies), - wm8995->supplies); + ret = 
devm_regulator_bulk_get(component->dev, + ARRAY_SIZE(wm8995->supplies), + wm8995->supplies); if (ret) { dev_err(component->dev, "Failed to request supplies: %d\n", ret); return ret; @@ -2039,8 +2026,9 @@ static int wm8995_probe(struct snd_soc_component *component) /* This should really be moved into the regulator core */ for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++) { - ret = regulator_register_notifier(wm8995->supplies[i].consumer, - &wm8995->disable_nb[i]); + ret = devm_regulator_register_notifier( + wm8995->supplies[i].consumer, + &wm8995->disable_nb[i]); if (ret) { dev_err(component->dev, "Failed to register regulator notifier: %d\n", @@ -2052,7 +2040,7 @@ static int wm8995_probe(struct snd_soc_component *component) wm8995->supplies); if (ret) { dev_err(component->dev, "Failed to enable supplies: %d\n", ret); - goto err_reg_get; + return ret; } ret = snd_soc_component_read32(component, WM8995_SOFTWARE_RESET); @@ -2099,8 +2087,6 @@ static int wm8995_probe(struct snd_soc_component *component) err_reg_enable: regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); -err_reg_get: - regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); return ret; } @@ -2188,7 +2174,6 @@ static struct snd_soc_dai_driver wm8995_dai[] = { static const struct snd_soc_component_driver soc_component_dev_wm8995 = { .probe = wm8995_probe, - .remove = wm8995_remove, .set_bias_level = wm8995_set_bias_level, .controls = wm8995_snd_controls, .num_controls = ARRAY_SIZE(wm8995_snd_controls), diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c index 91711f8958c5..ab04ea18c312 100644 --- a/sound/soc/codecs/wm8996.c +++ b/sound/soc/codecs/wm8996.c @@ -2801,8 +2801,9 @@ static int wm8996_i2c_probe(struct i2c_client *i2c, /* This should really be moved into the regulator core */ for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) { - ret = regulator_register_notifier(wm8996->supplies[i].consumer, - &wm8996->disable_nb[i]); + ret = devm_regulator_register_notifier( + wm8996->supplies[i].consumer, + &wm8996->disable_nb[i]); if (ret != 0) { dev_err(&i2c->dev, "Failed to register regulator notifier: %d\n", @@ -3071,16 +3072,12 @@ err: static int wm8996_i2c_remove(struct i2c_client *client) { struct wm8996_priv *wm8996 = i2c_get_clientdata(client); - int i; wm8996_free_gpio(wm8996); if (wm8996->pdata.ldo_ena > 0) { gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0); gpio_free(wm8996->pdata.ldo_ena); } - for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) - regulator_unregister_notifier(wm8996->supplies[i].consumer, - &wm8996->disable_nb[i]); return 0; } diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 1dd291cebe67..b93fdc8d2d6f 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -46,6 +46,13 @@ #define adsp_dbg(_dsp, fmt, ...) \ dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__) +#define compr_err(_obj, fmt, ...) \ + adsp_err(_obj->dsp, "%s: " fmt, _obj->name ? _obj->name : "legacy", \ + ##__VA_ARGS__) +#define compr_dbg(_obj, fmt, ...) \ + adsp_dbg(_obj->dsp, "%s: " fmt, _obj->name ? 
_obj->name : "legacy", \ + ##__VA_ARGS__) + #define ADSP1_CONTROL_1 0x00 #define ADSP1_CONTROL_2 0x02 #define ADSP1_CONTROL_3 0x03 @@ -310,6 +317,12 @@ struct wm_adsp_alg_xm_struct { __be64 smoothed_power; }; +struct wm_adsp_host_buf_coeff_v1 { + __be32 host_buf_ptr; /* Host buffer pointer */ + __be32 versions; /* Version numbers */ + __be32 name[4]; /* The buffer name */ +}; + struct wm_adsp_buffer { __be32 buf1_base; /* Base addr of first buffer area */ __be32 buf1_size; /* Size of buf1 area in DSP words */ @@ -334,6 +347,7 @@ struct wm_adsp_buffer { struct wm_adsp_compr; struct wm_adsp_compr_buf { + struct list_head list; struct wm_adsp *dsp; struct wm_adsp_compr *compr; @@ -344,9 +358,13 @@ struct wm_adsp_compr_buf { u32 irq_count; int read_index; int avail; + int host_buf_mem_type; + + char *name; }; struct wm_adsp_compr { + struct list_head list; struct wm_adsp *dsp; struct wm_adsp_compr_buf *buf; @@ -357,6 +375,8 @@ struct wm_adsp_compr { unsigned int copied_total; unsigned int sample_rate; + + const char *name; }; #define WM_ADSP_DATA_WORD_SIZE 3 @@ -374,6 +394,11 @@ struct wm_adsp_compr { #define ALG_XM_FIELD(field) \ (offsetof(struct wm_adsp_alg_xm_struct, field) / sizeof(__be32)) +#define HOST_BUF_COEFF_SUPPORTED_COMPAT_VER 1 + +#define HOST_BUF_COEFF_COMPAT_VER_MASK 0xFF00 +#define HOST_BUF_COEFF_COMPAT_VER_SHIFT 8 + static int wm_adsp_buffer_init(struct wm_adsp *dsp); static int wm_adsp_buffer_free(struct wm_adsp *dsp); @@ -707,7 +732,7 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol, mutex_lock(&dsp[e->shift_l].pwr_lock); - if (dsp[e->shift_l].booted || dsp[e->shift_l].compr) + if (dsp[e->shift_l].booted || !list_empty(&dsp[e->shift_l].compr_list)) ret = -EBUSY; else dsp[e->shift_l].fw = ucontrol->value.enumerated.item[0]; @@ -2429,6 +2454,8 @@ static int wm_adsp_common_init(struct wm_adsp *dsp) INIT_LIST_HEAD(&dsp->alg_regions); INIT_LIST_HEAD(&dsp->ctl_list); + INIT_LIST_HEAD(&dsp->compr_list); + INIT_LIST_HEAD(&dsp->buffer_list); mutex_init(&dsp->pwr_lock); @@ -2971,14 +2998,19 @@ static inline int wm_adsp_compr_attached(struct wm_adsp_compr *compr) static int wm_adsp_compr_attach(struct wm_adsp_compr *compr) { - /* - * Note this will be more complex once each DSP can support multiple - * streams - */ - if (!compr->dsp->buffer) + struct wm_adsp_compr_buf *buf = NULL, *tmp; + + list_for_each_entry(tmp, &compr->dsp->buffer_list, list) { + if (!tmp->name || !strcmp(compr->name, tmp->name)) { + buf = tmp; + break; + } + } + + if (!buf) return -EINVAL; - compr->buf = compr->dsp->buffer; + compr->buf = buf; compr->buf->compr = compr; return 0; @@ -3001,28 +3033,33 @@ static void wm_adsp_compr_detach(struct wm_adsp_compr *compr) int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream) { - struct wm_adsp_compr *compr; + struct wm_adsp_compr *compr, *tmp; + struct snd_soc_pcm_runtime *rtd = stream->private_data; int ret = 0; mutex_lock(&dsp->pwr_lock); if (wm_adsp_fw[dsp->fw].num_caps == 0) { - adsp_err(dsp, "Firmware does not support compressed API\n"); + adsp_err(dsp, "%s: Firmware does not support compressed API\n", + rtd->codec_dai->name); ret = -ENXIO; goto out; } if (wm_adsp_fw[dsp->fw].compr_direction != stream->direction) { - adsp_err(dsp, "Firmware does not support stream direction\n"); + adsp_err(dsp, "%s: Firmware does not support stream direction\n", + rtd->codec_dai->name); ret = -EINVAL; goto out; } - if (dsp->compr) { - /* It is expect this limitation will be removed in future */ - adsp_err(dsp, "Only a single stream supported per DSP\n"); - 
ret = -EBUSY; - goto out; + list_for_each_entry(tmp, &dsp->compr_list, list) { + if (!strcmp(tmp->name, rtd->codec_dai->name)) { + adsp_err(dsp, "%s: Only a single stream supported per dai\n", + rtd->codec_dai->name); + ret = -EBUSY; + goto out; + } } compr = kzalloc(sizeof(*compr), GFP_KERNEL); @@ -3033,8 +3070,9 @@ int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream) compr->dsp = dsp; compr->stream = stream; + compr->name = rtd->codec_dai->name; - dsp->compr = compr; + list_add_tail(&compr->list, &dsp->compr_list); stream->runtime->private_data = compr; @@ -3053,7 +3091,7 @@ int wm_adsp_compr_free(struct snd_compr_stream *stream) mutex_lock(&dsp->pwr_lock); wm_adsp_compr_detach(compr); - dsp->compr = NULL; + list_del(&compr->list); kfree(compr->raw_buf); kfree(compr); @@ -3078,9 +3116,9 @@ static int wm_adsp_compr_check_params(struct snd_compr_stream *stream, params->buffer.fragments < WM_ADSP_MIN_FRAGMENTS || params->buffer.fragments > WM_ADSP_MAX_FRAGMENTS || params->buffer.fragment_size % WM_ADSP_DATA_WORD_SIZE) { - adsp_err(dsp, "Invalid buffer fragsize=%d fragments=%d\n", - params->buffer.fragment_size, - params->buffer.fragments); + compr_err(compr, "Invalid buffer fragsize=%d fragments=%d\n", + params->buffer.fragment_size, + params->buffer.fragments); return -EINVAL; } @@ -3108,9 +3146,9 @@ static int wm_adsp_compr_check_params(struct snd_compr_stream *stream, return 0; } - adsp_err(dsp, "Invalid params id=%u ch=%u,%u rate=%u fmt=%u\n", - params->codec.id, params->codec.ch_in, params->codec.ch_out, - params->codec.sample_rate, params->codec.format); + compr_err(compr, "Invalid params id=%u ch=%u,%u rate=%u fmt=%u\n", + params->codec.id, params->codec.ch_in, params->codec.ch_out, + params->codec.sample_rate, params->codec.format); return -EINVAL; } @@ -3132,8 +3170,8 @@ int wm_adsp_compr_set_params(struct snd_compr_stream *stream, compr->size = params->buffer; - adsp_dbg(compr->dsp, "fragment_size=%d fragments=%d\n", - compr->size.fragment_size, compr->size.fragments); + compr_dbg(compr, "fragment_size=%d fragments=%d\n", + compr->size.fragment_size, compr->size.fragments); size = wm_adsp_compr_frag_words(compr) * sizeof(*compr->raw_buf); compr->raw_buf = kmalloc(size, GFP_DMA | GFP_KERNEL); @@ -3219,24 +3257,106 @@ static int wm_adsp_write_data_word(struct wm_adsp *dsp, int mem_type, static inline int wm_adsp_buffer_read(struct wm_adsp_compr_buf *buf, unsigned int field_offset, u32 *data) { - return wm_adsp_read_data_word(buf->dsp, WMFW_ADSP2_XM, + return wm_adsp_read_data_word(buf->dsp, buf->host_buf_mem_type, buf->host_buf_ptr + field_offset, data); } static inline int wm_adsp_buffer_write(struct wm_adsp_compr_buf *buf, unsigned int field_offset, u32 data) { - return wm_adsp_write_data_word(buf->dsp, WMFW_ADSP2_XM, + return wm_adsp_write_data_word(buf->dsp, buf->host_buf_mem_type, buf->host_buf_ptr + field_offset, data); } -static int wm_adsp_legacy_host_buf_addr(struct wm_adsp_compr_buf *buf) +static void wm_adsp_remove_padding(u32 *buf, int nwords, int data_word_size) +{ + u8 *pack_in = (u8 *)buf; + u8 *pack_out = (u8 *)buf; + int i, j; + + /* Remove the padding bytes from the data read from the DSP */ + for (i = 0; i < nwords; i++) { + for (j = 0; j < data_word_size; j++) + *pack_out++ = *pack_in++; + + pack_in += sizeof(*buf) - data_word_size; + } +} + +static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf) +{ + const struct wm_adsp_fw_caps *caps = wm_adsp_fw[buf->dsp->fw].caps; + struct wm_adsp_buffer_region *region; + u32 offset = 0; + 
int i, ret; + + buf->regions = kcalloc(caps->num_regions, sizeof(*buf->regions), + GFP_KERNEL); + if (!buf->regions) + return -ENOMEM; + + for (i = 0; i < caps->num_regions; ++i) { + region = &buf->regions[i]; + + region->offset = offset; + region->mem_type = caps->region_defs[i].mem_type; + + ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset, + ®ion->base_addr); + if (ret < 0) + return ret; + + ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset, + &offset); + if (ret < 0) + return ret; + + region->cumulative_size = offset; + + compr_dbg(buf, + "region=%d type=%d base=%08x off=%08x size=%08x\n", + i, region->mem_type, region->base_addr, + region->offset, region->cumulative_size); + } + + return 0; +} + +static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf) +{ + buf->irq_count = 0xFFFFFFFF; + buf->read_index = -1; + buf->avail = 0; +} + +static struct wm_adsp_compr_buf *wm_adsp_buffer_alloc(struct wm_adsp *dsp) +{ + struct wm_adsp_compr_buf *buf; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return NULL; + + buf->dsp = dsp; + + wm_adsp_buffer_clear(buf); + + list_add_tail(&buf->list, &dsp->buffer_list); + + return buf; +} + +static int wm_adsp_buffer_parse_legacy(struct wm_adsp *dsp) { struct wm_adsp_alg_region *alg_region; - struct wm_adsp *dsp = buf->dsp; + struct wm_adsp_compr_buf *buf; u32 xmalg, addr, magic; int i, ret; + buf = wm_adsp_buffer_alloc(dsp); + if (!buf) + return -ENOMEM; + alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id); xmalg = sizeof(struct wm_adsp_system_config_xm_hdr) / sizeof(__be32); @@ -3246,7 +3366,7 @@ static int wm_adsp_legacy_host_buf_addr(struct wm_adsp_compr_buf *buf) return ret; if (magic != WM_ADSP_ALG_XM_STRUCT_MAGIC) - return -EINVAL; + return -ENODEV; addr = alg_region->base + xmalg + ALG_XM_FIELD(host_buf_ptr); for (i = 0; i < 5; ++i) { @@ -3264,48 +3384,30 @@ static int wm_adsp_legacy_host_buf_addr(struct wm_adsp_compr_buf *buf) if (!buf->host_buf_ptr) return -EIO; - adsp_dbg(dsp, "host_buf_ptr=%x\n", buf->host_buf_ptr); - - return 0; -} - -static struct wm_coeff_ctl * -wm_adsp_find_host_buffer_ctrl(struct wm_adsp_compr_buf *buf) -{ - struct wm_adsp *dsp = buf->dsp; - struct wm_coeff_ctl *ctl; - - list_for_each_entry(ctl, &dsp->ctl_list, list) { - if (ctl->type != WMFW_CTL_TYPE_HOST_BUFFER) - continue; + buf->host_buf_mem_type = WMFW_ADSP2_XM; - if (!ctl->enabled) - continue; + ret = wm_adsp_buffer_populate(buf); + if (ret < 0) + return ret; - return ctl; - } + compr_dbg(buf, "legacy host_buf_ptr=%x\n", buf->host_buf_ptr); - return NULL; + return 0; } -static int wm_adsp_buffer_locate(struct wm_adsp_compr_buf *buf) +static int wm_adsp_buffer_parse_coeff(struct wm_coeff_ctl *ctl) { - struct wm_adsp *dsp = buf->dsp; - struct wm_coeff_ctl *ctl; - unsigned int reg; - u32 val; - int i, ret; - - ctl = wm_adsp_find_host_buffer_ctrl(buf); - if (!ctl) - return wm_adsp_legacy_host_buf_addr(buf); + struct wm_adsp_host_buf_coeff_v1 coeff_v1; + struct wm_adsp_compr_buf *buf; + unsigned int val, reg; + int ret, i; ret = wm_coeff_base_reg(ctl, ®); if (ret) return ret; for (i = 0; i < 5; ++i) { - ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val)); + ret = regmap_raw_read(ctl->dsp->regmap, reg, &val, sizeof(val)); if (ret < 0) return ret; @@ -3315,108 +3417,130 @@ static int wm_adsp_buffer_locate(struct wm_adsp_compr_buf *buf) usleep_range(1000, 2000); } - if (!val) + if (!val) { + adsp_err(ctl->dsp, "Failed to acquire host buffer\n"); return -EIO; + } + + buf = 
wm_adsp_buffer_alloc(ctl->dsp); + if (!buf) + return -ENOMEM; + buf->host_buf_mem_type = ctl->alg_region.type; buf->host_buf_ptr = be32_to_cpu(val); - adsp_dbg(dsp, "host_buf_ptr=%x\n", buf->host_buf_ptr); - return 0; -} + ret = wm_adsp_buffer_populate(buf); + if (ret < 0) + return ret; -static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf) -{ - const struct wm_adsp_fw_caps *caps = wm_adsp_fw[buf->dsp->fw].caps; - struct wm_adsp_buffer_region *region; - u32 offset = 0; - int i, ret; + /* + * v0 host_buffer coefficients didn't have versioning, so if the + * control is one word, assume version 0. + */ + if (ctl->len == 4) { + compr_dbg(buf, "host_buf_ptr=%x\n", buf->host_buf_ptr); + return 0; + } - for (i = 0; i < caps->num_regions; ++i) { - region = &buf->regions[i]; + ret = regmap_raw_read(ctl->dsp->regmap, reg, &coeff_v1, + sizeof(coeff_v1)); + if (ret < 0) + return ret; - region->offset = offset; - region->mem_type = caps->region_defs[i].mem_type; + coeff_v1.versions = be32_to_cpu(coeff_v1.versions); + val = coeff_v1.versions & HOST_BUF_COEFF_COMPAT_VER_MASK; + val >>= HOST_BUF_COEFF_COMPAT_VER_SHIFT; - ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset, - ®ion->base_addr); - if (ret < 0) - return ret; + if (val > HOST_BUF_COEFF_SUPPORTED_COMPAT_VER) { + adsp_err(ctl->dsp, + "Host buffer coeff ver %u > supported version %u\n", + val, HOST_BUF_COEFF_SUPPORTED_COMPAT_VER); + return -EINVAL; + } - ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset, - &offset); - if (ret < 0) - return ret; + for (i = 0; i < ARRAY_SIZE(coeff_v1.name); i++) + coeff_v1.name[i] = be32_to_cpu(coeff_v1.name[i]); - region->cumulative_size = offset; + wm_adsp_remove_padding((u32 *)&coeff_v1.name, + ARRAY_SIZE(coeff_v1.name), + WM_ADSP_DATA_WORD_SIZE); - adsp_dbg(buf->dsp, - "region=%d type=%d base=%08x off=%08x size=%08x\n", - i, region->mem_type, region->base_addr, - region->offset, region->cumulative_size); - } + buf->name = kasprintf(GFP_KERNEL, "%s-dsp-%s", ctl->dsp->part, + (char *)&coeff_v1.name); - return 0; -} + compr_dbg(buf, "host_buf_ptr=%x coeff version %u\n", + buf->host_buf_ptr, val); -static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf) -{ - buf->irq_count = 0xFFFFFFFF; - buf->read_index = -1; - buf->avail = 0; + return val; } static int wm_adsp_buffer_init(struct wm_adsp *dsp) { - struct wm_adsp_compr_buf *buf; + struct wm_coeff_ctl *ctl; int ret; - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - return -ENOMEM; - - buf->dsp = dsp; - - wm_adsp_buffer_clear(buf); + list_for_each_entry(ctl, &dsp->ctl_list, list) { + if (ctl->type != WMFW_CTL_TYPE_HOST_BUFFER) + continue; - ret = wm_adsp_buffer_locate(buf); - if (ret < 0) { - adsp_err(dsp, "Failed to acquire host buffer: %d\n", ret); - goto err_buffer; - } + if (!ctl->enabled) + continue; - buf->regions = kcalloc(wm_adsp_fw[dsp->fw].caps->num_regions, - sizeof(*buf->regions), GFP_KERNEL); - if (!buf->regions) { - ret = -ENOMEM; - goto err_buffer; + ret = wm_adsp_buffer_parse_coeff(ctl); + if (ret < 0) { + adsp_err(dsp, "Failed to parse coeff: %d\n", ret); + goto error; + } else if (ret == 0) { + /* Only one buffer supported for version 0 */ + return 0; + } } - ret = wm_adsp_buffer_populate(buf); - if (ret < 0) { - adsp_err(dsp, "Failed to populate host buffer: %d\n", ret); - goto err_regions; + if (list_empty(&dsp->buffer_list)) { + /* Fall back to legacy support */ + ret = wm_adsp_buffer_parse_legacy(dsp); + if (ret) { + adsp_err(dsp, "Failed to parse legacy: %d\n", ret); + goto error; + } } - 
dsp->buffer = buf; - return 0; -err_regions: - kfree(buf->regions); -err_buffer: - kfree(buf); +error: + wm_adsp_buffer_free(dsp); return ret; } static int wm_adsp_buffer_free(struct wm_adsp *dsp) { - if (dsp->buffer) { - wm_adsp_compr_detach(dsp->buffer->compr); + struct wm_adsp_compr_buf *buf, *tmp; + + list_for_each_entry_safe(buf, tmp, &dsp->buffer_list, list) { + if (buf->compr) + wm_adsp_compr_detach(buf->compr); + + kfree(buf->name); + kfree(buf->regions); + list_del(&buf->list); + kfree(buf); + } - kfree(dsp->buffer->regions); - kfree(dsp->buffer); + return 0; +} + +static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf) +{ + int ret; - dsp->buffer = NULL; + ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error); + if (ret < 0) { + adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret); + return ret; + } + if (buf->error != 0) { + adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error); + return -EIO; } return 0; @@ -3428,7 +3552,7 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd) struct wm_adsp *dsp = compr->dsp; int ret = 0; - adsp_dbg(dsp, "Trigger: %d\n", cmd); + compr_dbg(compr, "Trigger: %d\n", cmd); mutex_lock(&dsp->pwr_lock); @@ -3437,12 +3561,16 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd) if (!wm_adsp_compr_attached(compr)) { ret = wm_adsp_compr_attach(compr); if (ret < 0) { - adsp_err(dsp, "Failed to link buffer and stream: %d\n", - ret); + compr_err(compr, "Failed to link buffer and stream: %d\n", + ret); break; } } + ret = wm_adsp_buffer_get_error(compr->buf); + if (ret < 0) + break; + wm_adsp_buffer_clear(compr->buf); /* Trigger the IRQ at one fragment of data */ @@ -3450,8 +3578,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd) HOST_BUFFER_FIELD(high_water_mark), wm_adsp_compr_frag_words(compr)); if (ret < 0) { - adsp_err(dsp, "Failed to set high water mark: %d\n", - ret); + compr_err(compr, "Failed to set high water mark: %d\n", + ret); break; } break; @@ -3492,7 +3620,7 @@ static int wm_adsp_buffer_update_avail(struct wm_adsp_compr_buf *buf) read_index = sign_extend32(next_read_index, 23); if (read_index < 0) { - adsp_dbg(buf->dsp, "Avail check on unstarted stream\n"); + compr_dbg(buf, "Avail check on unstarted stream\n"); return 0; } @@ -3510,31 +3638,14 @@ static int wm_adsp_buffer_update_avail(struct wm_adsp_compr_buf *buf) if (avail < 0) avail += wm_adsp_buffer_size(buf); - adsp_dbg(buf->dsp, "readindex=0x%x, writeindex=0x%x, avail=%d\n", - buf->read_index, write_index, avail * WM_ADSP_DATA_WORD_SIZE); + compr_dbg(buf, "readindex=0x%x, writeindex=0x%x, avail=%d\n", + buf->read_index, write_index, avail * WM_ADSP_DATA_WORD_SIZE); buf->avail = avail; return 0; } -static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf) -{ - int ret; - - ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error); - if (ret < 0) { - adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret); - return ret; - } - if (buf->error != 0) { - adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error); - return -EIO; - } - - return 0; -} - int wm_adsp_compr_handle_irq(struct wm_adsp *dsp) { struct wm_adsp_compr_buf *buf; @@ -3543,39 +3654,40 @@ int wm_adsp_compr_handle_irq(struct wm_adsp *dsp) mutex_lock(&dsp->pwr_lock); - buf = dsp->buffer; - compr = dsp->compr; - - if (!buf) { + if (list_empty(&dsp->buffer_list)) { ret = -ENODEV; goto out; } adsp_dbg(dsp, "Handling buffer IRQ\n"); - ret = wm_adsp_buffer_get_error(buf); - if (ret < 0) - goto out_notify; 
/* Wake poll to report error */ + list_for_each_entry(buf, &dsp->buffer_list, list) { + compr = buf->compr; - ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(irq_count), - &buf->irq_count); - if (ret < 0) { - adsp_err(dsp, "Failed to get irq_count: %d\n", ret); - goto out; - } + ret = wm_adsp_buffer_get_error(buf); + if (ret < 0) + goto out_notify; /* Wake poll to report error */ - ret = wm_adsp_buffer_update_avail(buf); - if (ret < 0) { - adsp_err(dsp, "Error reading avail: %d\n", ret); - goto out; - } + ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(irq_count), + &buf->irq_count); + if (ret < 0) { + compr_err(buf, "Failed to get irq_count: %d\n", ret); + goto out; + } - if (wm_adsp_fw[dsp->fw].voice_trigger && buf->irq_count == 2) - ret = WM_ADSP_COMPR_VOICE_TRIGGER; + ret = wm_adsp_buffer_update_avail(buf); + if (ret < 0) { + compr_err(buf, "Error reading avail: %d\n", ret); + goto out; + } + + if (wm_adsp_fw[dsp->fw].voice_trigger && buf->irq_count == 2) + ret = WM_ADSP_COMPR_VOICE_TRIGGER; out_notify: - if (compr && compr->stream) - snd_compr_fragment_elapsed(compr->stream); + if (compr && compr->stream) + snd_compr_fragment_elapsed(compr->stream); + } out: mutex_unlock(&dsp->pwr_lock); @@ -3589,8 +3701,7 @@ static int wm_adsp_buffer_reenable_irq(struct wm_adsp_compr_buf *buf) if (buf->irq_count & 0x01) return 0; - adsp_dbg(buf->dsp, "Enable IRQ(0x%x) for next fragment\n", - buf->irq_count); + compr_dbg(buf, "Enable IRQ(0x%x) for next fragment\n", buf->irq_count); buf->irq_count |= 0x01; @@ -3606,7 +3717,7 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream, struct wm_adsp_compr_buf *buf; int ret = 0; - adsp_dbg(dsp, "Pointer request\n"); + compr_dbg(compr, "Pointer request\n"); mutex_lock(&dsp->pwr_lock); @@ -3621,7 +3732,7 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream, if (buf->avail < wm_adsp_compr_frag_words(compr)) { ret = wm_adsp_buffer_update_avail(buf); if (ret < 0) { - adsp_err(dsp, "Error reading avail: %d\n", ret); + compr_err(compr, "Error reading avail: %d\n", ret); goto out; } @@ -3640,9 +3751,8 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream, ret = wm_adsp_buffer_reenable_irq(buf); if (ret < 0) { - adsp_err(dsp, - "Failed to re-enable buffer IRQ: %d\n", - ret); + compr_err(compr, "Failed to re-enable buffer IRQ: %d\n", + ret); goto out; } } @@ -3662,11 +3772,9 @@ EXPORT_SYMBOL_GPL(wm_adsp_compr_pointer); static int wm_adsp_buffer_capture_block(struct wm_adsp_compr *compr, int target) { struct wm_adsp_compr_buf *buf = compr->buf; - u8 *pack_in = (u8 *)compr->raw_buf; - u8 *pack_out = (u8 *)compr->raw_buf; unsigned int adsp_addr; int mem_type, nwords, max_read; - int i, j, ret; + int i, ret; /* Calculate read parameters */ for (i = 0; i < wm_adsp_fw[buf->dsp->fw].caps->num_regions; ++i) @@ -3698,13 +3806,7 @@ static int wm_adsp_buffer_capture_block(struct wm_adsp_compr *compr, int target) if (ret < 0) return ret; - /* Remove the padding bytes from the data read from the DSP */ - for (i = 0; i < nwords; i++) { - for (j = 0; j < WM_ADSP_DATA_WORD_SIZE; j++) - *pack_out++ = *pack_in++; - - pack_in += sizeof(*(compr->raw_buf)) - WM_ADSP_DATA_WORD_SIZE; - } + wm_adsp_remove_padding(compr->raw_buf, nwords, WM_ADSP_DATA_WORD_SIZE); /* update read index to account for words read */ buf->read_index += nwords; @@ -3725,11 +3827,10 @@ static int wm_adsp_buffer_capture_block(struct wm_adsp_compr *compr, int target) static int wm_adsp_compr_read(struct wm_adsp_compr *compr, char __user *buf, size_t count) { - struct wm_adsp *dsp = compr->dsp; 
int ntotal = 0; int nwords, nbytes; - adsp_dbg(dsp, "Requested read of %zu bytes\n", count); + compr_dbg(compr, "Requested read of %zu bytes\n", count); if (!compr->buf || compr->buf->error) { snd_compr_stop_error(compr->stream, SNDRV_PCM_STATE_XRUN); @@ -3741,17 +3842,18 @@ static int wm_adsp_compr_read(struct wm_adsp_compr *compr, do { nwords = wm_adsp_buffer_capture_block(compr, count); if (nwords < 0) { - adsp_err(dsp, "Failed to capture block: %d\n", nwords); + compr_err(compr, "Failed to capture block: %d\n", + nwords); return nwords; } nbytes = nwords * WM_ADSP_DATA_WORD_SIZE; - adsp_dbg(dsp, "Read %d bytes\n", nbytes); + compr_dbg(compr, "Read %d bytes\n", nbytes); if (copy_to_user(buf + ntotal, compr->raw_buf, nbytes)) { - adsp_err(dsp, "Failed to copy data to user: %d, %d\n", - ntotal, nbytes); + compr_err(compr, "Failed to copy data to user: %d, %d\n", + ntotal, nbytes); return -EFAULT; } diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h index 4b8778b0b06c..59e07ad16329 100644 --- a/sound/soc/codecs/wm_adsp.h +++ b/sound/soc/codecs/wm_adsp.h @@ -90,8 +90,8 @@ struct wm_adsp { struct work_struct boot_work; - struct wm_adsp_compr *compr; - struct wm_adsp_compr_buf *buffer; + struct list_head compr_list; + struct list_head buffer_list; struct mutex pwr_lock; diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index 57b484768a58..afe67c865330 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -398,7 +398,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) break; case SND_SOC_DAIFMT_RIGHT_J: /* Data on rising edge of bclk, frame high, right aligned */ - xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA; + xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP; + xcr |= ESAI_xCR_xWA; break; case SND_SOC_DAIFMT_DSP_A: /* Data on rising edge of bclk, frame high, 1clk before data */ @@ -455,12 +456,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) return -EINVAL; } - mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR; + mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA; regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr); regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr); mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP | - ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA; + ESAI_xCCR_xFSD | ESAI_xCCR_xCKD; regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr); regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr); diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index a26686e7281c..4842e6df9a2d 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c @@ -96,7 +96,7 @@ struct fsl_spdif_priv { bool dpll_locked; u32 txrate[SPDIF_TXRATE_MAX]; u8 txclk_df[SPDIF_TXRATE_MAX]; - u8 sysclk_df[SPDIF_TXRATE_MAX]; + u16 sysclk_df[SPDIF_TXRATE_MAX]; u8 txclk_src[SPDIF_TXRATE_MAX]; u8 rxclk_src; struct clk *txclk[SPDIF_TXRATE_MAX]; @@ -376,7 +376,8 @@ static int spdif_set_sample_rate(struct snd_pcm_substream *substream, struct platform_device *pdev = spdif_priv->pdev; unsigned long csfs = 0; u32 stc, mask, rate; - u8 clk, txclk_df, sysclk_df; + u16 sysclk_df; + u8 clk, txclk_df; int ret; switch (sample_rate) { @@ -1109,8 +1110,9 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv, static const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk); u64 rate_ideal, rate_actual, sub; - u32 sysclk_dfmin, sysclk_dfmax; - u32 txclk_df, sysclk_df, arate; + u32 
arate; + u16 sysclk_dfmin, sysclk_dfmax, sysclk_df; + u8 txclk_df; /* The sysclk has an extra divisor [2, 512] */ sysclk_dfmin = is_sysclk ? 2 : 1; diff --git a/sound/soc/fsl/fsl_spdif.h b/sound/soc/fsl/fsl_spdif.h index 7666dabaccfd..e6c61e07bc1a 100644 --- a/sound/soc/fsl/fsl_spdif.h +++ b/sound/soc/fsl/fsl_spdif.h @@ -152,7 +152,7 @@ enum spdif_gainsel { #define STC_TXCLK_ALL_EN_MASK (1 << STC_TXCLK_ALL_EN_OFFSET) #define STC_TXCLK_ALL_EN (1 << STC_TXCLK_ALL_EN_OFFSET) #define STC_TXCLK_DF_OFFSET 0 -#define STC_TXCLK_DF_MASK (0x7ff << STC_TXCLK_DF_OFFSET) +#define STC_TXCLK_DF_MASK (0x7f << STC_TXCLK_DF_OFFSET) #define STC_TXCLK_DF(x) ((((x) - 1) << STC_TXCLK_DF_OFFSET) & STC_TXCLK_DF_MASK) #define STC_TXCLK_SRC_MAX 8 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 0a648229e643..09b2967befd9 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -1439,8 +1439,10 @@ static int fsl_ssi_probe_from_dt(struct fsl_ssi *ssi) * different name to register the device. */ if (!ssi->card_name[0] && of_get_property(np, "codec-handle", NULL)) { - sprop = of_get_property(of_find_node_by_path("/"), - "compatible", NULL); + struct device_node *root = of_find_node_by_path("/"); + + sprop = of_get_property(root, "compatible", NULL); + of_node_put(root); /* Strip "fsl," in the compatible name if applicable */ p = strrchr(sprop, ','); if (p) diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c index b6cb80480b60..bf8597f57dce 100644 --- a/sound/soc/fsl/imx-sgtl5000.c +++ b/sound/soc/fsl/imx-sgtl5000.c @@ -108,6 +108,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev) ret = -EPROBE_DEFER; goto fail; } + put_device(&ssi_pdev->dev); codec_dev = of_find_i2c_device_by_node(codec_np); if (!codec_dev) { dev_dbg(&pdev->dev, "failed to find codec platform device\n"); diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 08df261024cf..7147bba45a2a 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -421,6 +421,7 @@ static int simple_dai_link_of(struct simple_priv *priv, asoc_simple_card_canonicalize_platform(dai_link); dai_link_of_err: + of_node_put(plat); of_node_put(node); return ret; @@ -441,11 +442,12 @@ static int simple_for_each_link(struct simple_priv *priv, struct device_node *top = dev->of_node; struct device_node *node; bool is_top = 0; + int ret = 0; /* Check if it has dai-link */ node = of_get_child_by_name(top, PREFIX "dai-link"); if (!node) { - node = top; + node = of_node_get(top); is_top = 1; } @@ -455,13 +457,14 @@ static int simple_for_each_link(struct simple_priv *priv, struct device_node *codec; struct device_node *np; int num = of_get_child_count(node); - int ret; /* get codec */ codec = of_get_child_by_name(node, is_top ? 
PREFIX "codec" : "codec"); - if (!codec) - return -ENODEV; + if (!codec) { + ret = -ENODEV; + goto error; + } of_node_put(codec); @@ -484,14 +487,18 @@ static int simple_for_each_link(struct simple_priv *priv, else ret = func_noml(priv, np, codec, li, is_top); - if (ret < 0) - return ret; + if (ret < 0) { + of_node_put(np); + goto error; + } } node = of_get_next_child(top, node); } while (!is_top && node); - return 0; + error: + of_node_put(node); + return ret; } static int simple_parse_aux_devs(struct device_node *node, diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig index 0a7e40d06395..12d6b73e9531 100644 --- a/sound/soc/intel/boards/Kconfig +++ b/sound/soc/intel/boards/Kconfig @@ -293,6 +293,7 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH depends on MFD_INTEL_LPSS && I2C && ACPI select SND_SOC_DA7219 select SND_SOC_MAX98927 + select SND_SOC_MAX98373 select SND_SOC_DMIC select SND_SOC_HDAC_HDMI help diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c index c00925f9da73..5cadb7f654f3 100644 --- a/sound/soc/intel/boards/bxt_da7219_max98357a.c +++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c @@ -16,6 +16,8 @@ * GNU General Public License for more details. */ +#include <asm/cpu_device_id.h> +#include <linux/input.h> #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> @@ -49,6 +51,7 @@ struct bxt_card_private { enum { BXT_DPCM_AUDIO_PB = 0, BXT_DPCM_AUDIO_CP, + BXT_DPCM_AUDIO_HS_PB, BXT_DPCM_AUDIO_REF_CP, BXT_DPCM_AUDIO_DMIC_CP, BXT_DPCM_AUDIO_HDMI1_PB, @@ -103,7 +106,7 @@ static const struct snd_soc_dapm_widget broxton_widgets[] = { platform_clock_control, SND_SOC_DAPM_POST_PMD|SND_SOC_DAPM_PRE_PMU), }; -static const struct snd_soc_dapm_route broxton_map[] = { +static const struct snd_soc_dapm_route audio_map[] = { /* HP jack connectors - unknown if we have jack detection */ {"Headphone Jack", NULL, "HPL"}, {"Headphone Jack", NULL, "HPR"}, @@ -118,15 +121,6 @@ static const struct snd_soc_dapm_route broxton_map[] = { {"DMic", NULL, "SoC DMIC"}, /* CODEC BE connections */ - {"HiFi Playback", NULL, "ssp5 Tx"}, - {"ssp5 Tx", NULL, "codec0_out"}, - - {"Playback", NULL, "ssp1 Tx"}, - {"ssp1 Tx", NULL, "codec1_out"}, - - {"codec0_in", NULL, "ssp1 Rx"}, - {"ssp1 Rx", NULL, "Capture"}, - {"HDMI1", NULL, "hif5-0 Output"}, {"HDMI2", NULL, "hif6-0 Output"}, {"HDMI2", NULL, "hif7-0 Output"}, @@ -146,6 +140,28 @@ static const struct snd_soc_dapm_route broxton_map[] = { { "Headset Mic", NULL, "Platform Clock" }, }; +static const struct snd_soc_dapm_route broxton_map[] = { + {"HiFi Playback", NULL, "ssp5 Tx"}, + {"ssp5 Tx", NULL, "codec0_out"}, + + {"Playback", NULL, "ssp1 Tx"}, + {"ssp1 Tx", NULL, "codec1_out"}, + + {"codec0_in", NULL, "ssp1 Rx"}, + {"ssp1 Rx", NULL, "Capture"}, +}; + +static const struct snd_soc_dapm_route gemini_map[] = { + {"HiFi Playback", NULL, "ssp1 Tx"}, + {"ssp1 Tx", NULL, "codec0_out"}, + + {"Playback", NULL, "ssp2 Tx"}, + {"ssp2 Tx", NULL, "codec1_out"}, + + {"codec0_in", NULL, "ssp2 Rx"}, + {"ssp2 Rx", NULL, "Capture"}, +}; + static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { @@ -193,6 +209,12 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd) return ret; } + snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); + snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_1, KEY_VOLUMEUP); + snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN); + 
snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_3, + KEY_VOICECOMMAND); + da7219_aad_jack_det(component, &broxton_headset); snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC"); @@ -384,6 +406,20 @@ static struct snd_soc_dai_link broxton_dais[] = { .dpcm_capture = 1, .ops = &broxton_da7219_fe_ops, }, + [BXT_DPCM_AUDIO_HS_PB] = { + .name = "Bxt Audio Headset Playback", + .stream_name = "Headset Playback", + .cpu_dai_name = "System Pin2", + .platform_name = "0000:00:0e.0", + .dynamic = 1, + .codec_name = "snd-soc-dummy", + .codec_dai_name = "snd-soc-dummy-dai", + .nonatomic = 1, + .trigger = { + SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, + .dpcm_playback = 1, + .ops = &broxton_da7219_fe_ops, + }, [BXT_DPCM_AUDIO_REF_CP] = { .name = "Bxt Audio Reference cap", @@ -532,6 +568,11 @@ static struct snd_soc_dai_link broxton_dais[] = { }, }; +static const struct x86_cpu_id glk_ids[] = { + { X86_VENDOR_INTEL, 6, 0x7A }, /* Geminilake CPU_ID */ + {} +}; + #define NAME_SIZE 32 static int bxt_card_late_probe(struct snd_soc_card *card) { @@ -541,6 +582,13 @@ static int bxt_card_late_probe(struct snd_soc_card *card) int err, i = 0; char jack_name[NAME_SIZE]; + if (x86_match_cpu(glk_ids)) + snd_soc_dapm_add_routes(&card->dapm, gemini_map, + ARRAY_SIZE(gemini_map)); + else + snd_soc_dapm_add_routes(&card->dapm, broxton_map, + ARRAY_SIZE(broxton_map)); + list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { component = pcm->codec_dai->component; snprintf(jack_name, sizeof(jack_name), @@ -576,8 +624,8 @@ static struct snd_soc_card broxton_audio_card = { .num_controls = ARRAY_SIZE(broxton_controls), .dapm_widgets = broxton_widgets, .num_dapm_widgets = ARRAY_SIZE(broxton_widgets), - .dapm_routes = broxton_map, - .num_dapm_routes = ARRAY_SIZE(broxton_map), + .dapm_routes = audio_map, + .num_dapm_routes = ARRAY_SIZE(audio_map), .fully_routed = true, .late_probe = bxt_card_late_probe, }; @@ -597,6 +645,26 @@ static int broxton_audio_probe(struct platform_device *pdev) broxton_audio_card.dev = &pdev->dev; snd_soc_card_set_drvdata(&broxton_audio_card, ctx); + if (x86_match_cpu(glk_ids)) { + unsigned int i; + + broxton_audio_card.name = "glkda7219max"; + /* Fixup the SSP entries for geminilake */ + for (i = 0; i < ARRAY_SIZE(broxton_dais); i++) { + /* MAXIM_CODEC is connected to SSP1. 
*/ + if (!strcmp(broxton_dais[i].codec_dai_name, + BXT_MAXIM_CODEC_DAI)) { + broxton_dais[i].name = "SSP1-Codec"; + broxton_dais[i].cpu_dai_name = "SSP1 Pin"; + } + /* DIALOG_CODE is connected to SSP2 */ + else if (!strcmp(broxton_dais[i].codec_dai_name, + BXT_DIALOG_CODEC_DAI)) { + broxton_dais[i].name = "SSP2-Codec"; + broxton_dais[i].cpu_dai_name = "SSP2 Pin"; + } + } + } /* override plaform name, if required */ mach = (&pdev->dev)->platform_data; @@ -610,12 +678,19 @@ static int broxton_audio_probe(struct platform_device *pdev) return devm_snd_soc_register_card(&pdev->dev, &broxton_audio_card); } +static const struct platform_device_id bxt_board_ids[] = { + { .name = "bxt_da7219_max98357a" }, + { .name = "glk_da7219_max98357a" }, + { } +}; + static struct platform_driver broxton_audio = { .probe = broxton_audio_probe, .driver = { .name = "bxt_da7219_max98357a", .pm = &snd_soc_pm_ops, }, + .id_table = bxt_board_ids, }; module_platform_driver(broxton_audio) @@ -625,5 +700,7 @@ MODULE_AUTHOR("Sathyanarayana Nujella <sathyanarayana.nujella@intel.com>"); MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com>"); MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>"); MODULE_AUTHOR("Conrad Cooke <conrad.cooke@intel.com>"); +MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:bxt_da7219_max98357a"); +MODULE_ALIAS("platform:glk_da7219_max98357a"); diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c index c3b7732929cc..b0a4d297176e 100644 --- a/sound/soc/intel/boards/bytcr_rt5651.c +++ b/sound/soc/intel/boards/bytcr_rt5651.c @@ -844,74 +844,18 @@ static const struct x86_cpu_id cherrytrail_cpu_ids[] = { {} }; -static const struct acpi_gpio_params first_gpio = { 0, 0, false }; -static const struct acpi_gpio_params second_gpio = { 1, 0, false }; +static const struct acpi_gpio_params ext_amp_enable_gpios = { 0, 0, false }; -static const struct acpi_gpio_mapping byt_rt5651_amp_en_first[] = { - { "ext-amp-enable-gpios", &first_gpio, 1 }, - { }, -}; - -static const struct acpi_gpio_mapping byt_rt5651_amp_en_second[] = { - { "ext-amp-enable-gpios", &second_gpio, 1 }, +static const struct acpi_gpio_mapping cht_rt5651_gpios[] = { + /* + * Some boards have I2cSerialBusV2, GpioIo, GpioInt as ACPI resources, + * other boards may have I2cSerialBusV2, GpioInt, GpioIo instead. + * We want the GpioIo one for the ext-amp-enable-gpio. + */ + { "ext-amp-enable-gpios", &ext_amp_enable_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO }, { }, }; -/* - * Some boards have I2cSerialBusV2, GpioIo, GpioInt as ACPI resources, other - * boards may have I2cSerialBusV2, GpioInt, GpioIo instead. We want the - * GpioIo one for the ext-amp-enable-gpio and both count for the index in - * acpi_gpio_params index. So we have 2 different mappings and the code - * below figures out which one to use. 
- */ -struct byt_rt5651_acpi_resource_data { - int gpio_count; - int gpio_int_idx; -}; - -static int snd_byt_rt5651_acpi_resource(struct acpi_resource *ares, void *arg) -{ - struct byt_rt5651_acpi_resource_data *data = arg; - - if (ares->type != ACPI_RESOURCE_TYPE_GPIO) - return 0; - - if (ares->data.gpio.connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) - data->gpio_int_idx = data->gpio_count; - - data->gpio_count++; - return 0; -} - -static void snd_byt_rt5651_mc_pick_amp_en_gpio_mapping(struct device *codec) -{ - struct byt_rt5651_acpi_resource_data data = { 0, -1 }; - LIST_HEAD(resources); - int ret; - - ret = acpi_dev_get_resources(ACPI_COMPANION(codec), &resources, - snd_byt_rt5651_acpi_resource, &data); - if (ret < 0) { - dev_warn(codec, "Failed to get ACPI resources, not adding external amplifier GPIO mapping\n"); - return; - } - - /* All info we need is gathered during the walk */ - acpi_dev_free_resource_list(&resources); - - switch (data.gpio_int_idx) { - case 0: - byt_rt5651_gpios = byt_rt5651_amp_en_second; - break; - case 1: - byt_rt5651_gpios = byt_rt5651_amp_en_first; - break; - default: - dev_warn(codec, "Unknown GpioInt index %d, not adding external amplifier GPIO mapping\n", - data.gpio_int_idx); - } -} - struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */ u64 aif_value; /* 1: AIF1, 2: AIF2 */ u64 mclock_value; /* usually 25MHz (0x17d7940), ignored */ @@ -1038,7 +982,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev) /* Cherry Trail devices use an external amplifier enable gpio */ if (x86_match_cpu(cherrytrail_cpu_ids) && !byt_rt5651_gpios) - snd_byt_rt5651_mc_pick_amp_en_gpio_mapping(codec_dev); + byt_rt5651_gpios = cht_rt5651_gpios; if (byt_rt5651_gpios) { devm_acpi_dev_add_driver_gpios(codec_dev, byt_rt5651_gpios); diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c index 6dd5c69671b3..2768a572d065 100644 --- a/sound/soc/intel/boards/kbl_da7219_max98927.c +++ b/sound/soc/intel/boards/kbl_da7219_max98927.c @@ -2,7 +2,7 @@ // Copyright(c) 2018 Intel Corporation. 
/* - * Intel Kabylake I2S Machine Driver with MAX98927 & DA7219 Codecs + * Intel Kabylake I2S Machine Driver with MAX98927, MAX98373 & DA7219 Codecs * * Modified from: * Intel Kabylake I2S Machine driver supporting MAX98927 and @@ -24,8 +24,14 @@ #define KBL_DIALOG_CODEC_DAI "da7219-hifi" #define MAX98927_CODEC_DAI "max98927-aif1" -#define MAXIM_DEV0_NAME "i2c-MX98927:00" -#define MAXIM_DEV1_NAME "i2c-MX98927:01" +#define MAX98927_DEV0_NAME "i2c-MX98927:00" +#define MAX98927_DEV1_NAME "i2c-MX98927:01" + +#define MAX98373_CODEC_DAI "max98373-aif1" +#define MAX98373_DEV0_NAME "i2c-MX98373:00" +#define MAX98373_DEV1_NAME "i2c-MX98373:01" + + #define DUAL_CHANNEL 2 #define QUAD_CHANNEL 4 #define NAME_SIZE 32 @@ -176,20 +182,38 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream, for (j = 0; j < runtime->num_codecs; j++) { struct snd_soc_dai *codec_dai = runtime->codec_dais[j]; - if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) { + if (!strcmp(codec_dai->component->name, MAX98927_DEV0_NAME)) { ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16); if (ret < 0) { dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret); return ret; } } - if (!strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) { + if (!strcmp(codec_dai->component->name, MAX98927_DEV1_NAME)) { ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16); if (ret < 0) { dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret); return ret; } } + if (!strcmp(codec_dai->component->name, MAX98373_DEV0_NAME)) { + ret = snd_soc_dai_set_tdm_slot(codec_dai, + 0x03, 3, 8, 24); + if (ret < 0) { + dev_err(runtime->dev, + "DEV0 TDM slot err:%d\n", ret); + return ret; + } + } + if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) { + ret = snd_soc_dai_set_tdm_slot(codec_dai, + 0x0C, 3, 8, 24); + if (ret < 0) { + dev_err(runtime->dev, + "DEV0 TDM slot err:%d\n", ret); + return ret; + } + } } return 0; @@ -213,6 +237,25 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link; /* + * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE, + * where as kblda7219m98927 & kblmax98927 supports S16_LE by default. + * Skipping the port wise FE and BE configuration for kblda7219m98373 & + * kblmax98373 as the topology (FE & BE) supports S24_LE only. + */ + + if (!strcmp(rtd->card->name, "kblda7219m98373") || + !strcmp(rtd->card->name, "kblmax98373")) { + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = DUAL_CHANNEL; + + /* set SSP to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + return 0; + } + + /* * The ADSP will convert the FE rate to 48k, stereo, 24 bit */ if (!strcmp(fe_dai_link->name, "Kbl Audio Port") || @@ -352,20 +395,31 @@ static struct snd_pcm_hw_constraint_list constraints_channels_quad = { static int kbl_fe_startup(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_soc_pcm_runtime *soc_rt = substream->private_data; /* * On this platform for PCM device we support, * 48Khz * stereo - * 16 bit audio */ runtime->hw.channels_max = DUAL_CHANNEL; snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &constraints_channels); - - runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; - snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16); + /* + * Setup S24_LE (32 bit container and 24 bit valid data) for + * kblda7219m98373 & kblmax98373. 
For kblda7219m98927 & + * kblmax98927 keeping it as 16/16 due to topology FW dependency. + */ + if (!strcmp(soc_rt->card->name, "kblda7219m98373") || + !strcmp(soc_rt->card->name, "kblmax98373")) { + runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE; + snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); + + } else { + runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; + snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16); + } snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_rates); @@ -398,11 +452,23 @@ static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd, static int kabylake_dmic_startup(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_soc_pcm_runtime *soc_rt = substream->private_data; runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL; snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &constraints_channels_quad); + /* + * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE. + * The DMIC also configured for S24_LE. Forcing the DMIC format to + * S24_LE due to the topology FW dependency. + */ + if (!strcmp(soc_rt->card->name, "kblda7219m98373") || + !strcmp(soc_rt->card->name, "kblmax98373")) { + runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE; + snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); + } + return snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_rates); } @@ -448,29 +514,55 @@ static struct snd_soc_ops skylake_refcap_ops = { static struct snd_soc_codec_conf max98927_codec_conf[] = { { - .dev_name = MAXIM_DEV0_NAME, + .dev_name = MAX98927_DEV0_NAME, + .name_prefix = "Right", + }, + + { + .dev_name = MAX98927_DEV1_NAME, + .name_prefix = "Left", + }, +}; + +static struct snd_soc_codec_conf max98373_codec_conf[] = { + + { + .dev_name = MAX98373_DEV0_NAME, .name_prefix = "Right", }, { - .dev_name = MAXIM_DEV1_NAME, + .dev_name = MAX98373_DEV1_NAME, .name_prefix = "Left", }, }; -static struct snd_soc_dai_link_component ssp0_codec_components[] = { +static struct snd_soc_dai_link_component max98927_ssp0_codec_components[] = { { /* Left */ - .name = MAXIM_DEV0_NAME, + .name = MAX98927_DEV0_NAME, .dai_name = MAX98927_CODEC_DAI, }, { /* For Right */ - .name = MAXIM_DEV1_NAME, + .name = MAX98927_DEV1_NAME, .dai_name = MAX98927_CODEC_DAI, }, }; +static struct snd_soc_dai_link_component max98373_ssp0_codec_components[] = { + { /* Left */ + .name = MAX98373_DEV0_NAME, + .dai_name = MAX98373_CODEC_DAI, + }, + + { /* For Right */ + .name = MAX98373_DEV1_NAME, + .dai_name = MAX98373_CODEC_DAI, + }, + +}; + /* kabylake digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link kabylake_dais[] = { /* Front End DAI links */ @@ -607,8 +699,8 @@ static struct snd_soc_dai_link kabylake_dais[] = { .cpu_dai_name = "SSP0 Pin", .platform_name = "0000:00:1f.3", .no_pcm = 1, - .codecs = ssp0_codec_components, - .num_codecs = ARRAY_SIZE(ssp0_codec_components), + .codecs = max98927_ssp0_codec_components, + .num_codecs = ARRAY_SIZE(max98927_ssp0_codec_components), .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, @@ -683,7 +775,7 @@ static struct snd_soc_dai_link kabylake_dais[] = { }; /* kabylake digital audio interface glue - connects codec <--> CPU */ -static struct snd_soc_dai_link kabylake_max98927_dais[] = { +static struct snd_soc_dai_link kabylake_max98_927_373_dais[] = { /* Front End DAI links */ [KBL_DPCM_AUDIO_PB] = { .name = "Kbl Audio Port", @@ -802,8 +894,8 @@ static struct 
snd_soc_dai_link kabylake_max98927_dais[] = { .cpu_dai_name = "SSP0 Pin", .platform_name = "0000:00:1f.3", .no_pcm = 1, - .codecs = ssp0_codec_components, - .num_codecs = ARRAY_SIZE(ssp0_codec_components), + .codecs = max98927_ssp0_codec_components, + .num_codecs = ARRAY_SIZE(max98927_ssp0_codec_components), .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, @@ -917,8 +1009,8 @@ static struct snd_soc_card kbl_audio_card_da7219_m98927 = { static struct snd_soc_card kbl_audio_card_max98927 = { .name = "kblmax98927", .owner = THIS_MODULE, - .dai_link = kabylake_max98927_dais, - .num_links = ARRAY_SIZE(kabylake_max98927_dais), + .dai_link = kabylake_max98_927_373_dais, + .num_links = ARRAY_SIZE(kabylake_max98_927_373_dais), .controls = kabylake_controls, .num_controls = ARRAY_SIZE(kabylake_controls), .dapm_widgets = kabylake_widgets, @@ -931,9 +1023,46 @@ static struct snd_soc_card kbl_audio_card_max98927 = { .late_probe = kabylake_card_late_probe, }; +static struct snd_soc_card kbl_audio_card_da7219_m98373 = { + .name = "kblda7219m98373", + .owner = THIS_MODULE, + .dai_link = kabylake_dais, + .num_links = ARRAY_SIZE(kabylake_dais), + .controls = kabylake_controls, + .num_controls = ARRAY_SIZE(kabylake_controls), + .dapm_widgets = kabylake_widgets, + .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets), + .dapm_routes = kabylake_map, + .num_dapm_routes = ARRAY_SIZE(kabylake_map), + .codec_conf = max98373_codec_conf, + .num_configs = ARRAY_SIZE(max98373_codec_conf), + .fully_routed = true, + .late_probe = kabylake_card_late_probe, +}; + +static struct snd_soc_card kbl_audio_card_max98373 = { + .name = "kblmax98373", + .owner = THIS_MODULE, + .dai_link = kabylake_max98_927_373_dais, + .num_links = ARRAY_SIZE(kabylake_max98_927_373_dais), + .controls = kabylake_controls, + .num_controls = ARRAY_SIZE(kabylake_controls), + .dapm_widgets = kabylake_widgets, + .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets), + .dapm_routes = kabylake_map, + .num_dapm_routes = ARRAY_SIZE(kabylake_map), + .codec_conf = max98373_codec_conf, + .num_configs = ARRAY_SIZE(max98373_codec_conf), + .fully_routed = true, + .late_probe = kabylake_card_late_probe, +}; + static int kabylake_audio_probe(struct platform_device *pdev) { struct kbl_codec_private *ctx; + struct snd_soc_dai_link *kbl_dai_link; + struct snd_soc_dai_link_component **codecs; + int i = 0; ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -944,6 +1073,22 @@ static int kabylake_audio_probe(struct platform_device *pdev) kabylake_audio_card = (struct snd_soc_card *)pdev->id_entry->driver_data; + kbl_dai_link = kabylake_audio_card->dai_link; + + /* Update codecs for SSP0 with max98373 codec info */ + if (!strcmp(pdev->name, "kbl_da7219_max98373") || + (!strcmp(pdev->name, "kbl_max98373"))) { + for (i = 0; i < kabylake_audio_card->num_links; ++i) { + if (strcmp(kbl_dai_link[i].name, "SSP0-Codec")) + continue; + + codecs = &(kbl_dai_link[i].codecs); + *codecs = max98373_ssp0_codec_components; + kbl_dai_link[i].num_codecs = + ARRAY_SIZE(max98373_ssp0_codec_components); + break; + } + } kabylake_audio_card->dev = &pdev->dev; snd_soc_card_set_drvdata(kabylake_audio_card, ctx); @@ -961,13 +1106,23 @@ static const struct platform_device_id kbl_board_ids[] = { .driver_data = (kernel_ulong_t)&kbl_audio_card_max98927, }, + { + .name = "kbl_da7219_max98373", + .driver_data = + (kernel_ulong_t)&kbl_audio_card_da7219_m98373, + }, + { + .name = "kbl_max98373", + .driver_data = + (kernel_ulong_t)&kbl_audio_card_max98373, + }, { } 
}; static struct platform_driver kabylake_audio = { .probe = kabylake_audio_probe, .driver = { - .name = "kbl_da7219_max98927", + .name = "kbl_da7219_max98_927_373", .pm = &snd_soc_pm_ops, }, .id_table = kbl_board_ids, @@ -976,8 +1131,10 @@ static struct platform_driver kabylake_audio = { module_platform_driver(kabylake_audio) /* Module information */ -MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927 & DA7219"); +MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927/MAX98373 & DA7219"); MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:kbl_da7219_max98927"); MODULE_ALIAS("platform:kbl_max98927"); +MODULE_ALIAS("platform:kbl_da7219_max98373"); +MODULE_ALIAS("platform:kbl_max98373"); diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c index 61dedc103b19..229e39586868 100644 --- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c @@ -51,9 +51,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = { .id = "INT343A", .drv_name = "bxt_alc298s_i2s", .fw_filename = "intel/dsp_fw_bxtn.bin", - .sof_fw_filename = "intel/sof-apl.ri", - .sof_tplg_filename = "intel/sof-apl-rt298.tplg", - .asoc_plat_name = "0000:00:0e.0", + .sof_fw_filename = "sof-apl.ri", + .sof_tplg_filename = "sof-apl-rt298.tplg", }, { .id = "DLGS7219", @@ -61,31 +60,27 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = { .fw_filename = "intel/dsp_fw_bxtn.bin", .machine_quirk = snd_soc_acpi_codec_list, .quirk_data = &bxt_codecs, - .sof_fw_filename = "intel/sof-apl.ri", - .sof_tplg_filename = "intel/sof-apl-da7219.tplg", - .asoc_plat_name = "0000:00:0e.0", + .sof_fw_filename = "sof-apl.ri", + .sof_tplg_filename = "sof-apl-da7219.tplg", }, { .id = "104C5122", .drv_name = "bxt-pcm512x", - .sof_fw_filename = "intel/sof-apl.ri", - .sof_tplg_filename = "intel/sof-apl-pcm512x.tplg", - .asoc_plat_name = "0000:00:0e.0", + .sof_fw_filename = "sof-apl.ri", + .sof_tplg_filename = "sof-apl-pcm512x.tplg", }, { .id = "1AEC8804", .drv_name = "bxt-wm8804", - .sof_fw_filename = "intel/sof-apl.ri", - .sof_tplg_filename = "intel/sof-apl-wm8804.tplg", - .asoc_plat_name = "0000:00:0e.0", + .sof_fw_filename = "sof-apl.ri", + .sof_tplg_filename = "sof-apl-wm8804.tplg", }, { .id = "INT34C3", .drv_name = "bxt_tdf8532", .machine_quirk = apl_quirk, - .sof_fw_filename = "intel/sof-apl.ri", - .sof_tplg_filename = "intel/sof-apl-tdf8532.tplg", - .asoc_plat_name = "0000:00:0e.0", + .sof_fw_filename = "sof-apl.ri", + .sof_tplg_filename = "sof-apl-tdf8532.tplg", }, {}, }; diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c index 96f9c553fe6c..fe812a909db4 100644 --- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c @@ -83,9 +83,8 @@ static struct snd_soc_acpi_mach byt_thinkpad_10 = { .drv_name = "cht-bsw-rt5672", .fw_filename = "intel/fw_sst_0f28.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-rt5670.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5670.tplg", }; static struct snd_soc_acpi_mach byt_pov_p1006w = { @@ -93,9 +92,8 @@ static struct snd_soc_acpi_mach byt_pov_p1006w = { .drv_name = "bytcr_rt5651", .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcr_rt5651", - .sof_fw_filename = "intel/sof-byt.ri", - 
.sof_tplg_filename = "intel/sof-byt-rt5651.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5651.tplg", }; static struct snd_soc_acpi_mach *byt_quirk(void *arg) @@ -136,63 +134,56 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = { .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcr_rt5640", .machine_quirk = byt_quirk, - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-rt5640.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5640.tplg", }, { .id = "10EC5642", .drv_name = "bytcr_rt5640", .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcr_rt5640", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-rt5640.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5640.tplg", }, { .id = "INTCCFFD", .drv_name = "bytcr_rt5640", .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcr_rt5640", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-rt5640.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5640.tplg", }, { .id = "10EC5651", .drv_name = "bytcr_rt5651", .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcr_rt5651", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-rt5651.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5651.tplg", }, { .id = "DLGS7212", .drv_name = "bytcht_da7213", .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcht_da7213", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-da7213.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-da7213.tplg", }, { .id = "DLGS7213", .drv_name = "bytcht_da7213", .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcht_da7213", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-da7213.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-da7213.tplg", }, { .id = "ESSX8316", .drv_name = "bytcht_es8316", .fw_filename = "intel/fw_sst_0f28.bin", .board = "bytcht_es8316", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-es8316.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-es8316.tplg", }, /* some Baytrail platforms rely on RT5645, use CHT machine driver */ { @@ -200,18 +191,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = { .drv_name = "cht-bsw-rt5645", .fw_filename = "intel/fw_sst_0f28.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-rt5645.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5645.tplg", }, { .id = "10EC5648", .drv_name = "cht-bsw-rt5645", .fw_filename = "intel/fw_sst_0f28.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-rt5645.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-rt5645.tplg", }, /* use CHT driver to Baytrail Chromebooks */ { @@ -219,9 +208,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = { 
.drv_name = "cht-bsw-max98090", .fw_filename = "intel/fw_sst_0f28.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-byt.ri", - .sof_tplg_filename = "intel/sof-byt-max98090.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-byt.ri", + .sof_tplg_filename = "sof-byt-max98090.tplg", }, #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) /* diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c index 91bb99b69601..deafd87cc764 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c @@ -44,9 +44,8 @@ static struct snd_soc_acpi_mach cht_surface_mach = { .drv_name = "cht-bsw-rt5645", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5645.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5645.tplg", }; static struct snd_soc_acpi_mach *cht_quirk(void *arg) @@ -68,90 +67,80 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = { .drv_name = "cht-bsw-rt5672", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5670.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5670.tplg", }, { .id = "10EC5672", .drv_name = "cht-bsw-rt5672", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5670.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5670.tplg", }, { .id = "10EC5645", .drv_name = "cht-bsw-rt5645", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5645.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5645.tplg", }, { .id = "10EC5650", .drv_name = "cht-bsw-rt5645", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5645.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5645.tplg", }, { .id = "10EC3270", .drv_name = "cht-bsw-rt5645", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5645.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5645.tplg", }, { .id = "193C9890", .drv_name = "cht-bsw-max98090", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-max98090.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-max98090.tplg", }, { .id = "10508824", .drv_name = "cht-bsw-nau8824", .fw_filename = "intel/fw_sst_22a8.bin", .board = "cht-bsw", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-nau8824.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-nau8824.tplg", }, { .id = "DLGS7212", .drv_name = "bytcht_da7213", .fw_filename = "intel/fw_sst_22a8.bin", .board = 
"bytcht_da7213", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-da7213.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-da7213.tplg", }, { .id = "DLGS7213", .drv_name = "bytcht_da7213", .fw_filename = "intel/fw_sst_22a8.bin", .board = "bytcht_da7213", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-da7213.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-da7213.tplg", }, { .id = "ESSX8316", .drv_name = "bytcht_es8316", .fw_filename = "intel/fw_sst_22a8.bin", .board = "bytcht_es8316", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-es8316.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-es8316.tplg", }, /* some CHT-T platforms rely on RT5640, use Baytrail machine driver */ { @@ -160,18 +149,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = { .fw_filename = "intel/fw_sst_22a8.bin", .board = "bytcr_rt5640", .machine_quirk = cht_quirk, - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5640.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5640.tplg", }, { .id = "10EC3276", .drv_name = "bytcr_rt5640", .fw_filename = "intel/fw_sst_22a8.bin", .board = "bytcr_rt5640", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5640.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5640.tplg", }, /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */ { @@ -179,9 +166,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = { .drv_name = "bytcr_rt5651", .fw_filename = "intel/fw_sst_22a8.bin", .board = "bytcr_rt5651", - .sof_fw_filename = "intel/sof-cht.ri", - .sof_tplg_filename = "intel/sof-cht-rt5651.tplg", - .asoc_plat_name = "sst-mfld-platform", + .sof_fw_filename = "sof-cht.ri", + .sof_tplg_filename = "sof-cht-rt5651.tplg", }, #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) /* diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c index ec8e28e7b937..a914dd238d0a 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c @@ -20,9 +20,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = { .drv_name = "cnl_rt274", .fw_filename = "intel/dsp_fw_cnl.bin", .pdata = &cnl_pdata, - .sof_fw_filename = "intel/sof-cnl.ri", - .sof_tplg_filename = "intel/sof-cnl-rt274.tplg", - .asoc_plat_name = "0000:00:1f.3", + .sof_fw_filename = "sof-cnl.ri", + .sof_tplg_filename = "sof-cnl-rt274.tplg", }, {}, }; diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c index 305875af71ca..3f2061475ae4 100644 --- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c @@ -19,9 +19,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = { .id = "INT343A", .drv_name = "glk_alc298s_i2s", .fw_filename = "intel/dsp_fw_glk.bin", - .sof_fw_filename = "intel/sof-glk.ri", - .sof_tplg_filename = "intel/sof-glk-alc298.tplg", - .asoc_plat_name = "0000:00:0e.0", + .sof_fw_filename = "sof-glk.ri", + .sof_tplg_filename = "sof-glk-alc298.tplg", }, { .id = 
"DLGS7219", @@ -29,9 +28,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = { .fw_filename = "intel/dsp_fw_glk.bin", .machine_quirk = snd_soc_acpi_codec_list, .quirk_data = &glk_codecs, - .sof_fw_filename = "intel/sof-glk.ri", - .sof_tplg_filename = "intel/sof-glk-da7219.tplg", - .asoc_plat_name = "0000:00:0e.0", + .sof_fw_filename = "sof-glk.ri", + .sof_tplg_filename = "sof-glk-da7219.tplg", }, {}, }; diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c index 533c1064f84b..68ae43f7b4b2 100644 --- a/sound/soc/intel/common/soc-acpi-intel-hda-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c @@ -23,7 +23,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[] = { /* .sof_fw_filename is dynamically set in sof/intel driver */ - .sof_tplg_filename = "intel/sof-hda-generic.tplg", + .sof_tplg_filename = "sof-hda-generic.tplg", /* * .machine_quirk and .quirk_data are not used here but diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c index 494a0ea9b029..690b305a255b 100644 --- a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c @@ -23,9 +23,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[] = { .id = "INT33CA", .drv_name = "haswell-audio", .fw_filename = "intel/IntcSST1.bin", - .sof_fw_filename = "intel/sof-hsw.ri", - .sof_tplg_filename = "intel/sof-hsw.tplg", - .asoc_plat_name = "haswell-pcm-audio", + .sof_fw_filename = "sof-hsw.ri", + .sof_tplg_filename = "sof-hsw.tplg", }, {} }; @@ -36,25 +35,22 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[] = { .id = "INT343A", .drv_name = "broadwell-audio", .fw_filename = "intel/IntcSST2.bin", - .sof_fw_filename = "intel/sof-bdw.ri", - .sof_tplg_filename = "intel/sof-bdw-rt286.tplg", - .asoc_plat_name = "haswell-pcm-audio", + .sof_fw_filename = "sof-bdw.ri", + .sof_tplg_filename = "sof-bdw-rt286.tplg", }, { .id = "RT5677CE", .drv_name = "bdw-rt5677", .fw_filename = "intel/IntcSST2.bin", - .sof_fw_filename = "intel/sof-bdw.ri", - .sof_tplg_filename = "intel/sof-bdw-rt5677.tplg", - .asoc_plat_name = "haswell-pcm-audio", + .sof_fw_filename = "sof-bdw.ri", + .sof_tplg_filename = "sof-bdw-rt5677.tplg", }, { .id = "INT33CA", .drv_name = "haswell-audio", .fw_filename = "intel/IntcSST2.bin", - .sof_fw_filename = "intel/sof-bdw.ri", - .sof_tplg_filename = "intel/sof-bdw-rt5640.tplg", - .asoc_plat_name = "haswell-pcm-audio", + .sof_fw_filename = "sof-bdw.ri", + .sof_tplg_filename = "sof-bdw-rt5640.tplg", }, {} }; diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c index 33b441dca4d3..e5a6be5bc0ee 100644 --- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c @@ -20,9 +20,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = { .drv_name = "icl_rt274", .fw_filename = "intel/dsp_fw_icl.bin", .pdata = &icl_pdata, - .sof_fw_filename = "intel/sof-icl.ri", - .sof_tplg_filename = "intel/sof-icl-rt274.tplg", - .asoc_plat_name = "0000:00:1f.3", + .sof_fw_filename = "sof-icl.ri", + .sof_tplg_filename = "sof-icl-rt274.tplg", }, {}, }; diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c index e6fa6f470526..4b331058e807 100644 --- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c +++ 
b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c @@ -37,6 +37,11 @@ static struct snd_soc_acpi_codecs kbl_7219_98927_codecs = { .codecs = {"MX98927"} }; +static struct snd_soc_acpi_codecs kbl_7219_98373_codecs = { + .num_codecs = 1, + .codecs = {"MX98373"} +}; + struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { { .id = "INT343A", @@ -106,6 +111,20 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { .drv_name = "kbl_rt5660", .fw_filename = "intel/dsp_fw_kbl.bin", }, + { + .id = "DLGS7219", + .drv_name = "kbl_da7219_max98373", + .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_7219_98373_codecs, + .pdata = &skl_dmic_data + }, + { + .id = "MX98373", + .drv_name = "kbl_max98373", + .fw_filename = "intel/dsp_fw_kbl.bin", + .pdata = &skl_dmic_data + }, {}, }; EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_kbl_machines); diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c index 088e6b2fb1bc..1ae83f4ccc36 100644 --- a/sound/soc/intel/skylake/skl-pcm.c +++ b/sound/soc/intel/skylake/skl-pcm.c @@ -1459,6 +1459,7 @@ static const struct snd_soc_component_driver skl_component = { .ops = &skl_platform_ops, .pcm_new = skl_pcm_new, .pcm_free = skl_pcm_free, + .ignore_module_refcount = 1, /* do not increase the refcount in core */ }; int skl_platform_register(struct device *dev) diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c index e408c1b270ab..1b8bcdaf02d1 100644 --- a/sound/soc/mediatek/common/mtk-btcvsd.c +++ b/sound/soc/mediatek/common/mtk-btcvsd.c @@ -250,7 +250,7 @@ static int mtk_btcvsd_snd_rx_init(struct mtk_btcvsd_snd *bt) bt->rx->buf_size = BTCVSD_RX_BUF_SIZE; bt->rx->timeout = 0; bt->rx->rw_cnt = 0; - bt->tx->stream = SNDRV_PCM_STREAM_CAPTURE; + bt->rx->stream = SNDRV_PCM_STREAM_CAPTURE; return 0; } @@ -1269,7 +1269,7 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev) /* irq */ irq_id = platform_get_irq(pdev, 0); if (irq_id <= 0) { - dev_err(dev, "%s no irq found\n", dev->of_node->name); + dev_err(dev, "%pOFn no irq found\n", dev->of_node); return irq_id < 0 ? 
irq_id : -ENXIO; } diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c index ff3111ec876c..4e045dd305a7 100644 --- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c +++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c @@ -1139,7 +1139,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev) /* request irq */ irq_id = platform_get_irq(pdev, 0); if (!irq_id) { - dev_err(dev, "%s no irq found\n", dev->of_node->name); + dev_err(dev, "%pOFn no irq found\n", dev->of_node); return -ENXIO; } ret = devm_request_irq(dev, irq_id, mt8183_afe_irq_handler, diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c b/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c index c25024f72e72..777e93d70bea 100644 --- a/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c +++ b/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c @@ -963,7 +963,7 @@ static const struct mtk_afe_i2s_priv mt8183_i2s_priv[DAI_I2S_NUM] = { }, }; -int mt8183_dai_i2s_get_share(struct mtk_base_afe *afe) +static int mt8183_dai_i2s_get_share(struct mtk_base_afe *afe) { struct mt8183_afe_private *afe_priv = afe->platform_priv; const struct device_node *of_node = afe->dev->of_node; @@ -983,7 +983,7 @@ int mt8183_dai_i2s_get_share(struct mtk_base_afe *afe) return 0; } -int mt8183_dai_i2s_set_priv(struct mtk_base_afe *afe) +static int mt8183_dai_i2s_set_priv(struct mtk_base_afe *afe) { struct mt8183_afe_private *afe_priv = afe->platform_priv; struct mtk_afe_i2s_priv *i2s_priv; diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig index 7948e993adba..75ceb04d8bf0 100644 --- a/sound/soc/qcom/Kconfig +++ b/sound/soc/qcom/Kconfig @@ -98,11 +98,12 @@ config SND_SOC_MSM8996 config SND_SOC_SDM845 tristate "SoC Machine driver for SDM845 boards" - depends on QCOM_APR + depends on QCOM_APR && MFD_CROS_EC select SND_SOC_QDSP6 select SND_SOC_QCOM_COMMON select SND_SOC_RT5663 select SND_SOC_MAX98927 + select SND_SOC_CROS_EC_CODEC help To add support for audio on Qualcomm Technologies Inc. SDM845 SoC-based systems. 
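The two MediaTek hunks above switch the error message from dereferencing dev->of_node->name to the %pOFn printk specifier, which prints the name of a struct device_node directly. A short usage sketch following the mtk-btcvsd variant:

	irq_id = platform_get_irq(pdev, 0);
	if (irq_id <= 0) {
		/* %pOFn takes the struct device_node pointer itself */
		dev_err(dev, "%pOFn no irq found\n", dev->of_node);
		return irq_id < 0 ? irq_id : -ENXIO;
	}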
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c index 1dd23bba1bed..4b559932adc3 100644 --- a/sound/soc/qcom/apq8016_sbc.c +++ b/sound/soc/qcom/apq8016_sbc.c @@ -164,41 +164,52 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card) if (!cpu || !codec) { dev_err(dev, "Can't find cpu/codec DT node\n"); - return ERR_PTR(-EINVAL); + ret = -EINVAL; + goto error; } link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0); if (!link->cpu_of_node) { dev_err(card->dev, "error getting cpu phandle\n"); - return ERR_PTR(-EINVAL); + ret = -EINVAL; + goto error; } ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name); if (ret) { dev_err(card->dev, "error getting cpu dai name\n"); - return ERR_PTR(ret); + goto error; } ret = snd_soc_of_get_dai_link_codecs(dev, codec, link); if (ret < 0) { dev_err(card->dev, "error getting codec dai name\n"); - return ERR_PTR(ret); + goto error; } link->platform_of_node = link->cpu_of_node; ret = of_property_read_string(np, "link-name", &link->name); if (ret) { dev_err(card->dev, "error getting codec dai_link name\n"); - return ERR_PTR(ret); + goto error; } link->stream_name = link->name; link->init = apq8016_sbc_dai_init; link++; + + of_node_put(cpu); + of_node_put(codec); } return data; + + error: + of_node_put(np); + of_node_put(cpu); + of_node_put(codec); + return ERR_PTR(ret); } static const struct snd_soc_dapm_widget apq8016_sbc_dapm_widgets[] = { diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c index 4715527054e5..5661025e8cec 100644 --- a/sound/soc/qcom/common.c +++ b/sound/soc/qcom/common.c @@ -42,6 +42,9 @@ int qcom_snd_parse_of(struct snd_soc_card *card) link = card->dai_link; for_each_child_of_node(dev->of_node, np) { cpu = of_get_child_by_name(np, "cpu"); + platform = of_get_child_by_name(np, "platform"); + codec = of_get_child_by_name(np, "codec"); + if (!cpu) { dev_err(dev, "Can't find cpu DT node\n"); ret = -EINVAL; @@ -63,8 +66,6 @@ int qcom_snd_parse_of(struct snd_soc_card *card) goto err; } - platform = of_get_child_by_name(np, "platform"); - codec = of_get_child_by_name(np, "codec"); if (codec && platform) { link->platform_of_node = of_parse_phandle(platform, "sound-dai", @@ -100,10 +101,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card) link->dpcm_capture = 1; link->stream_name = link->name; link++; + + of_node_put(cpu); + of_node_put(codec); + of_node_put(platform); } return 0; err: + of_node_put(np); of_node_put(cpu); of_node_put(codec); of_node_put(platform); diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h index 7ae580d677c8..0ae15d01a3f6 100644 --- a/sound/soc/samsung/dma.h +++ b/sound/soc/samsung/dma.h @@ -17,5 +17,6 @@ * otherwise actual DMA channel names must be passed to this function. 
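The apq8016_sbc and qcom/common hunks above fix device-node reference leaks: the children returned by of_get_child_by_name() are now released with of_node_put() at the end of every loop iteration, and the error path additionally drops the reference on the child np held by for_each_child_of_node(). A condensed sketch of the pattern (dai_link setup elided):

	for_each_child_of_node(dev->of_node, np) {
		cpu = of_get_child_by_name(np, "cpu");
		codec = of_get_child_by_name(np, "codec");
		if (!cpu) {
			ret = -EINVAL;
			goto err;
		}
		/* ... build the dai_link from cpu/codec ... */
		of_node_put(cpu);	/* of_node_put(NULL) is a no-op, so */
		of_node_put(codec);	/* unconditional puts are fine */
	}
	return 0;
err:
	of_node_put(np);		/* reference held by the iterator */
	of_node_put(cpu);
	of_node_put(codec);
	return ret;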
*/ int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter, - const char *tx, const char *rx); + const char *tx, const char *rx, + struct device *dma_dev); #endif /* _SAMSUNG_DMA_H */ diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c index 9104c98deeb7..302871974cb3 100644 --- a/sound/soc/samsung/dmaengine.c +++ b/sound/soc/samsung/dmaengine.c @@ -25,9 +25,9 @@ #include "dma.h" int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter, - const char *tx, const char *rx) + const char *tx, const char *rx, + struct device *dma_dev) { - unsigned int flags = SND_DMAENGINE_PCM_FLAG_COMPAT; struct snd_dmaengine_pcm_config *pcm_conf; pcm_conf = devm_kzalloc(dev, sizeof(*pcm_conf), GFP_KERNEL); @@ -36,15 +36,13 @@ int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter, pcm_conf->prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config; pcm_conf->compat_filter_fn = filter; + pcm_conf->dma_dev = dma_dev; - if (dev->of_node) { - pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx; - pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx; - } else { - flags |= SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME; - } + pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx; + pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx; - return devm_snd_dmaengine_pcm_register(dev, pcm_conf, flags); + return devm_snd_dmaengine_pcm_register(dev, pcm_conf, + SND_DMAENGINE_PCM_FLAG_COMPAT); } EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_register); diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index ce00fe2f6aae..4231001226f4 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -1,14 +1,9 @@ -/* sound/soc/samsung/i2s.c - * - * ALSA SoC Audio Layer - Samsung I2S Controller driver - * - * Copyright (c) 2010 Samsung Electronics Co. Ltd. - * Jaswinder Singh <jassisinghbrar@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ +// SPDX-License-Identifier: GPL-2.0 +// +// ALSA SoC Audio Layer - Samsung I2S Controller driver +// +// Copyright (c) 2010 Samsung Electronics Co. Ltd. +// Jaswinder Singh <jassisinghbrar@gmail.com> #include <dt-bindings/sound/samsung-i2s.h> #include <linux/delay.h> @@ -34,6 +29,9 @@ #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) +#define SAMSUNG_I2S_ID_PRIMARY 1 +#define SAMSUNG_I2S_ID_SECONDARY 2 + struct samsung_i2s_variant_regs { unsigned int bfs_off; unsigned int rfs_off; @@ -57,64 +55,88 @@ struct samsung_i2s_dai_data { struct i2s_dai { /* Platform device for this DAI */ struct platform_device *pdev; - /* Memory mapped SFR region */ - void __iomem *addr; - /* Rate of RCLK source clock */ - unsigned long rclk_srcrate; - /* Frame Clock */ + + /* Frame clock */ unsigned frmclk; /* - * Specifically requested RCLK,BCLK by MACHINE Driver. + * Specifically requested RCLK, BCLK by machine driver. * 0 indicates CPU driver is free to choose any value. 
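samsung_asoc_dma_platform_register() gains a dma_dev argument that is forwarded to snd_dmaengine_pcm_config::dma_dev, which the dmaengine PCM core uses to request DMA channels on behalf of that device rather than the registering one. This is what lets the secondary PCM device, created later in the I2S probe, reuse the primary device's DMA channels; the call from samsung_i2s_probe() further below looks like this:

	/* register the secondary PCM, but request its DMA channels
	 * via the primary I2S device (&pdev->dev) */
	ret = samsung_asoc_dma_platform_register(&priv->pdev_sec->dev,
						 sec_dai->filter, "tx-sec", NULL,
						 &pdev->dev);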
*/ unsigned rfs, bfs; - /* I2S Controller's core clock */ - struct clk *clk; - /* Clock for generating I2S signals */ - struct clk *op_clk; /* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */ struct i2s_dai *pri_dai; /* Pointer to the Secondary_Fifo if it has one, NULL otherwise */ struct i2s_dai *sec_dai; -#define DAI_OPENED (1 << 0) /* Dai is opened */ -#define DAI_MANAGER (1 << 1) /* Dai is the manager */ + +#define DAI_OPENED (1 << 0) /* DAI is opened */ +#define DAI_MANAGER (1 << 1) /* DAI is the manager */ unsigned mode; + /* Driver for this DAI */ - struct snd_soc_dai_driver i2s_dai_drv; + struct snd_soc_dai_driver *drv; + /* DMA parameters */ struct snd_dmaengine_dai_dma_data dma_playback; struct snd_dmaengine_dai_dma_data dma_capture; struct snd_dmaengine_dai_dma_data idma_playback; dma_filter_fn filter; - u32 quirks; - u32 suspend_i2smod; - u32 suspend_i2scon; - u32 suspend_i2spsr; - const struct samsung_i2s_variant_regs *variant_regs; + + struct samsung_i2s_priv *priv; +}; + +struct samsung_i2s_priv { + struct platform_device *pdev; + struct platform_device *pdev_sec; + + /* Memory mapped SFR region */ + void __iomem *addr; /* Spinlock protecting access to the device's registers */ - spinlock_t spinlock; - spinlock_t *lock; + spinlock_t lock; + + /* Lock for cross interface checks */ + spinlock_t pcm_lock; + + /* CPU DAIs and their corresponding drivers */ + struct i2s_dai *dai; + struct snd_soc_dai_driver *dai_drv; + int num_dais; + + /* The I2S controller's core clock */ + struct clk *clk; + + /* Clock for generating I2S signals */ + struct clk *op_clk; + + /* Rate of RCLK source clock */ + unsigned long rclk_srcrate; + + /* Cache of selected I2S registers for system suspend */ + u32 suspend_i2smod; + u32 suspend_i2scon; + u32 suspend_i2spsr; + + const struct samsung_i2s_variant_regs *variant_regs; + u32 quirks; - /* Below fields are only valid if this is the primary FIFO */ + /* The clock provider's data */ struct clk *clk_table[3]; struct clk_onecell_data clk_data; }; -/* Lock for cross i/f checks */ -static DEFINE_SPINLOCK(lock); - -/* If this is the 'overlay' stereo DAI */ +/* Returns true if this is the 'overlay' stereo DAI */ static inline bool is_secondary(struct i2s_dai *i2s) { - return i2s->pri_dai ? true : false; + return i2s->drv->id == SAMSUNG_I2S_ID_SECONDARY; } /* If operating in SoC-Slave mode */ static inline bool is_slave(struct i2s_dai *i2s) { - u32 mod = readl(i2s->addr + I2SMOD); - return (mod & (1 << i2s->variant_regs->mss_off)) ? true : false; + struct samsung_i2s_priv *priv = i2s->priv; + + u32 mod = readl(priv->addr + I2SMOD); + return (mod & (1 << priv->variant_regs->mss_off)) ? true : false; } /* If this interface of the controller is transmitting data */ @@ -125,7 +147,7 @@ static inline bool tx_active(struct i2s_dai *i2s) if (!i2s) return false; - active = readl(i2s->addr + I2SCON); + active = readl(i2s->priv->addr + I2SCON); if (is_secondary(i2s)) active &= CON_TXSDMA_ACTIVE; @@ -163,7 +185,7 @@ static inline bool rx_active(struct i2s_dai *i2s) if (!i2s) return false; - active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE; + active = readl(i2s->priv->addr + I2SCON) & CON_RXDMA_ACTIVE; return active ? 
true : false; } @@ -202,7 +224,9 @@ static inline bool any_active(struct i2s_dai *i2s) static inline struct i2s_dai *to_info(struct snd_soc_dai *dai) { - return snd_soc_dai_get_drvdata(dai); + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); + + return &priv->dai[dai->id - 1]; } static inline bool is_opened(struct i2s_dai *i2s) @@ -224,9 +248,11 @@ static inline bool is_manager(struct i2s_dai *i2s) /* Read RCLK of I2S (in multiples of LRCLK) */ static inline unsigned get_rfs(struct i2s_dai *i2s) { + struct samsung_i2s_priv *priv = i2s->priv; u32 rfs; - rfs = readl(i2s->addr + I2SMOD) >> i2s->variant_regs->rfs_off; - rfs &= i2s->variant_regs->rfs_mask; + + rfs = readl(priv->addr + I2SMOD) >> priv->variant_regs->rfs_off; + rfs &= priv->variant_regs->rfs_mask; switch (rfs) { case 7: return 192; @@ -243,10 +269,11 @@ static inline unsigned get_rfs(struct i2s_dai *i2s) /* Write RCLK of I2S (in multiples of LRCLK) */ static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs) { - u32 mod = readl(i2s->addr + I2SMOD); - int rfs_shift = i2s->variant_regs->rfs_off; + struct samsung_i2s_priv *priv = i2s->priv; + u32 mod = readl(priv->addr + I2SMOD); + int rfs_shift = priv->variant_regs->rfs_off; - mod &= ~(i2s->variant_regs->rfs_mask << rfs_shift); + mod &= ~(priv->variant_regs->rfs_mask << rfs_shift); switch (rfs) { case 192: @@ -275,15 +302,17 @@ static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs) break; } - writel(mod, i2s->addr + I2SMOD); + writel(mod, priv->addr + I2SMOD); } -/* Read Bit-Clock of I2S (in multiples of LRCLK) */ +/* Read bit-clock of I2S (in multiples of LRCLK) */ static inline unsigned get_bfs(struct i2s_dai *i2s) { + struct samsung_i2s_priv *priv = i2s->priv; u32 bfs; - bfs = readl(i2s->addr + I2SMOD) >> i2s->variant_regs->bfs_off; - bfs &= i2s->variant_regs->bfs_mask; + + bfs = readl(priv->addr + I2SMOD) >> priv->variant_regs->bfs_off; + bfs &= priv->variant_regs->bfs_mask; switch (bfs) { case 8: return 256; @@ -298,12 +327,13 @@ static inline unsigned get_bfs(struct i2s_dai *i2s) } } -/* Write Bit-Clock of I2S (in multiples of LRCLK) */ +/* Write bit-clock of I2S (in multiples of LRCLK) */ static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs) { - u32 mod = readl(i2s->addr + I2SMOD); - int tdm = i2s->quirks & QUIRK_SUPPORTS_TDM; - int bfs_shift = i2s->variant_regs->bfs_off; + struct samsung_i2s_priv *priv = i2s->priv; + u32 mod = readl(priv->addr + I2SMOD); + int tdm = priv->quirks & QUIRK_SUPPORTS_TDM; + int bfs_shift = priv->variant_regs->bfs_off; /* Non-TDM I2S controllers do not support BCLK > 48 * FS */ if (!tdm && bfs > 48) { @@ -311,7 +341,7 @@ static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs) return; } - mod &= ~(i2s->variant_regs->bfs_mask << bfs_shift); + mod &= ~(priv->variant_regs->bfs_mask << bfs_shift); switch (bfs) { case 48: @@ -346,13 +376,13 @@ static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs) return; } - writel(mod, i2s->addr + I2SMOD); + writel(mod, priv->addr + I2SMOD); } -/* Sample-Size */ +/* Sample size */ static inline int get_blc(struct i2s_dai *i2s) { - int blc = readl(i2s->addr + I2SMOD); + int blc = readl(i2s->priv->addr + I2SMOD); blc = (blc >> 13) & 0x3; @@ -363,11 +393,12 @@ static inline int get_blc(struct i2s_dai *i2s) } } -/* TX Channel Control */ +/* TX channel control */ static void i2s_txctrl(struct i2s_dai *i2s, int on) { - void __iomem *addr = i2s->addr; - int txr_off = i2s->variant_regs->txr_off; + struct samsung_i2s_priv *priv = i2s->priv; + void __iomem *addr = priv->addr; + int 
txr_off = priv->variant_regs->txr_off; u32 con = readl(addr + I2SCON); u32 mod = readl(addr + I2SMOD) & ~(3 << txr_off); @@ -416,8 +447,9 @@ static void i2s_txctrl(struct i2s_dai *i2s, int on) /* RX Channel Control */ static void i2s_rxctrl(struct i2s_dai *i2s, int on) { - void __iomem *addr = i2s->addr; - int txr_off = i2s->variant_regs->txr_off; + struct samsung_i2s_priv *priv = i2s->priv; + void __iomem *addr = priv->addr; + int txr_off = priv->variant_regs->txr_off; u32 con = readl(addr + I2SCON); u32 mod = readl(addr + I2SMOD) & ~(3 << txr_off); @@ -453,9 +485,9 @@ static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush) return; if (is_secondary(i2s)) - fic = i2s->addr + I2SFICS; + fic = i2s->priv->addr + I2SFICS; else - fic = i2s->addr + I2SFIC; + fic = i2s->priv->addr + I2SFIC; /* Flush the FIFO */ writel(readl(fic) | flush, fic); @@ -468,12 +500,13 @@ static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush) writel(readl(fic) & ~flush, fic); } -static int i2s_set_sysclk(struct snd_soc_dai *dai, - int clk_id, unsigned int rfs, int dir) +static int i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int rfs, + int dir) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = get_other_dai(i2s); - const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs; + const struct samsung_i2s_variant_regs *i2s_regs = priv->variant_regs; unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off; unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off; u32 mod, mask, val = 0; @@ -482,9 +515,9 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai, pm_runtime_get_sync(dai->dev); - spin_lock_irqsave(i2s->lock, flags); - mod = readl(i2s->addr + I2SMOD); - spin_unlock_irqrestore(i2s->lock, flags); + spin_lock_irqsave(&priv->lock, flags); + mod = readl(priv->addr + I2SMOD); + spin_unlock_irqrestore(&priv->lock, flags); switch (clk_id) { case SAMSUNG_I2S_OPCLK: @@ -519,51 +552,46 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai, case SAMSUNG_I2S_RCLKSRC_1: /* clock corrsponding to IISMOD[10] := 1 */ mask = 1 << i2s_regs->rclksrc_off; - if ((i2s->quirks & QUIRK_NO_MUXPSR) + if ((priv->quirks & QUIRK_NO_MUXPSR) || (clk_id == SAMSUNG_I2S_RCLKSRC_0)) clk_id = 0; else clk_id = 1; if (!any_active(i2s)) { - if (i2s->op_clk && !IS_ERR(i2s->op_clk)) { + if (priv->op_clk && !IS_ERR(priv->op_clk)) { if ((clk_id && !(mod & rsrc_mask)) || (!clk_id && (mod & rsrc_mask))) { - clk_disable_unprepare(i2s->op_clk); - clk_put(i2s->op_clk); + clk_disable_unprepare(priv->op_clk); + clk_put(priv->op_clk); } else { - i2s->rclk_srcrate = - clk_get_rate(i2s->op_clk); + priv->rclk_srcrate = + clk_get_rate(priv->op_clk); goto done; } } if (clk_id) - i2s->op_clk = clk_get(&i2s->pdev->dev, + priv->op_clk = clk_get(&i2s->pdev->dev, "i2s_opclk1"); else - i2s->op_clk = clk_get(&i2s->pdev->dev, + priv->op_clk = clk_get(&i2s->pdev->dev, "i2s_opclk0"); - if (WARN_ON(IS_ERR(i2s->op_clk))) { - ret = PTR_ERR(i2s->op_clk); - i2s->op_clk = NULL; + if (WARN_ON(IS_ERR(priv->op_clk))) { + ret = PTR_ERR(priv->op_clk); + priv->op_clk = NULL; goto err; } - ret = clk_prepare_enable(i2s->op_clk); + ret = clk_prepare_enable(priv->op_clk); if (ret) { - clk_put(i2s->op_clk); - i2s->op_clk = NULL; + clk_put(priv->op_clk); + priv->op_clk = NULL; goto err; } - i2s->rclk_srcrate = clk_get_rate(i2s->op_clk); + priv->rclk_srcrate = clk_get_rate(priv->op_clk); - /* Over-ride the other's */ - if (other) { - other->op_clk = i2s->op_clk; - other->rclk_srcrate = 
i2s->rclk_srcrate; - } } else if ((!clk_id && (mod & rsrc_mask)) || (clk_id && !(mod & rsrc_mask))) { dev_err(&i2s->pdev->dev, @@ -572,8 +600,6 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai, goto err; } else { /* Call can't be on the active DAI */ - i2s->op_clk = other->op_clk; - i2s->rclk_srcrate = other->rclk_srcrate; goto done; } @@ -586,11 +612,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai, goto err; } - spin_lock_irqsave(i2s->lock, flags); - mod = readl(i2s->addr + I2SMOD); + spin_lock_irqsave(&priv->lock, flags); + mod = readl(priv->addr + I2SMOD); mod = (mod & ~mask) | val; - writel(mod, i2s->addr + I2SMOD); - spin_unlock_irqrestore(i2s->lock, flags); + writel(mod, priv->addr + I2SMOD); + spin_unlock_irqrestore(&priv->lock, flags); done: pm_runtime_put(dai->dev); @@ -600,17 +626,17 @@ err: return ret; } -static int i2s_set_fmt(struct snd_soc_dai *dai, - unsigned int fmt) +static int i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); struct i2s_dai *i2s = to_info(dai); int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; u32 mod, tmp = 0; unsigned long flags; - lrp_shift = i2s->variant_regs->lrp_off; - sdf_shift = i2s->variant_regs->sdf_off; - mod_slave = 1 << i2s->variant_regs->mss_off; + lrp_shift = priv->variant_regs->lrp_off; + sdf_shift = priv->variant_regs->sdf_off; + mod_slave = 1 << priv->variant_regs->mss_off; sdf_mask = MOD_SDF_MASK << sdf_shift; lrp_rlow = MOD_LR_RLOW << lrp_shift; @@ -661,7 +687,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any * clock configuration assigned in DT is not overwritten. */ - if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL) + if (priv->rclk_srcrate == 0 && priv->clk_data.clks == NULL) i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, 0, SND_SOC_CLOCK_IN); break; @@ -671,15 +697,15 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, } pm_runtime_get_sync(dai->dev); - spin_lock_irqsave(i2s->lock, flags); - mod = readl(i2s->addr + I2SMOD); + spin_lock_irqsave(&priv->lock, flags); + mod = readl(priv->addr + I2SMOD); /* * Don't change the I2S mode if any controller is active on this * channel. 
*/ if (any_active(i2s) && ((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) { - spin_unlock_irqrestore(i2s->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); pm_runtime_put(dai->dev); dev_err(&i2s->pdev->dev, "%s:%d Other DAI busy\n", __func__, __LINE__); @@ -688,8 +714,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, mod &= ~(sdf_mask | lrp_rlow | mod_slave); mod |= tmp; - writel(mod, i2s->addr + I2SMOD); - spin_unlock_irqrestore(i2s->lock, flags); + writel(mod, priv->addr + I2SMOD); + spin_unlock_irqrestore(&priv->lock, flags); pm_runtime_put(dai->dev); return 0; @@ -698,6 +724,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, static int i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); struct i2s_dai *i2s = to_info(dai); u32 mod, mask = 0, val = 0; struct clk *rclksrc; @@ -711,7 +738,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, switch (params_channels(params)) { case 6: val |= MOD_DC2_EN; - /* fall through */ + /* Fall through */ case 4: val |= MOD_DC1_EN; break; @@ -773,34 +800,35 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } - spin_lock_irqsave(i2s->lock, flags); - mod = readl(i2s->addr + I2SMOD); + spin_lock_irqsave(&priv->lock, flags); + mod = readl(priv->addr + I2SMOD); mod = (mod & ~mask) | val; - writel(mod, i2s->addr + I2SMOD); - spin_unlock_irqrestore(i2s->lock, flags); + writel(mod, priv->addr + I2SMOD); + spin_unlock_irqrestore(&priv->lock, flags); snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture); i2s->frmclk = params_rate(params); - rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; + rclksrc = priv->clk_table[CLK_I2S_RCLK_SRC]; if (rclksrc && !IS_ERR(rclksrc)) - i2s->rclk_srcrate = clk_get_rate(rclksrc); + priv->rclk_srcrate = clk_get_rate(rclksrc); return 0; } -/* We set constraints on the substream acc to the version of I2S */ +/* We set constraints on the substream according to the version of I2S */ static int i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = get_other_dai(i2s); unsigned long flags; pm_runtime_get_sync(dai->dev); - spin_lock_irqsave(&lock, flags); + spin_lock_irqsave(&priv->pcm_lock, flags); i2s->mode |= DAI_OPENED; @@ -809,10 +837,10 @@ static int i2s_startup(struct snd_pcm_substream *substream, else i2s->mode |= DAI_MANAGER; - if (!any_active(i2s) && (i2s->quirks & QUIRK_NEED_RSTCLR)) - writel(CON_RSTCLR, i2s->addr + I2SCON); + if (!any_active(i2s) && (priv->quirks & QUIRK_NEED_RSTCLR)) + writel(CON_RSTCLR, i2s->priv->addr + I2SCON); - spin_unlock_irqrestore(&lock, flags); + spin_unlock_irqrestore(&priv->pcm_lock, flags); return 0; } @@ -820,11 +848,12 @@ static int i2s_startup(struct snd_pcm_substream *substream, static void i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = get_other_dai(i2s); unsigned long flags; - spin_lock_irqsave(&lock, flags); + spin_lock_irqsave(&priv->pcm_lock, flags); i2s->mode &= ~DAI_OPENED; i2s->mode &= ~DAI_MANAGER; @@ -836,13 +865,14 @@ static void i2s_shutdown(struct snd_pcm_substream *substream, i2s->rfs = 0; i2s->bfs = 0; - spin_unlock_irqrestore(&lock, flags); + spin_unlock_irqrestore(&priv->pcm_lock, flags); 
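Throughout the hunks above, the old per-DAI lock pointer and the file-scope spinlock are replaced by two locks in the shared samsung_i2s_priv: priv->lock serializes the SFR accesses (I2SMOD, I2SCON, I2SPSR), while priv->pcm_lock protects the cross-interface DAI_OPENED/DAI_MANAGER bookkeeping. A minimal sketch of the intended split, using only names from the diff:

	spin_lock_irqsave(&priv->lock, flags);		/* register update */
	mod = readl(priv->addr + I2SMOD);
	mod = (mod & ~mask) | val;
	writel(mod, priv->addr + I2SMOD);
	spin_unlock_irqrestore(&priv->lock, flags);

	spin_lock_irqsave(&priv->pcm_lock, flags);	/* open/manager state */
	i2s->mode |= DAI_OPENED;
	spin_unlock_irqrestore(&priv->pcm_lock, flags);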
pm_runtime_put(dai->dev); } static int config_setup(struct i2s_dai *i2s) { + struct samsung_i2s_priv *priv = i2s->priv; struct i2s_dai *other = get_other_dai(i2s); unsigned rfs, bfs, blc; u32 psr; @@ -890,12 +920,12 @@ static int config_setup(struct i2s_dai *i2s) if (is_slave(i2s)) return 0; - if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { - psr = i2s->rclk_srcrate / i2s->frmclk / rfs; - writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); + if (!(priv->quirks & QUIRK_NO_MUXPSR)) { + psr = priv->rclk_srcrate / i2s->frmclk / rfs; + writel(((psr - 1) << 8) | PSR_PSREN, priv->addr + I2SPSR); dev_dbg(&i2s->pdev->dev, "RCLK_SRC=%luHz PSR=%u, RCLK=%dfs, BCLK=%dfs\n", - i2s->rclk_srcrate, psr, rfs, bfs); + priv->rclk_srcrate, psr, rfs, bfs); } return 0; @@ -904,6 +934,7 @@ static int config_setup(struct i2s_dai *i2s) static int i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); struct snd_soc_pcm_runtime *rtd = substream->private_data; struct i2s_dai *i2s = to_info(rtd->cpu_dai); @@ -914,10 +945,10 @@ static int i2s_trigger(struct snd_pcm_substream *substream, case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pm_runtime_get_sync(dai->dev); - spin_lock_irqsave(i2s->lock, flags); + spin_lock_irqsave(&priv->lock, flags); if (config_setup(i2s)) { - spin_unlock_irqrestore(i2s->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); return -EINVAL; } @@ -926,12 +957,12 @@ static int i2s_trigger(struct snd_pcm_substream *substream, else i2s_txctrl(i2s, 1); - spin_unlock_irqrestore(i2s->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - spin_lock_irqsave(i2s->lock, flags); + spin_lock_irqsave(&priv->lock, flags); if (capture) { i2s_rxctrl(i2s, 0); @@ -941,7 +972,7 @@ static int i2s_trigger(struct snd_pcm_substream *substream, i2s_fifo(i2s, FIC_TXFLUSH); } - spin_unlock_irqrestore(i2s->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); pm_runtime_put(dai->dev); break; } @@ -980,19 +1011,19 @@ static int i2s_set_clkdiv(struct snd_soc_dai *dai, static snd_pcm_sframes_t i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); struct i2s_dai *i2s = to_info(dai); - u32 reg = readl(i2s->addr + I2SFIC); + u32 reg = readl(priv->addr + I2SFIC); snd_pcm_sframes_t delay; - const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs; WARN_ON(!pm_runtime_active(dai->dev)); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) delay = FIC_RXCOUNT(reg); else if (is_secondary(i2s)) - delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS)); + delay = FICS_TXCOUNT(readl(priv->addr + I2SFICS)); else - delay = (reg >> i2s_regs->ftx0cnt_off) & 0x7f; + delay = (reg >> priv->variant_regs->ftx0cnt_off) & 0x7f; return delay; } @@ -1014,39 +1045,39 @@ static int i2s_resume(struct snd_soc_dai *dai) static int samsung_i2s_dai_probe(struct snd_soc_dai *dai) { + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = get_other_dai(i2s); unsigned long flags; pm_runtime_get_sync(dai->dev); - if (is_secondary(i2s)) { /* If this is probe on the secondary DAI */ - snd_soc_dai_init_dma_data(dai, &other->sec_dai->dma_playback, - NULL); + if (is_secondary(i2s)) { + /* If this is probe on the secondary 
DAI */ + snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, NULL); } else { snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, - &i2s->dma_capture); + &i2s->dma_capture); - if (i2s->quirks & QUIRK_NEED_RSTCLR) - writel(CON_RSTCLR, i2s->addr + I2SCON); + if (priv->quirks & QUIRK_NEED_RSTCLR) + writel(CON_RSTCLR, priv->addr + I2SCON); - if (i2s->quirks & QUIRK_SUPPORTS_IDMA) - idma_reg_addr_init(i2s->addr, - i2s->sec_dai->idma_playback.addr); + if (priv->quirks & QUIRK_SUPPORTS_IDMA) + idma_reg_addr_init(priv->addr, + other->idma_playback.addr); } /* Reset any constraint on RFS and BFS */ i2s->rfs = 0; i2s->bfs = 0; - i2s->rclk_srcrate = 0; - spin_lock_irqsave(i2s->lock, flags); + spin_lock_irqsave(&priv->lock, flags); i2s_txctrl(i2s, 0); i2s_rxctrl(i2s, 0); i2s_fifo(i2s, FIC_TXFLUSH); i2s_fifo(other, FIC_TXFLUSH); i2s_fifo(i2s, FIC_RXFLUSH); - spin_unlock_irqrestore(i2s->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); /* Gate CDCLK by default */ if (!is_opened(other)) @@ -1059,16 +1090,17 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai) static int samsung_i2s_dai_remove(struct snd_soc_dai *dai) { - struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai); + struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai); + struct i2s_dai *i2s = to_info(dai); unsigned long flags; pm_runtime_get_sync(dai->dev); if (!is_secondary(i2s)) { - if (i2s->quirks & QUIRK_NEED_RSTCLR) { - spin_lock_irqsave(i2s->lock, flags); - writel(0, i2s->addr + I2SCON); - spin_unlock_irqrestore(i2s->lock, flags); + if (priv->quirks & QUIRK_NEED_RSTCLR) { + spin_lock_irqsave(&priv->lock, flags); + writel(0, priv->addr + I2SCON); + spin_unlock_irqrestore(&priv->lock, flags); } } @@ -1088,118 +1120,157 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = { .delay = i2s_delay, }; +static const struct snd_soc_dapm_widget samsung_i2s_widgets[] = { + /* Backend DAI */ + SND_SOC_DAPM_AIF_OUT("Mixer DAI TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("Mixer DAI RX", NULL, 0, SND_SOC_NOPM, 0, 0), + + /* Playback Mixer */ + SND_SOC_DAPM_MIXER("Playback Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), +}; + +static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = { + { "Playback Mixer", NULL, "Primary" }, + { "Playback Mixer", NULL, "Secondary" }, + + { "Mixer DAI TX", NULL, "Playback Mixer" }, + { "Playback Mixer", NULL, "Mixer DAI RX" }, +}; + static const struct snd_soc_component_driver samsung_i2s_component = { - .name = "samsung-i2s", + .name = "samsung-i2s", + + .dapm_widgets = samsung_i2s_widgets, + .num_dapm_widgets = ARRAY_SIZE(samsung_i2s_widgets), + + .dapm_routes = samsung_i2s_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(samsung_i2s_dapm_routes), }; -#define SAMSUNG_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ - SNDRV_PCM_FMTBIT_S16_LE | \ - SNDRV_PCM_FMTBIT_S24_LE) +#define SAMSUNG_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S24_LE) -static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, - const struct samsung_i2s_dai_data *i2s_dai_data, - bool sec) +static int i2s_alloc_dais(struct samsung_i2s_priv *priv, + const struct samsung_i2s_dai_data *i2s_dai_data, + int num_dais) { - struct i2s_dai *i2s; - - i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL); - if (i2s == NULL) - return NULL; - - i2s->pdev = pdev; - i2s->pri_dai = NULL; - i2s->sec_dai = NULL; - i2s->i2s_dai_drv.id = 1; - i2s->i2s_dai_drv.symmetric_rates = 1; - i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe; - i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove; - 
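The component now exposes "Mixer DAI TX"/"Mixer DAI RX" AIF widgets and a "Playback Mixer" fed by the "Primary" and "Secondary" playback streams, modelling both DAIs being mixed onto one backend. A machine driver can then route its codec stream from "Mixer DAI TX"; the Odroid card further below does exactly that with card-level routes such as:

	{ "I2S Playback", NULL, "Mixer DAI TX" },
	{ "HiFi Playback", NULL, "Mixer DAI TX" },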
i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops; - i2s->i2s_dai_drv.suspend = i2s_suspend; - i2s->i2s_dai_drv.resume = i2s_resume; - i2s->i2s_dai_drv.playback.channels_min = 1; - i2s->i2s_dai_drv.playback.channels_max = 2; - i2s->i2s_dai_drv.playback.rates = i2s_dai_data->pcm_rates; - i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS; - - if (!sec) { - i2s->i2s_dai_drv.name = SAMSUNG_I2S_DAI; - i2s->i2s_dai_drv.capture.channels_min = 1; - i2s->i2s_dai_drv.capture.channels_max = 2; - i2s->i2s_dai_drv.capture.rates = i2s_dai_data->pcm_rates; - i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; - } else { - i2s->i2s_dai_drv.name = SAMSUNG_I2S_DAI_SEC; + static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" }; + static const char *stream_names[] = { "Primary", "Secondary" }; + struct snd_soc_dai_driver *dai_drv; + struct i2s_dai *dai; + int i; + + priv->dai = devm_kcalloc(&priv->pdev->dev, num_dais, + sizeof(*dai), GFP_KERNEL); + if (!priv->dai) + return -ENOMEM; + + priv->dai_drv = devm_kcalloc(&priv->pdev->dev, num_dais, + sizeof(*dai_drv), GFP_KERNEL); + if (!priv->dai_drv) + return -ENOMEM; + + for (i = 0; i < num_dais; i++) { + dai_drv = &priv->dai_drv[i]; + + dai_drv->probe = samsung_i2s_dai_probe; + dai_drv->remove = samsung_i2s_dai_remove; + dai_drv->suspend = i2s_suspend; + dai_drv->resume = i2s_resume; + + dai_drv->symmetric_rates = 1; + dai_drv->ops = &samsung_i2s_dai_ops; + + dai_drv->playback.channels_min = 1; + dai_drv->playback.channels_max = 2; + dai_drv->playback.rates = i2s_dai_data->pcm_rates; + dai_drv->playback.formats = SAMSUNG_I2S_FMTS; + dai_drv->playback.stream_name = stream_names[i]; + + dai_drv->id = i + 1; + dai_drv->name = dai_names[i]; + + priv->dai[i].drv = &priv->dai_drv[i]; + priv->dai[i].pdev = priv->pdev; } - return i2s; + + /* Initialize capture only for the primary DAI */ + dai_drv = &priv->dai_drv[SAMSUNG_I2S_ID_PRIMARY - 1]; + + dai_drv->capture.channels_min = 1; + dai_drv->capture.channels_max = 2; + dai_drv->capture.rates = i2s_dai_data->pcm_rates; + dai_drv->capture.formats = SAMSUNG_I2S_FMTS; + + return 0; } #ifdef CONFIG_PM static int i2s_runtime_suspend(struct device *dev) { - struct i2s_dai *i2s = dev_get_drvdata(dev); + struct samsung_i2s_priv *priv = dev_get_drvdata(dev); - i2s->suspend_i2smod = readl(i2s->addr + I2SMOD); - i2s->suspend_i2scon = readl(i2s->addr + I2SCON); - i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR); + priv->suspend_i2smod = readl(priv->addr + I2SMOD); + priv->suspend_i2scon = readl(priv->addr + I2SCON); + priv->suspend_i2spsr = readl(priv->addr + I2SPSR); - if (i2s->op_clk) - clk_disable_unprepare(i2s->op_clk); - clk_disable_unprepare(i2s->clk); + if (priv->op_clk) + clk_disable_unprepare(priv->op_clk); + clk_disable_unprepare(priv->clk); return 0; } static int i2s_runtime_resume(struct device *dev) { - struct i2s_dai *i2s = dev_get_drvdata(dev); + struct samsung_i2s_priv *priv = dev_get_drvdata(dev); int ret; - ret = clk_prepare_enable(i2s->clk); + ret = clk_prepare_enable(priv->clk); if (ret) return ret; - if (i2s->op_clk) { - ret = clk_prepare_enable(i2s->op_clk); + if (priv->op_clk) { + ret = clk_prepare_enable(priv->op_clk); if (ret) { - clk_disable_unprepare(i2s->clk); + clk_disable_unprepare(priv->clk); return ret; } } - writel(i2s->suspend_i2scon, i2s->addr + I2SCON); - writel(i2s->suspend_i2smod, i2s->addr + I2SMOD); - writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR); + writel(priv->suspend_i2scon, priv->addr + I2SCON); + writel(priv->suspend_i2smod, priv->addr + I2SMOD); + 
writel(priv->suspend_i2spsr, priv->addr + I2SPSR); return 0; } #endif /* CONFIG_PM */ -static void i2s_unregister_clocks(struct i2s_dai *i2s) +static void i2s_unregister_clocks(struct samsung_i2s_priv *priv) { int i; - for (i = 0; i < i2s->clk_data.clk_num; i++) { - if (!IS_ERR(i2s->clk_table[i])) - clk_unregister(i2s->clk_table[i]); + for (i = 0; i < priv->clk_data.clk_num; i++) { + if (!IS_ERR(priv->clk_table[i])) + clk_unregister(priv->clk_table[i]); } } -static void i2s_unregister_clock_provider(struct platform_device *pdev) +static void i2s_unregister_clock_provider(struct samsung_i2s_priv *priv) { - struct i2s_dai *i2s = dev_get_drvdata(&pdev->dev); - - of_clk_del_provider(pdev->dev.of_node); - i2s_unregister_clocks(i2s); + of_clk_del_provider(priv->pdev->dev.of_node); + i2s_unregister_clocks(priv); } -static int i2s_register_clock_provider(struct platform_device *pdev) + +static int i2s_register_clock_provider(struct samsung_i2s_priv *priv) { + const char * const i2s_clk_desc[] = { "cdclk", "rclk_src", "prescaler" }; const char *clk_name[2] = { "i2s_opclk0", "i2s_opclk1" }; const char *p_names[2] = { NULL }; - struct device *dev = &pdev->dev; - struct i2s_dai *i2s = dev_get_drvdata(dev); - const struct samsung_i2s_variant_regs *reg_info = i2s->variant_regs; + struct device *dev = &priv->pdev->dev; + const struct samsung_i2s_variant_regs *reg_info = priv->variant_regs; const char *i2s_clk_name[ARRAY_SIZE(i2s_clk_desc)]; struct clk *rclksrc; int ret, i; @@ -1224,110 +1295,170 @@ static int i2s_register_clock_provider(struct platform_device *pdev) return -ENOMEM; } - if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { + if (!(priv->quirks & QUIRK_NO_MUXPSR)) { /* Activate the prescaler */ - u32 val = readl(i2s->addr + I2SPSR); - writel(val | PSR_PSREN, i2s->addr + I2SPSR); + u32 val = readl(priv->addr + I2SPSR); + writel(val | PSR_PSREN, priv->addr + I2SPSR); - i2s->clk_table[CLK_I2S_RCLK_SRC] = clk_register_mux(dev, + priv->clk_table[CLK_I2S_RCLK_SRC] = clk_register_mux(dev, i2s_clk_name[CLK_I2S_RCLK_SRC], p_names, ARRAY_SIZE(p_names), CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, - i2s->addr + I2SMOD, reg_info->rclksrc_off, - 1, 0, i2s->lock); + priv->addr + I2SMOD, reg_info->rclksrc_off, + 1, 0, &priv->lock); - i2s->clk_table[CLK_I2S_RCLK_PSR] = clk_register_divider(dev, + priv->clk_table[CLK_I2S_RCLK_PSR] = clk_register_divider(dev, i2s_clk_name[CLK_I2S_RCLK_PSR], i2s_clk_name[CLK_I2S_RCLK_SRC], CLK_SET_RATE_PARENT, - i2s->addr + I2SPSR, 8, 6, 0, i2s->lock); + priv->addr + I2SPSR, 8, 6, 0, &priv->lock); p_names[0] = i2s_clk_name[CLK_I2S_RCLK_PSR]; - i2s->clk_data.clk_num = 2; + priv->clk_data.clk_num = 2; } - i2s->clk_table[CLK_I2S_CDCLK] = clk_register_gate(dev, + priv->clk_table[CLK_I2S_CDCLK] = clk_register_gate(dev, i2s_clk_name[CLK_I2S_CDCLK], p_names[0], CLK_SET_RATE_PARENT, - i2s->addr + I2SMOD, reg_info->cdclkcon_off, - CLK_GATE_SET_TO_DISABLE, i2s->lock); + priv->addr + I2SMOD, reg_info->cdclkcon_off, + CLK_GATE_SET_TO_DISABLE, &priv->lock); - i2s->clk_data.clk_num += 1; - i2s->clk_data.clks = i2s->clk_table; + priv->clk_data.clk_num += 1; + priv->clk_data.clks = priv->clk_table; ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, - &i2s->clk_data); + &priv->clk_data); if (ret < 0) { dev_err(dev, "failed to add clock provider: %d\n", ret); - i2s_unregister_clocks(i2s); + i2s_unregister_clocks(priv); } return ret; } +/* Create platform device for the secondary PCM */ +static int i2s_create_secondary_device(struct samsung_i2s_priv *priv) +{ + struct platform_device 
*pdev_sec; + const char *devname; + int ret; + + devname = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s-sec", + dev_name(&priv->pdev->dev)); + if (!devname) + return -ENOMEM; + + pdev_sec = platform_device_alloc(devname, -1); + if (!pdev_sec) + return -ENOMEM; + + pdev_sec->driver_override = kstrdup("samsung-i2s", GFP_KERNEL); + + ret = platform_device_add(pdev_sec); + if (ret < 0) { + platform_device_put(pdev_sec); + return ret; + } + + ret = device_attach(&pdev_sec->dev); + if (ret <= 0) { + platform_device_unregister(priv->pdev_sec); + dev_info(&pdev_sec->dev, "device_attach() failed\n"); + return ret; + } + + priv->pdev_sec = pdev_sec; + + return 0; +} + +static void i2s_delete_secondary_device(struct samsung_i2s_priv *priv) +{ + platform_device_unregister(priv->pdev_sec); + priv->pdev_sec = NULL; +} + static int samsung_i2s_probe(struct platform_device *pdev) { struct i2s_dai *pri_dai, *sec_dai = NULL; struct s3c_audio_pdata *i2s_pdata = pdev->dev.platform_data; - struct resource *res; - u32 regs_base, quirks = 0, idma_addr = 0; + u32 regs_base, idma_addr = 0; struct device_node *np = pdev->dev.of_node; const struct samsung_i2s_dai_data *i2s_dai_data; - int ret; + const struct platform_device_id *id; + struct samsung_i2s_priv *priv; + struct resource *res; + int num_dais, ret; - if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) + if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { i2s_dai_data = of_device_get_match_data(&pdev->dev); - else - i2s_dai_data = (struct samsung_i2s_dai_data *) - platform_get_device_id(pdev)->driver_data; + } else { + id = platform_get_device_id(pdev); - pri_dai = i2s_alloc_dai(pdev, i2s_dai_data, false); - if (!pri_dai) { - dev_err(&pdev->dev, "Unable to alloc I2S_pri\n"); - return -ENOMEM; + /* Nothing to do if it is the secondary device probe */ + if (!id) + return 0; + + i2s_dai_data = (struct samsung_i2s_dai_data *)id->driver_data; } - spin_lock_init(&pri_dai->spinlock); - pri_dai->lock = &pri_dai->spinlock; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; - if (!np) { - if (i2s_pdata == NULL) { - dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n"); + if (np) { + priv->quirks = i2s_dai_data->quirks; + } else { + if (!i2s_pdata) { + dev_err(&pdev->dev, "Missing platform data\n"); return -EINVAL; } + priv->quirks = i2s_pdata->type.quirks; + } + num_dais = (priv->quirks & QUIRK_SEC_DAI) ? 
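i2s_create_secondary_device() above builds a child platform device named "<primary>-sec" and forces it to bind to this same driver through driver_override. Since that device matches by override (it has neither an OF node nor an id_table entry), platform_get_device_id() is assumed to return NULL in its probe, which is the cue samsung_i2s_probe() uses to return early:

	/* in samsung_i2s_probe(), non-DT path */
	id = platform_get_device_id(pdev);
	if (!id)	/* the "-sec" device: set up entirely by the primary */
		return 0;

	i2s_dai_data = (struct samsung_i2s_dai_data *)id->driver_data;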
2 : 1; + priv->pdev = pdev; + priv->variant_regs = i2s_dai_data->i2s_variant_regs; + + ret = i2s_alloc_dais(priv, i2s_dai_data, num_dais); + if (ret < 0) + return ret; + + pri_dai = &priv->dai[SAMSUNG_I2S_ID_PRIMARY - 1]; + + spin_lock_init(&priv->lock); + spin_lock_init(&priv->pcm_lock); + + if (!np) { pri_dai->dma_playback.filter_data = i2s_pdata->dma_playback; pri_dai->dma_capture.filter_data = i2s_pdata->dma_capture; pri_dai->filter = i2s_pdata->dma_filter; - quirks = i2s_pdata->type.quirks; idma_addr = i2s_pdata->type.idma_addr; } else { - quirks = i2s_dai_data->quirks; if (of_property_read_u32(np, "samsung,idma-addr", &idma_addr)) { - if (quirks & QUIRK_SUPPORTS_IDMA) { + if (priv->quirks & QUIRK_SUPPORTS_IDMA) { dev_info(&pdev->dev, "idma address is not"\ "specified"); } } } - quirks &= ~(QUIRK_SEC_DAI | QUIRK_SUPPORTS_IDMA); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pri_dai->addr = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pri_dai->addr)) - return PTR_ERR(pri_dai->addr); + priv->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->addr)) + return PTR_ERR(priv->addr); regs_base = res->start; - pri_dai->clk = devm_clk_get(&pdev->dev, "iis"); - if (IS_ERR(pri_dai->clk)) { + priv->clk = devm_clk_get(&pdev->dev, "iis"); + if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "Failed to get iis clock\n"); - return PTR_ERR(pri_dai->clk); + return PTR_ERR(priv->clk); } - ret = clk_prepare_enable(pri_dai->clk); + ret = clk_prepare_enable(priv->clk); if (ret != 0) { dev_err(&pdev->dev, "failed to enable clock: %d\n", ret); return ret; @@ -1338,33 +1469,19 @@ static int samsung_i2s_probe(struct platform_device *pdev) pri_dai->dma_capture.chan_name = "rx"; pri_dai->dma_playback.addr_width = 4; pri_dai->dma_capture.addr_width = 4; - pri_dai->quirks = quirks; - pri_dai->variant_regs = i2s_dai_data->i2s_variant_regs; + pri_dai->priv = priv; - if (quirks & QUIRK_PRI_6CHAN) - pri_dai->i2s_dai_drv.playback.channels_max = 6; + if (priv->quirks & QUIRK_PRI_6CHAN) + pri_dai->drv->playback.channels_max = 6; ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter, - NULL, NULL); - if (ret < 0) - goto err_disable_clk; - - ret = devm_snd_soc_register_component(&pdev->dev, - &samsung_i2s_component, - &pri_dai->i2s_dai_drv, 1); + "tx", "rx", NULL); if (ret < 0) goto err_disable_clk; - if (quirks & QUIRK_SEC_DAI) { - sec_dai = i2s_alloc_dai(pdev, i2s_dai_data, true); - if (!sec_dai) { - dev_err(&pdev->dev, "Unable to alloc I2S_sec\n"); - ret = -ENOMEM; - goto err_disable_clk; - } + if (priv->quirks & QUIRK_SEC_DAI) { + sec_dai = &priv->dai[SAMSUNG_I2S_ID_SECONDARY - 1]; - sec_dai->lock = &pri_dai->spinlock; - sec_dai->variant_regs = pri_dai->variant_regs; sec_dai->dma_playback.addr = regs_base + I2STXDS; sec_dai->dma_playback.chan_name = "tx-sec"; @@ -1374,62 +1491,72 @@ static int samsung_i2s_probe(struct platform_device *pdev) } sec_dai->dma_playback.addr_width = 4; - sec_dai->addr = pri_dai->addr; - sec_dai->clk = pri_dai->clk; - sec_dai->quirks = quirks; sec_dai->idma_playback.addr = idma_addr; sec_dai->pri_dai = pri_dai; + sec_dai->priv = priv; pri_dai->sec_dai = sec_dai; - ret = samsung_asoc_dma_platform_register(&pdev->dev, - sec_dai->filter, "tx-sec", NULL); + ret = i2s_create_secondary_device(priv); if (ret < 0) goto err_disable_clk; - ret = devm_snd_soc_register_component(&pdev->dev, - &samsung_i2s_component, - &sec_dai->i2s_dai_drv, 1); + ret = samsung_asoc_dma_platform_register(&priv->pdev_sec->dev, + sec_dai->filter, "tx-sec", NULL, + &pdev->dev); if (ret 
< 0) - goto err_disable_clk; + goto err_del_sec; + } if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) { dev_err(&pdev->dev, "Unable to configure gpio\n"); ret = -EINVAL; - goto err_disable_clk; + goto err_del_sec; } - dev_set_drvdata(&pdev->dev, pri_dai); + dev_set_drvdata(&pdev->dev, priv); + + ret = devm_snd_soc_register_component(&pdev->dev, + &samsung_i2s_component, + priv->dai_drv, num_dais); + if (ret < 0) + goto err_del_sec; pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - ret = i2s_register_clock_provider(pdev); + ret = i2s_register_clock_provider(priv); if (ret < 0) goto err_disable_pm; - pri_dai->op_clk = clk_get_parent(pri_dai->clk_table[CLK_I2S_RCLK_SRC]); + priv->op_clk = clk_get_parent(priv->clk_table[CLK_I2S_RCLK_SRC]); return 0; err_disable_pm: pm_runtime_disable(&pdev->dev); +err_del_sec: + i2s_delete_secondary_device(priv); err_disable_clk: - clk_disable_unprepare(pri_dai->clk); + clk_disable_unprepare(priv->clk); return ret; } static int samsung_i2s_remove(struct platform_device *pdev) { - struct i2s_dai *pri_dai; + struct samsung_i2s_priv *priv = dev_get_drvdata(&pdev->dev); - pri_dai = dev_get_drvdata(&pdev->dev); + /* The secondary device has no driver data assigned */ + if (!priv) + return 0; pm_runtime_get_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - i2s_unregister_clock_provider(pdev); - clk_disable_unprepare(pri_dai->clk); + i2s_unregister_clock_provider(priv); + i2s_delete_secondary_device(priv); + clk_disable_unprepare(priv->clk); + pm_runtime_put_noidle(&pdev->dev); return 0; diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c index e7b371b07230..694512f980fd 100644 --- a/sound/soc/samsung/odroid.c +++ b/sound/soc/samsung/odroid.c @@ -7,6 +7,7 @@ */ #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/module.h> @@ -17,26 +18,52 @@ struct odroid_priv { struct snd_soc_card card; - struct snd_soc_dai_link dai_link; - struct clk *clk_i2s_bus; struct clk *sclk_i2s; + + /* Spinlock protecting fields below */ + spinlock_t lock; + unsigned int be_sample_rate; + bool be_active; }; -static int odroid_card_startup(struct snd_pcm_substream *substream) +static int odroid_card_fe_startup(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_hw_constraint_single(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2); + return 0; } -static int odroid_card_hw_params(struct snd_pcm_substream *substream, +static int odroid_card_fe_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->be_active && priv->be_sample_rate != params_rate(params)) + ret = -EINVAL; + spin_unlock_irqrestore(&priv->lock, flags); + + return ret; +} + +static const struct snd_soc_ops odroid_card_fe_ops = { + .startup = odroid_card_fe_startup, + .hw_params = odroid_card_fe_hw_params, +}; + +static int odroid_card_be_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card); unsigned int pll_freq, rclk_freq, rfs; + unsigned long flags; int ret; switch (params_rate(params)) { @@ -83,22 +110,97 @@ static int odroid_card_hw_params(struct snd_pcm_substream 
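For reference, the samsung i2s probe rework above inserts an err_del_sec label ahead of err_disable_clk so that a failure after the secondary device has been created tears it down before the clock is released. A minimal userspace model of that unwind ordering, with hypothetical resource names and a deliberately failing final step:

/*
 * Sketch of reverse-order unwinding via goto labels, as used in the
 * reworked probe path.  All names are hypothetical; the last step
 * fails on purpose to exercise the error path.
 */
#include <stdio.h>

static int setup_clock(void)        { puts("clock on");          return 0; }
static void teardown_clock(void)    { puts("clock off"); }
static int create_secondary(void)   { puts("secondary added");   return 0; }
static void delete_secondary(void)  { puts("secondary removed"); }
static int register_card(void)      { puts("card register");     return -1; /* simulated failure */ }

static int probe(void)
{
	int ret;

	ret = setup_clock();
	if (ret)
		return ret;
	ret = create_secondary();
	if (ret)
		goto err_disable_clk;
	ret = register_card();
	if (ret)
		goto err_del_sec;
	return 0;

err_del_sec:
	/* Undo steps in reverse order of setup. */
	delete_secondary();
err_disable_clk:
	teardown_clock();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}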
*substream, return ret; } + spin_lock_irqsave(&priv->lock, flags); + priv->be_sample_rate = params_rate(params); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int odroid_card_be_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + priv->be_active = true; + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + priv->be_active = false; + break; + } + + spin_unlock_irqrestore(&priv->lock, flags); + return 0; } -static const struct snd_soc_ops odroid_card_ops = { - .startup = odroid_card_startup, - .hw_params = odroid_card_hw_params, +static const struct snd_soc_ops odroid_card_be_ops = { + .hw_params = odroid_card_be_hw_params, + .trigger = odroid_card_be_trigger, +}; + +/* DAPM routes for backward compatibility with old DTS */ +static const struct snd_soc_dapm_route odroid_dapm_routes[] = { + { "I2S Playback", NULL, "Mixer DAI TX" }, + { "HiFi Playback", NULL, "Mixer DAI TX" }, +}; + +static struct snd_soc_dai_link odroid_card_dais[] = { + { + /* Primary FE <-> BE link */ + .codec_name = "snd-soc-dummy", + .codec_dai_name = "snd-soc-dummy-dai", + .ops = &odroid_card_fe_ops, + .name = "Primary", + .stream_name = "Primary", + .platform_name = "3830000.i2s", + .dynamic = 1, + .dpcm_playback = 1, + }, { + /* BE <-> CODECs link */ + .name = "I2S Mixer", + .cpu_name = "snd-soc-dummy", + .cpu_dai_name = "snd-soc-dummy-dai", + .platform_name = "snd-soc-dummy", + .ops = &odroid_card_be_ops, + .no_pcm = 1, + .dpcm_playback = 1, + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBS_CFS, + }, { + /* Secondary FE <-> BE link */ + .playback_only = 1, + .codec_name = "snd-soc-dummy", + .codec_dai_name = "snd-soc-dummy-dai", + .ops = &odroid_card_fe_ops, + .name = "Secondary", + .stream_name = "Secondary", + .platform_name = "3830000.i2s-sec", + .dynamic = 1, + .dpcm_playback = 1, + } }; static int odroid_audio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct device_node *cpu_dai = NULL; struct device_node *cpu, *codec; struct odroid_priv *priv; - struct snd_soc_dai_link *link; struct snd_soc_card *card; - int ret; + struct snd_soc_dai_link *link, *codec_link; + int num_pcms, ret, i; + struct of_phandle_args args = {}; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -110,6 +212,7 @@ static int odroid_audio_probe(struct platform_device *pdev) card->owner = THIS_MODULE; card->fully_routed = true; + spin_lock_init(&priv->lock); snd_soc_card_set_drvdata(card, priv); ret = snd_soc_of_parse_card_name(card, "model"); @@ -130,45 +233,78 @@ static int odroid_audio_probe(struct platform_device *pdev) return ret; } - link = &priv->dai_link; - - link->ops = &odroid_card_ops; - link->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | - SND_SOC_DAIFMT_CBS_CFS; - - card->dai_link = &priv->dai_link; - card->num_links = 1; + card->dai_link = odroid_card_dais; + card->num_links = ARRAY_SIZE(odroid_card_dais); cpu = of_get_child_by_name(dev->of_node, "cpu"); codec = of_get_child_by_name(dev->of_node, "codec"); + link = card->dai_link; + codec_link = &card->dai_link[1]; - link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0); - if 
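The Odroid machine-driver changes above let the front-end hw_params reject a rate that conflicts with an already running back end, with the shared state guarded by a spinlock. A small userspace model of that check follows; a pthread mutex stands in for the kernel spinlock and all names are hypothetical:

/*
 * Userspace model of the FE/BE rate guard: the back end records its
 * rate while active, and a front end asking for a different rate is
 * refused (-EINVAL in the driver, -1 here).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct be_state {
	pthread_mutex_t lock;
	unsigned int rate;
	bool active;
};

static struct be_state be = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void be_set_rate(unsigned int rate)
{
	pthread_mutex_lock(&be.lock);
	be.rate = rate;
	pthread_mutex_unlock(&be.lock);
}

static void be_set_active(bool active)
{
	pthread_mutex_lock(&be.lock);
	be.active = active;
	pthread_mutex_unlock(&be.lock);
}

/* Front end: only accept a rate matching a running back end. */
static int fe_hw_params(unsigned int rate)
{
	int ret = 0;

	pthread_mutex_lock(&be.lock);
	if (be.active && be.rate != rate)
		ret = -1;
	pthread_mutex_unlock(&be.lock);
	return ret;
}

int main(void)
{
	be_set_rate(48000);
	be_set_active(true);
	printf("48000: %s\n", fe_hw_params(48000) ? "rejected" : "ok");
	printf("44100: %s\n", fe_hw_params(44100) ? "rejected" : "ok");
	return 0;
}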
(!link->cpu_of_node) { - dev_err(dev, "Failed parsing cpu/sound-dai property\n"); - return -EINVAL; + /* + * For backwards compatibility create the secondary CPU DAI link only + * if there are 2 CPU DAI entries in the cpu sound-dai property in DT. + * Also add required DAPM routes not available in old DTS. + */ + num_pcms = of_count_phandle_with_args(cpu, "sound-dai", + "#sound-dai-cells"); + if (num_pcms == 1) { + card->dapm_routes = odroid_dapm_routes; + card->num_dapm_routes = ARRAY_SIZE(odroid_dapm_routes); + card->num_links--; } - ret = snd_soc_of_get_dai_link_codecs(dev, codec, link); - if (ret < 0) - goto err_put_codec_n; + for (i = 0; i < num_pcms; i++, link += 2) { + ret = of_parse_phandle_with_args(cpu, "sound-dai", + "#sound-dai-cells", i, &args); + if (ret < 0) + break; + + if (!args.np) { + dev_err(dev, "sound-dai property parse error: %d\n", ret); + ret = -EINVAL; + break; + } - link->platform_of_node = link->cpu_of_node; + ret = snd_soc_get_dai_name(&args, &link->cpu_dai_name); + of_node_put(args.np); - link->name = "Primary"; - link->stream_name = link->name; + if (ret < 0) + break; + } + if (ret == 0) { + cpu_dai = of_parse_phandle(cpu, "sound-dai", 0); + if (!cpu_dai) + ret = -EINVAL; + } + of_node_put(cpu); + of_node_put(codec); + if (ret < 0) + return ret; + + ret = snd_soc_of_get_dai_link_codecs(dev, codec, codec_link); + if (ret < 0) + goto err_put_cpu_dai; + + /* Set capture capability only for boards with the MAX98090 CODEC */ + if (codec_link->num_codecs > 1) { + card->dai_link[0].dpcm_capture = 1; + card->dai_link[1].dpcm_capture = 1; + } - priv->sclk_i2s = of_clk_get_by_name(link->cpu_of_node, "i2s_opclk1"); + priv->sclk_i2s = of_clk_get_by_name(cpu_dai, "i2s_opclk1"); if (IS_ERR(priv->sclk_i2s)) { ret = PTR_ERR(priv->sclk_i2s); - goto err_put_i2s_n; + goto err_put_cpu_dai; } - priv->clk_i2s_bus = of_clk_get_by_name(link->cpu_of_node, "iis"); + priv->clk_i2s_bus = of_clk_get_by_name(cpu_dai, "iis"); if (IS_ERR(priv->clk_i2s_bus)) { ret = PTR_ERR(priv->clk_i2s_bus); goto err_put_sclk; } + of_node_put(cpu_dai); ret = devm_snd_soc_register_card(dev, card); if (ret < 0) { @@ -182,10 +318,9 @@ err_put_clk_i2s: clk_put(priv->clk_i2s_bus); err_put_sclk: clk_put(priv->sclk_i2s); -err_put_i2s_n: - of_node_put(link->cpu_of_node); -err_put_codec_n: - snd_soc_of_put_dai_link_codecs(link); +err_put_cpu_dai: + of_node_put(cpu_dai); + snd_soc_of_put_dai_link_codecs(codec_link); return ret; } @@ -193,8 +328,7 @@ static int odroid_audio_remove(struct platform_device *pdev) { struct odroid_priv *priv = platform_get_drvdata(pdev); - of_node_put(priv->dai_link.cpu_of_node); - snd_soc_of_put_dai_link_codecs(&priv->dai_link); + snd_soc_of_put_dai_link_codecs(&priv->card.dai_link[1]); clk_put(priv->sclk_i2s); clk_put(priv->clk_i2s_bus); diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c index 37f95eee1558..3c7baa561084 100644 --- a/sound/soc/samsung/pcm.c +++ b/sound/soc/samsung/pcm.c @@ -553,7 +553,7 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev) pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id]; ret = samsung_asoc_dma_platform_register(&pdev->dev, filter, - NULL, NULL); + NULL, NULL, NULL); if (ret) { dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret); goto err_dis_pclk; diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c index cc0840fff5aa..c08638b0e458 100644 --- a/sound/soc/samsung/s3c2412-i2s.c +++ b/sound/soc/samsung/s3c2412-i2s.c @@ -177,7 +177,7 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev) ret 
= samsung_asoc_dma_platform_register(&pdev->dev, pdata->dma_filter, - NULL, NULL); + "tx", "rx", NULL); if (ret) { pr_err("failed to register the DMA: %d\n", ret); return ret; diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c index 8d58d02183bf..a8026b640c95 100644 --- a/sound/soc/samsung/s3c24xx-i2s.c +++ b/sound/soc/samsung/s3c24xx-i2s.c @@ -446,7 +446,7 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev) s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO; ret = samsung_asoc_dma_platform_register(&pdev->dev, NULL, - NULL, NULL); + "tx", "rx", NULL); if (ret) { dev_err(&pdev->dev, "Failed to register the DMA: %d\n", ret); return ret; diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c index cb59911e65c0..5e4afb330416 100644 --- a/sound/soc/samsung/spdif.c +++ b/sound/soc/samsung/spdif.c @@ -430,7 +430,7 @@ static int spdif_probe(struct platform_device *pdev) spdif->dma_playback = &spdif_stereo_out; ret = samsung_asoc_dma_platform_register(&pdev->dev, filter, - NULL, NULL); + NULL, NULL, NULL); if (ret) { dev_err(&pdev->dev, "failed to register DMA: %d\n", ret); goto err4; diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 285afbafa662..3447dbdba1f1 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c @@ -780,7 +780,7 @@ static int fsi_clk_init(struct device *dev, return -EINVAL; } if (clock->div == clock->own) { - dev_err(dev, "cpu doens't support div clock\n"); + dev_err(dev, "cpu doesn't support div clock\n"); return -EINVAL; } } diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c index 7cda60188f41..af19010b9d88 100644 --- a/sound/soc/sh/rcar/gen.c +++ b/sound/soc/sh/rcar/gen.c @@ -255,6 +255,30 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv) RSND_GEN_M_REG(SSI_MODE, 0xc, 0x80), RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80), RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80), + RSND_GEN_S_REG(SSI9_BUSIF0_MODE, 0x48c), + RSND_GEN_S_REG(SSI9_BUSIF0_ADINR, 0x484), + RSND_GEN_S_REG(SSI9_BUSIF0_DALIGN, 0x488), + RSND_GEN_S_REG(SSI9_BUSIF1_MODE, 0x4a0), + RSND_GEN_S_REG(SSI9_BUSIF1_ADINR, 0x4a4), + RSND_GEN_S_REG(SSI9_BUSIF1_DALIGN, 0x4a8), + RSND_GEN_S_REG(SSI9_BUSIF2_MODE, 0x4c0), + RSND_GEN_S_REG(SSI9_BUSIF2_ADINR, 0x4c4), + RSND_GEN_S_REG(SSI9_BUSIF2_DALIGN, 0x4c8), + RSND_GEN_S_REG(SSI9_BUSIF3_MODE, 0x4e0), + RSND_GEN_S_REG(SSI9_BUSIF3_ADINR, 0x4e4), + RSND_GEN_S_REG(SSI9_BUSIF3_DALIGN, 0x4e8), + RSND_GEN_S_REG(SSI9_BUSIF4_MODE, 0xd80), + RSND_GEN_S_REG(SSI9_BUSIF4_ADINR, 0xd84), + RSND_GEN_S_REG(SSI9_BUSIF4_DALIGN, 0xd88), + RSND_GEN_S_REG(SSI9_BUSIF5_MODE, 0xda0), + RSND_GEN_S_REG(SSI9_BUSIF5_ADINR, 0xda4), + RSND_GEN_S_REG(SSI9_BUSIF5_DALIGN, 0xda8), + RSND_GEN_S_REG(SSI9_BUSIF6_MODE, 0xdc0), + RSND_GEN_S_REG(SSI9_BUSIF6_ADINR, 0xdc4), + RSND_GEN_S_REG(SSI9_BUSIF6_DALIGN, 0xdc8), + RSND_GEN_S_REG(SSI9_BUSIF7_MODE, 0xde0), + RSND_GEN_S_REG(SSI9_BUSIF7_ADINR, 0xde4), + RSND_GEN_S_REG(SSI9_BUSIF7_DALIGN, 0xde8), }; static const struct rsnd_regmap_field_conf conf_scu[] = { diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 605e4b934982..90625c57847b 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -191,6 +191,30 @@ enum rsnd_reg { SSI_SYS_STATUS7, HDMI0_SEL, HDMI1_SEL, + SSI9_BUSIF0_MODE, + SSI9_BUSIF1_MODE, + SSI9_BUSIF2_MODE, + SSI9_BUSIF3_MODE, + SSI9_BUSIF4_MODE, + SSI9_BUSIF5_MODE, + SSI9_BUSIF6_MODE, + SSI9_BUSIF7_MODE, + SSI9_BUSIF0_ADINR, + SSI9_BUSIF1_ADINR, + SSI9_BUSIF2_ADINR, + SSI9_BUSIF3_ADINR, + SSI9_BUSIF4_ADINR, + SSI9_BUSIF5_ADINR, + 
SSI9_BUSIF6_ADINR, + SSI9_BUSIF7_ADINR, + SSI9_BUSIF0_DALIGN, + SSI9_BUSIF1_DALIGN, + SSI9_BUSIF2_DALIGN, + SSI9_BUSIF3_DALIGN, + SSI9_BUSIF4_DALIGN, + SSI9_BUSIF5_DALIGN, + SSI9_BUSIF6_DALIGN, + SSI9_BUSIF7_DALIGN, /* SSI */ SSICR, @@ -209,6 +233,9 @@ enum rsnd_reg { #define SSI_BUSIF_MODE(i) (SSI_BUSIF0_MODE + (i)) #define SSI_BUSIF_ADINR(i) (SSI_BUSIF0_ADINR + (i)) #define SSI_BUSIF_DALIGN(i) (SSI_BUSIF0_DALIGN + (i)) +#define SSI9_BUSIF_MODE(i) (SSI9_BUSIF0_MODE + (i)) +#define SSI9_BUSIF_ADINR(i) (SSI9_BUSIF0_ADINR + (i)) +#define SSI9_BUSIF_DALIGN(i) (SSI9_BUSIF0_DALIGN + (i)) #define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i)) diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index c74991dd18ab..2347f3404c06 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c @@ -181,28 +181,26 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod, if (rsnd_ssi_use_busif(io)) { int id = rsnd_mod_id(mod); int busif = rsnd_mod_id_sub(mod); + enum rsnd_reg adinr_reg, mode_reg, dalign_reg; - /* - * FIXME - * - * We can't support SSI9-4/5/6/7, because its address is - * out of calculation rule - */ if ((id == 9) && (busif >= 4)) { - struct device *dev = rsnd_priv_to_dev(priv); - - dev_err(dev, "This driver doesn't support SSI%d-%d, so far", - id, busif); + adinr_reg = SSI9_BUSIF_ADINR(busif); + mode_reg = SSI9_BUSIF_MODE(busif); + dalign_reg = SSI9_BUSIF_DALIGN(busif); + } else { + adinr_reg = SSI_BUSIF_ADINR(busif); + mode_reg = SSI_BUSIF_MODE(busif); + dalign_reg = SSI_BUSIF_DALIGN(busif); } - rsnd_mod_write(mod, SSI_BUSIF_ADINR(busif), + rsnd_mod_write(mod, adinr_reg, rsnd_get_adinr_bit(mod, io) | (rsnd_io_is_play(io) ? rsnd_runtime_channel_after_ctu(io) : rsnd_runtime_channel_original(io))); - rsnd_mod_write(mod, SSI_BUSIF_MODE(busif), + rsnd_mod_write(mod, mode_reg, rsnd_get_busif_shift(io, mod) | 1); - rsnd_mod_write(mod, SSI_BUSIF_DALIGN(busif), + rsnd_mod_write(mod, dalign_reg, rsnd_get_dalign(mod, io)); } diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 994d21d7ba0f..93d316d5bf8e 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -947,7 +947,8 @@ static void soc_cleanup_component(struct snd_soc_component *component) snd_soc_dapm_free(snd_soc_component_get_dapm(component)); soc_cleanup_component_debugfs(component); component->card = NULL; - module_put(component->dev->driver->owner); + if (!component->driver->ignore_module_refcount) + module_put(component->dev->driver->owner); } static void soc_remove_component(struct snd_soc_component *component) @@ -1380,7 +1381,8 @@ static int soc_probe_component(struct snd_soc_card *card, return 0; } - if (!try_module_get(component->dev->driver->owner)) + if (!component->driver->ignore_module_refcount && + !try_module_get(component->dev->driver->owner)) return -ENODEV; component->card = card; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index d31d295b540f..1ec06ef6d161 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -157,6 +157,7 @@ static void pop_wait(u32 pop_time) schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time)); } +__printf(3, 4) static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...) 
{ va_list args; @@ -331,8 +332,10 @@ static inline struct snd_soc_dapm_widget *dapm_cnew_widget( */ if (_widget->sname) { w->sname = kstrdup_const(_widget->sname, GFP_KERNEL); - if (!w->sname) + if (!w->sname) { + kfree(w); return NULL; + } } return w; } diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index 46252b13d3b3..748f5f641002 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -265,7 +265,6 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd) struct dmaengine_pcm *pcm = soc_component_to_pcm(component); const struct snd_dmaengine_pcm_config *config = pcm->config; struct device *dev = component->dev; - struct snd_dmaengine_dai_dma_data *dma_data; struct snd_pcm_substream *substream; size_t prealloc_buffer_size; size_t max_buffer_size; @@ -284,12 +283,9 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd) if (!substream) continue; - dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); - - if (!pcm->chan[i] && - (pcm->flags & SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME)) + if (!pcm->chan[i] && config && config->chan_names[i]) pcm->chan[i] = dma_request_slave_channel(dev, - dma_data->chan_name); + config->chan_names[i]); if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) { pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd, @@ -410,9 +406,8 @@ static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm, const char *name; struct dma_chan *chan; - if ((pcm->flags & (SND_DMAENGINE_PCM_FLAG_NO_DT | - SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME)) || - !dev->of_node) + if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node && + !(config && config->dma_dev && config->dma_dev->of_node))) return 0; if (config && config->dma_dev) { diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index c5638e15a2dd..25fca7055464 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -2587,6 +2587,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) { struct soc_tplg tplg; + int ret; /* setup parsing context */ memset(&tplg, 0, sizeof(tplg)); @@ -2600,7 +2601,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, tplg.bytes_ext_ops = ops->bytes_ext_ops; tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; - return soc_tplg_load(&tplg); + ret = soc_tplg_load(&tplg); + /* free the created components if fail to load topology */ + if (ret) + snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL); + + return ret; } EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c index 6d0bf78d114d..47c334de6b09 100644 --- a/sound/soc/stm/stm32_i2s.c +++ b/sound/soc/stm/stm32_i2s.c @@ -186,8 +186,9 @@ enum i2s_datlen { #define STM32_I2S_IS_SLAVE(x) ((x)->ms_flg == I2S_MS_SLAVE) /** + * struct stm32_i2s_data - private data of I2S * @regmap_conf: I2S register map configuration pointer - * @egmap: I2S register map pointer + * @regmap: I2S register map pointer * @pdev: device data pointer * @dai_drv: DAI driver pointer * @dma_data_tx: dma configuration data for tx channel @@ -200,6 +201,7 @@ enum i2s_datlen { * @base: mmio register base virtual address * @phys_addr: I2S registers physical base address * @lock_fd: lock to manage race conditions in full duplex mode + * @irq_lock: prevent race condition with IRQ * @dais_name: DAI name * @mclk_rate: master clock frequency (Hz) * @fmt: DAI 
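The soc-dapm hunk above frees the half-built widget when duplicating the stream name fails, instead of leaking it. The same pattern in standalone form, using strdup and hypothetical struct and field names:

/*
 * Free a partially constructed object when a member allocation fails.
 * Mirrors the dapm_cnew_widget() error-path fix; names are made up.
 */
#include <stdlib.h>
#include <string.h>

struct widget {
	char *name;
	char *sname;
};

static struct widget *widget_dup(const char *name, const char *sname)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->name = strdup(name);
	if (!w->name) {
		free(w);
		return NULL;
	}
	if (sname) {
		w->sname = strdup(sname);
		if (!w->sname) {
			/* Without these frees, w and w->name would leak. */
			free(w->name);
			free(w);
			return NULL;
		}
	}
	return w;
}

int main(void)
{
	struct widget *w = widget_dup("Headphone", "Playback");

	if (!w)
		return 1;
	free(w->sname);
	free(w->name);
	free(w);
	return 0;
}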
protocol @@ -221,6 +223,7 @@ struct stm32_i2s_data { void __iomem *base; dma_addr_t phys_addr; spinlock_t lock_fd; /* Manage race conditions for full duplex */ + spinlock_t irq_lock; /* used to prevent race condition with IRQ */ char dais_name[STM32_I2S_DAI_NAME_SIZE]; unsigned int mclk_rate; unsigned int fmt; @@ -246,8 +249,8 @@ static irqreturn_t stm32_i2s_isr(int irq, void *devid) return IRQ_NONE; } - regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG, - I2S_IFCR_MASK, flags); + regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG, + I2S_IFCR_MASK, flags); if (flags & I2S_SR_OVR) { dev_dbg(&pdev->dev, "Overrun\n"); @@ -262,8 +265,10 @@ static irqreturn_t stm32_i2s_isr(int irq, void *devid) if (flags & I2S_SR_TIFRE) dev_dbg(&pdev->dev, "Frame error\n"); - if (err) + spin_lock(&i2s->irq_lock); + if (err && i2s->substream) snd_pcm_stop_xrun(i2s->substream); + spin_unlock(&i2s->irq_lock); return IRQ_HANDLED; } @@ -276,7 +281,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg) case STM32_I2S_CFG2_REG: case STM32_I2S_IER_REG: case STM32_I2S_SR_REG: - case STM32_I2S_IFCR_REG: case STM32_I2S_TXDR_REG: case STM32_I2S_RXDR_REG: case STM32_I2S_CGFR_REG: @@ -488,7 +492,7 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai, { struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai); int format = params_width(params); - u32 cfgr, cfgr_mask, cfg1, cfg1_mask; + u32 cfgr, cfgr_mask, cfg1; unsigned int fthlv; int ret; @@ -501,7 +505,7 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai, switch (format) { case 16: cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_16); - cfgr_mask = I2S_CGFR_DATLEN_MASK; + cfgr_mask = I2S_CGFR_DATLEN_MASK | I2S_CGFR_CHLEN; break; case 32: cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_32) | @@ -529,30 +533,32 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai, if (ret < 0) return ret; - cfg1 = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN; - cfg1_mask = cfg1; - fthlv = STM32_I2S_FIFO_SIZE * I2S_FIFO_TH_ONE_QUARTER / 4; - cfg1 |= I2S_CFG1_FTHVL_SET(fthlv - 1); - cfg1_mask |= I2S_CFG1_FTHVL_MASK; + cfg1 = I2S_CFG1_FTHVL_SET(fthlv - 1); return regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG, - cfg1_mask, cfg1); + I2S_CFG1_FTHVL_MASK, cfg1); } static int stm32_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai); + unsigned long flags; + int ret; + spin_lock_irqsave(&i2s->irq_lock, flags); i2s->substream = substream; + spin_unlock_irqrestore(&i2s->irq_lock, flags); - spin_lock(&i2s->lock_fd); - i2s->refcount++; - spin_unlock(&i2s->lock_fd); + ret = clk_prepare_enable(i2s->i2sclk); + if (ret < 0) { + dev_err(cpu_dai->dev, "Failed to enable clock: %d\n", ret); + return ret; + } - return regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG, - I2S_IFCR_MASK, I2S_IFCR_MASK); + return regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG, + I2S_IFCR_MASK, I2S_IFCR_MASK); } static int stm32_i2s_hw_params(struct snd_pcm_substream *substream, @@ -589,6 +595,10 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd, /* Enable i2s */ dev_dbg(cpu_dai->dev, "start I2S\n"); + cfg1_mask = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN; + regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG, + cfg1_mask, cfg1_mask); + ret = regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG, I2S_CR1_SPE, I2S_CR1_SPE); if (ret < 0) { @@ -596,28 +606,29 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd, return ret; } - ret = 
regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG, - I2S_CR1_CSTART, I2S_CR1_CSTART); + ret = regmap_write_bits(i2s->regmap, STM32_I2S_CR1_REG, + I2S_CR1_CSTART, I2S_CR1_CSTART); if (ret < 0) { dev_err(cpu_dai->dev, "Error %d starting I2S\n", ret); return ret; } - regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG, - I2S_IFCR_MASK, I2S_IFCR_MASK); + regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG, + I2S_IFCR_MASK, I2S_IFCR_MASK); + spin_lock(&i2s->lock_fd); + i2s->refcount++; if (playback_flg) { ier = I2S_IER_UDRIE; } else { ier = I2S_IER_OVRIE; - spin_lock(&i2s->lock_fd); - if (i2s->refcount == 1) - /* dummy write to trigger capture */ + if (STM32_I2S_IS_MASTER(i2s) && i2s->refcount == 1) + /* dummy write to gate bus clocks */ regmap_write(i2s->regmap, STM32_I2S_TXDR_REG, 0); - spin_unlock(&i2s->lock_fd); } + spin_unlock(&i2s->lock_fd); if (STM32_I2S_IS_SLAVE(i2s)) ier |= I2S_IER_TIFREIE; @@ -642,7 +653,6 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd, spin_unlock(&i2s->lock_fd); break; } - spin_unlock(&i2s->lock_fd); dev_dbg(cpu_dai->dev, "stop I2S\n"); @@ -650,8 +660,10 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd, I2S_CR1_SPE, 0); if (ret < 0) { dev_err(cpu_dai->dev, "Error %d disabling I2S\n", ret); + spin_unlock(&i2s->lock_fd); return ret; } + spin_unlock(&i2s->lock_fd); cfg1_mask = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN; regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG, @@ -668,11 +680,16 @@ static void stm32_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai); - - i2s->substream = NULL; + unsigned long flags; regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG, I2S_CGFR_MCKOE, (unsigned int)~I2S_CGFR_MCKOE); + + clk_disable_unprepare(i2s->i2sclk); + + spin_lock_irqsave(&i2s->irq_lock, flags); + i2s->substream = NULL; + spin_unlock_irqrestore(&i2s->irq_lock, flags); } static int stm32_i2s_dai_probe(struct snd_soc_dai *cpu_dai) @@ -703,6 +720,7 @@ static const struct regmap_config stm32_h7_i2s_regmap_conf = { .volatile_reg = stm32_i2s_volatile_reg, .writeable_reg = stm32_i2s_writeable_reg, .fast_io = true, + .cache_type = REGCACHE_FLAT, }; static const struct snd_soc_dai_ops stm32_i2s_pcm_dai_ops = { @@ -866,76 +884,68 @@ static int stm32_i2s_probe(struct platform_device *pdev) i2s->pdev = pdev; i2s->ms_flg = I2S_MS_NOT_SET; spin_lock_init(&i2s->lock_fd); + spin_lock_init(&i2s->irq_lock); platform_set_drvdata(pdev, i2s); ret = stm32_i2s_dais_init(pdev, i2s); if (ret) return ret; - i2s->regmap = devm_regmap_init_mmio(&pdev->dev, i2s->base, - i2s->regmap_conf); + i2s->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "pclk", + i2s->base, i2s->regmap_conf); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); return PTR_ERR(i2s->regmap); } - ret = clk_prepare_enable(i2s->pclk); - if (ret) { - dev_err(&pdev->dev, "Enable pclk failed: %d\n", ret); - return ret; - } - - ret = clk_prepare_enable(i2s->i2sclk); - if (ret) { - dev_err(&pdev->dev, "Enable i2sclk failed: %d\n", ret); - goto err_pclk_disable; - } - ret = devm_snd_soc_register_component(&pdev->dev, &stm32_i2s_component, i2s->dai_drv, 1); if (ret) - goto err_clocks_disable; + return ret; ret = devm_snd_dmaengine_pcm_register(&pdev->dev, &stm32_i2s_pcm_config, 0); if (ret) - goto err_clocks_disable; + return ret; /* Set SPI/I2S in i2s mode */ - ret = regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG, - I2S_CGFR_I2SMOD, I2S_CGFR_I2SMOD); - if (ret) - goto 
err_clocks_disable; + return regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG, + I2S_CGFR_I2SMOD, I2S_CGFR_I2SMOD); +} - return ret; +MODULE_DEVICE_TABLE(of, stm32_i2s_ids); -err_clocks_disable: - clk_disable_unprepare(i2s->i2sclk); -err_pclk_disable: - clk_disable_unprepare(i2s->pclk); +#ifdef CONFIG_PM_SLEEP +static int stm32_i2s_suspend(struct device *dev) +{ + struct stm32_i2s_data *i2s = dev_get_drvdata(dev); - return ret; + regcache_cache_only(i2s->regmap, true); + regcache_mark_dirty(i2s->regmap); + + return 0; } -static int stm32_i2s_remove(struct platform_device *pdev) +static int stm32_i2s_resume(struct device *dev) { - struct stm32_i2s_data *i2s = platform_get_drvdata(pdev); - - clk_disable_unprepare(i2s->i2sclk); - clk_disable_unprepare(i2s->pclk); + struct stm32_i2s_data *i2s = dev_get_drvdata(dev); - return 0; + regcache_cache_only(i2s->regmap, false); + return regcache_sync(i2s->regmap); } +#endif /* CONFIG_PM_SLEEP */ -MODULE_DEVICE_TABLE(of, stm32_i2s_ids); +static const struct dev_pm_ops stm32_i2s_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stm32_i2s_suspend, stm32_i2s_resume) +}; static struct platform_driver stm32_i2s_driver = { .driver = { .name = "st,stm32-i2s", .of_match_table = stm32_i2s_ids, + .pm = &stm32_i2s_pm_ops, }, .probe = stm32_i2s_probe, - .remove = stm32_i2s_remove, }; module_platform_driver(stm32_i2s_driver); diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c index bcb35cae2a2c..14c9591aae42 100644 --- a/sound/soc/stm/stm32_sai.c +++ b/sound/soc/stm/stm32_sai.c @@ -112,16 +112,21 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client, if (!sai_provider) { dev_err(&sai_client->pdev->dev, "SAI sync provider data not found\n"); - return -EINVAL; + ret = -EINVAL; + goto out_put_dev; } /* Configure sync client */ ret = stm32_sai_sync_conf_client(sai_client, synci); if (ret < 0) - return ret; + goto out_put_dev; /* Configure sync provider */ - return stm32_sai_sync_conf_provider(sai_provider, synco); + ret = stm32_sai_sync_conf_provider(sai_provider, synco); + +out_put_dev: + put_device(&pdev->dev); + return ret; } static int stm32_sai_probe(struct platform_device *pdev) diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index d4825700b63f..f9297228c41c 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -898,7 +898,7 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai, struct snd_pcm_hw_params *params) { struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); - int cr1, mask, div = 0; + int div = 0; int sai_clk_rate, mclk_ratio, den; unsigned int rate = params_rate(params); @@ -943,10 +943,8 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai, } else { if (sai->mclk_rate) { mclk_ratio = sai->mclk_rate / rate; - if (mclk_ratio == 512) { - mask = SAI_XCR1_OSR; - cr1 = SAI_XCR1_OSR; - } else if (mclk_ratio != 256) { + if ((mclk_ratio != 512) && + (mclk_ratio != 256)) { dev_err(cpu_dai->dev, "Wrong mclk ratio %d\n", mclk_ratio); diff --git a/sound/soc/sunxi/sun50i-codec-analog.c b/sound/soc/sunxi/sun50i-codec-analog.c index df1fed0aa001..d105c90c3706 100644 --- a/sound/soc/sunxi/sun50i-codec-analog.c +++ b/sound/soc/sunxi/sun50i-codec-analog.c @@ -274,7 +274,7 @@ static const struct snd_soc_dapm_widget sun50i_a64_codec_widgets[] = { * stream widgets at the card level. 
*/ - SND_SOC_DAPM_REGULATOR_SUPPLY("hpvcc", 0, 0), + SND_SOC_DAPM_REGULATOR_SUPPLY("cpvdd", 0, 0), SND_SOC_DAPM_MUX("Headphone Source Playback Route", SND_SOC_NOPM, 0, 0, sun50i_codec_hp_src), SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN50I_ADDA_HP_CTRL, @@ -362,7 +362,7 @@ static const struct snd_soc_dapm_route sun50i_a64_codec_routes[] = { { "Headphone Source Playback Route", "Mixer", "Left Mixer" }, { "Headphone Source Playback Route", "Mixer", "Right Mixer" }, { "Headphone Amp", NULL, "Headphone Source Playback Route" }, - { "Headphone Amp", NULL, "hpvcc" }, + { "Headphone Amp", NULL, "cpvdd" }, { "HP", NULL, "Headphone Amp" }, /* Microphone Routes */ diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 897483457bf0..f7261fad45c1 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key) snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); fdi = fopen(path, "r"); - if (!fdi) { - p_err("can't open fdinfo: %s", strerror(errno)); + if (!fdi) return NULL; - } while ((n = getline(&line, &line_n, fdi)) > 0) { char *value; @@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key) value = strchr(line, '\t'); if (!value || !value[1]) { - p_err("malformed fdinfo!?"); free(line); return NULL; } @@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key) return line; } - p_err("key '%s' not found in fdinfo", key); free(line); fclose(fdi); return NULL; diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 2037e3dc864b..1ef1ee2280a2 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -347,6 +347,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val, return argv + i; } +/* on per cpu maps we must copy the provided value on all value instances */ +static void fill_per_cpu_value(struct bpf_map_info *info, void *value) +{ + unsigned int i, n, step; + + if (!map_is_per_cpu(info->type)) + return; + + n = get_possible_cpus(); + step = round_up(info->value_size, 8); + for (i = 1; i < n; i++) + memcpy(value + i * step, value, info->value_size); +} + static int parse_elem(char **argv, struct bpf_map_info *info, void *key, void *value, __u32 key_size, __u32 value_size, __u32 *flags, __u32 **value_fd) @@ -426,6 +440,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info, argv = parse_bytes(argv, "value", value, value_size); if (!argv) return -1; + + fill_per_cpu_value(info, value); } return parse_elem(argv, info, key, NULL, key_size, value_size, @@ -497,10 +513,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) jsonw_uint_field(json_wtr, "owner_prog_type", prog_type); } - if (atoi(owner_jited)) - jsonw_bool_field(json_wtr, "owner_jited", true); - else - jsonw_bool_field(json_wtr, "owner_jited", false); + if (owner_jited) + jsonw_bool_field(json_wtr, "owner_jited", + !!atoi(owner_jited)); free(owner_prog_type); free(owner_jited); @@ -553,7 +568,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); char *owner_jited = get_fdinfo(fd, "owner_jited"); - printf("\n\t"); + if (owner_prog_type || owner_jited) + printf("\n\t"); if (owner_prog_type) { unsigned int prog_type = atoi(owner_prog_type); @@ -563,10 +579,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) else printf("owner_prog_type %d ", prog_type); } - if (atoi(owner_jited)) - printf("owner jited"); - else - printf("owner not jited"); + if (owner_jited) + 
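The bpftool change above replicates a single user-supplied value across every possible CPU before updating a per-CPU map, with each slot rounded up to 8 bytes. A standalone sketch of that value layout, with get_possible_cpus() stubbed to a constant rather than read from sysfs:

/*
 * Per-CPU map value layout: one slot per possible CPU, slot size is the
 * value size rounded up to 8 bytes, and the single provided value is
 * copied into every slot.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static unsigned int get_possible_cpus(void)
{
	return 4;	/* assumption: real code parses /sys/devices/system/cpu/possible */
}

static void *alloc_per_cpu_value(unsigned int value_size, const void *value)
{
	unsigned int n = get_possible_cpus();
	unsigned int step = ROUND_UP(value_size, 8);
	unsigned char *buf = calloc(n, step);
	unsigned int i;

	if (!buf)
		return NULL;
	/* Copy the single value into every per-CPU slot. */
	for (i = 0; i < n; i++)
		memcpy(buf + i * step, value, value_size);
	return buf;
}

int main(void)
{
	unsigned int v = 42;
	unsigned char *buf = alloc_per_cpu_value(sizeof(v), &v);

	if (!buf)
		return 1;
	printf("slot 3 holds %u\n",
	       *(unsigned int *)(buf + 3 * ROUND_UP(sizeof(v), 8)));
	free(buf);
	return 0;
}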
printf("owner%s jited", + atoi(owner_jited) ? "" : " not"); free(owner_prog_type); free(owner_jited); diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 2d1bb7d6ff51..b54ed82b9589 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) static int prog_fd_by_tag(unsigned char *tag) { - struct bpf_prog_info info = {}; - __u32 len = sizeof(info); unsigned int id = 0; int err; int fd; while (true) { + struct bpf_prog_info info = {}; + __u32 len = sizeof(info); + err = bpf_prog_get_next_id(id, &id); if (err) { p_err("%s", strerror(errno)); diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c index 3040830d7797..84545666a09c 100644 --- a/tools/iio/iio_generic_buffer.c +++ b/tools/iio/iio_generic_buffer.c @@ -330,7 +330,7 @@ static const struct option longopts[] = { int main(int argc, char **argv) { - unsigned long long num_loops = 2; + long long num_loops = 2; unsigned long timedelay = 1000000; unsigned long buf_len = 128; diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index fd92ce8388fc..57aaeaf8e192 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h @@ -15,6 +15,8 @@ #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" #elif defined(__riscv) #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" +#elif defined(__alpha__) +#include "../../arch/alpha/include/uapi/asm/bitsperlong.h" #else #include <asm-generic/bitsperlong.h> #endif diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h index f6052e70bf40..a55cb8b10165 100644 --- a/tools/include/uapi/linux/in.h +++ b/tools/include/uapi/linux/in.h @@ -268,7 +268,7 @@ struct sockaddr_in { #define IN_MULTICAST(a) IN_CLASSD(a) #define IN_MULTICAST_NET 0xe0000000 -#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) +#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt index 095aebdc5bb7..e6150f21267d 100644 --- a/tools/perf/Documentation/perf-c2c.txt +++ b/tools/perf/Documentation/perf-c2c.txt @@ -19,8 +19,11 @@ C2C stands for Cache To Cache. The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows you to track down the cacheline contentions. -The tool is based on x86's load latency and precise store facility events -provided by Intel CPUs. These events provide: +On x86, the tool is based on load latency and precise store facility events +provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling +with thresholding feature. + +These events provide: - memory address of the access - type of the access (load and store details) - latency (in cycles) of the load access @@ -46,7 +49,7 @@ RECORD OPTIONS -l:: --ldlat:: - Configure mem-loads latency. + Configure mem-loads latency. 
(x86 only) -k:: --all-kernel:: @@ -119,11 +122,16 @@ Following perf record options are configured by default: -W,-d,--phys-data,--sample-cpu Unless specified otherwise with '-e' option, following events are monitored by -default: +default on x86: cpu/mem-loads,ldlat=30/P cpu/mem-stores/P +and following on PowerPC: + + cpu/mem-loads/ + cpu/mem-stores/ + User can pass any 'perf record' option behind '--' mark, like (to enable callchains and system wide monitoring): diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt index f8d2167cf3e7..199ea0f0a6c0 100644 --- a/tools/perf/Documentation/perf-mem.txt +++ b/tools/perf/Documentation/perf-mem.txt @@ -82,7 +82,7 @@ RECORD OPTIONS Be more verbose (show counter open errors, etc) --ldlat <n>:: - Specify desired latency for loads event. + Specify desired latency for loads event. (x86 only) In addition, for report all perf report options are valid, and for record all perf record options. diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build index 2e6595310420..ba98bd006488 100644 --- a/tools/perf/arch/powerpc/util/Build +++ b/tools/perf/arch/powerpc/util/Build @@ -2,6 +2,7 @@ libperf-y += header.o libperf-y += sym-handling.o libperf-y += kvm-stat.o libperf-y += perf_regs.o +libperf-y += mem-events.o libperf-$(CONFIG_DWARF) += dwarf-regs.o libperf-$(CONFIG_DWARF) += skip-callchain-idx.o diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c new file mode 100644 index 000000000000..d08311f04e95 --- /dev/null +++ b/tools/perf/arch/powerpc/util/mem-events.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "mem-events.h" + +/* PowerPC does not support 'ldlat' parameter. */ +char *perf_mem_events__name(int i) +{ + if (i == PERF_MEM_EVENTS__LOAD) + return (char *) "cpu/mem-loads/"; + + return (char *) "cpu/mem-stores/"; +} diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index d079f36d342d..ac221f137ed2 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1681,13 +1681,8 @@ static void perf_sample__fprint_metric(struct perf_script *script, .force_header = false, }; struct perf_evsel *ev2; - static bool init; u64 val; - if (!init) { - perf_stat__init_shadow_stats(); - init = true; - } if (!evsel->stats) perf_evlist__alloc_stats(script->session->evlist, false); if (evsel_script(evsel->leader)->gnum++ == 0) @@ -1794,7 +1789,7 @@ static void process_event(struct perf_script *script, return; } - if (PRINT_FIELD(TRACE)) { + if (PRINT_FIELD(TRACE) && sample->raw_data) { event_format__fprintf(evsel->tp_format, sample->cpu, sample->raw_data, sample->raw_size, fp); } @@ -2359,6 +2354,8 @@ static int __cmd_script(struct perf_script *script) signal(SIGINT, sig_handler); + perf_stat__init_shadow_stats(); + /* override event processing functions */ if (script->show_task_events) { script->tool.comm = process_comm_event; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ed4583128b9c..b36061cd1ab8 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2514,19 +2514,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) { - struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); + bool found = false; + struct perf_evsel *evsel, *tmp; + struct parse_events_error err = { .idx = 0, }; + int ret = parse_events(evlist, "probe:vfs_getname*", &err); - if 
(IS_ERR(evsel)) + if (ret) return false; - if (perf_evsel__field(evsel, "pathname") == NULL) { + evlist__for_each_entry_safe(evlist, evsel, tmp) { + if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname")) + continue; + + if (perf_evsel__field(evsel, "pathname")) { + evsel->handler = trace__vfs_getname; + found = true; + continue; + } + + list_del_init(&evsel->node); + evsel->evlist = NULL; perf_evsel__delete(evsel); - return false; } - evsel->handler = trace__vfs_getname; - perf_evlist__add(evlist, evsel); - return true; + return found; } static struct perf_evsel *perf_evsel__new_pgfault(u64 config) diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py index 44090a9a19f3..e952127e4fb0 100644 --- a/tools/perf/tests/attr.py +++ b/tools/perf/tests/attr.py @@ -1,6 +1,8 @@ #! /usr/bin/python # SPDX-License-Identifier: GPL-2.0 +from __future__ import print_function + import os import sys import glob @@ -8,7 +10,11 @@ import optparse import tempfile import logging import shutil -import ConfigParser + +try: + import configparser +except ImportError: + import ConfigParser as configparser def data_equal(a, b): # Allow multiple values in assignment separated by '|' @@ -100,20 +106,20 @@ class Event(dict): def equal(self, other): for t in Event.terms: log.debug(" [%s] %s %s" % (t, self[t], other[t])); - if not self.has_key(t) or not other.has_key(t): + if t not in self or t not in other: return False if not data_equal(self[t], other[t]): return False return True def optional(self): - if self.has_key('optional') and self['optional'] == '1': + if 'optional' in self and self['optional'] == '1': return True return False def diff(self, other): for t in Event.terms: - if not self.has_key(t) or not other.has_key(t): + if t not in self or t not in other: continue if not data_equal(self[t], other[t]): log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) @@ -134,7 +140,7 @@ class Event(dict): # - expected values assignments class Test(object): def __init__(self, path, options): - parser = ConfigParser.SafeConfigParser() + parser = configparser.SafeConfigParser() parser.read(path) log.warning("running '%s'" % path) @@ -193,7 +199,7 @@ class Test(object): return True def load_events(self, path, events): - parser_event = ConfigParser.SafeConfigParser() + parser_event = configparser.SafeConfigParser() parser_event.read(path) # The event record section header contains 'event' word, @@ -207,7 +213,7 @@ class Test(object): # Read parent event if there's any if (':' in section): base = section[section.index(':') + 1:] - parser_base = ConfigParser.SafeConfigParser() + parser_base = configparser.SafeConfigParser() parser_base.read(self.test_dir + '/' + base) base_items = parser_base.items('event') @@ -322,9 +328,9 @@ def run_tests(options): for f in glob.glob(options.test_dir + '/' + options.test): try: Test(f, options).run() - except Unsup, obj: + except Unsup as obj: log.warning("unsupp %s" % obj.getMsg()) - except Notest, obj: + except Notest as obj: log.warning("skipped %s" % obj.getMsg()) def setup_log(verbose): @@ -363,7 +369,7 @@ def main(): parser.add_option("-p", "--perf", action="store", type="string", dest="perf") parser.add_option("-v", "--verbose", - action="count", dest="verbose") + default=0, action="count", dest="verbose") options, args = parser.parse_args() if args: @@ -373,7 +379,7 @@ def main(): setup_log(options.verbose) if not options.test_dir: - print 'FAILED no -d option specified' + print('FAILED no -d option specified') sys.exit(-1) if not options.test: @@ -382,8 +388,8 
@@ def main(): try: run_tests(options) - except Fail, obj: - print "FAILED %s" % obj.getMsg(); + except Fail as obj: + print("FAILED %s" % obj.getMsg()) sys.exit(-1) sys.exit(0) diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c index 5f8501c68da4..5cbba70bcdd0 100644 --- a/tools/perf/tests/evsel-tp-sched.c +++ b/tools/perf/tests/evsel-tp-sched.c @@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, return -1; } - is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED); + is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED); if (should_be_signed && !is_signed) { pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", evsel->name, name, is_signed, should_be_signed); diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 1d00e5ec7906..82e16bf84466 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -224,20 +224,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser) return ret; } -static int disasm__cmp(struct annotation_line *a, struct annotation_line *b) +static double disasm__cmp(struct annotation_line *a, struct annotation_line *b, + int percent_type) { int i; for (i = 0; i < a->data_nr; i++) { - if (a->data[i].percent == b->data[i].percent) + if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type]) continue; - return a->data[i].percent < b->data[i].percent; + return a->data[i].percent[percent_type] - + b->data[i].percent[percent_type]; } return 0; } -static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al) +static void disasm_rb_tree__insert(struct annotate_browser *browser, + struct annotation_line *al) { + struct rb_root *root = &browser->entries; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct annotation_line *l; @@ -246,7 +250,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line parent = *p; l = rb_entry(parent, struct annotation_line, rb_node); - if (disasm__cmp(al, l)) + if (disasm__cmp(al, l, browser->opts->percent_type) < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -329,7 +333,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser, RB_CLEAR_NODE(&pos->al.rb_node); continue; } - disasm_rb_tree__insert(&browser->entries, &pos->al); + disasm_rb_tree__insert(browser, &pos->al); } pthread_mutex_unlock(¬es->lock); diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp index 89512504551b..39c0004f2886 100644 --- a/tools/perf/util/c++/clang.cpp +++ b/tools/perf/util/c++/clang.cpp @@ -160,7 +160,7 @@ getBPFObjectFromModule(llvm::Module *Module) } PM.run(*Module); - return std::move(Buffer); + return Buffer; } } diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 1ccbd3342069..383674f448fc 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list) if (!cpu_list) return cpu_map__read_all_cpu_map(); - if (!isdigit(*cpu_list)) + /* + * must handle the case of empty cpumap to cover + * TOPOLOGY header for NUMA nodes with no CPU + * ( e.g., because of CPU hotplug) + */ + if (!isdigit(*cpu_list) && *cpu_list != '\0') goto out; while (isdigit(*cpu_list)) { @@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list) if (nr_cpus > 0) cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); - else + else if (*cpu_list != '\0') cpus = cpu_map__default_new(); + else + cpus = 
cpu_map__dummy_new(); invalid: free(tmp_cpus); out: diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 93f74d8d3cdd..42c3e5a229d2 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { static char mem_loads_name[100]; static bool mem_loads_name__init; -char *perf_mem_events__name(int i) +char * __weak perf_mem_events__name(int i) { if (i == PERF_MEM_EVENTS__LOAD) { if (!mem_loads_name__init) { diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c index 897589507d97..ea523d3b248f 100644 --- a/tools/perf/util/ordered-events.c +++ b/tools/perf/util/ordered-events.c @@ -391,8 +391,10 @@ void ordered_events__free(struct ordered_events *oe) * Current buffer might not have all the events allocated * yet, we need to free only allocated ones ... */ - list_del(&oe->buffer->list); - ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); + if (oe->buffer) { + list_del(&oe->buffer->list); + ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); + } /* ... and continue with the rest */ list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) { diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 63f758c655d5..64d1f36dee99 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -17,6 +17,8 @@ if cc == "clang": vars[var] = sub("-mcet", "", vars[var]) if not clang_has_option("-fcf-protection"): vars[var] = sub("-fcf-protection", "", vars[var]) + if not clang_has_option("-fstack-clash-protection"): + vars[var] = sub("-fstack-clash-protection", "", vars[var]) from distutils.core import setup, Extension diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 66a84d5846c8..dca7dfae69ad 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -19,6 +19,20 @@ #define EM_AARCH64 183 /* ARM 64 bit */ #endif +#ifndef ELF32_ST_VISIBILITY +#define ELF32_ST_VISIBILITY(o) ((o) & 0x03) +#endif + +/* For ELF64 the definitions are the same. */ +#ifndef ELF64_ST_VISIBILITY +#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) +#endif + +/* How to extract information held in the st_other field. 
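perf's generic perf_mem_events__name() above becomes a __weak default so that an architecture file (the new PowerPC mem-events.c earlier in the diff) can supply a strong definition that wins at link time. A single-file illustration of the weak/strong override mechanism; with no strong definition linked in, the weak default is used:

/*
 * Weak default that another object file could override with a normal
 * (strong) definition of the same symbol.  Function name and string are
 * hypothetical.
 */
#include <stdio.h>

__attribute__((weak)) const char *mem_load_event(void)
{
	return "cpu/mem-loads,ldlat=30/P";
}

int main(void)
{
	printf("%s\n", mem_load_event());
	return 0;
}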
*/ +#ifndef GELF_ST_VISIBILITY +#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val) +#endif + typedef Elf64_Nhdr GElf_Nhdr; #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT @@ -87,6 +101,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym) return GELF_ST_TYPE(sym->st_info); } +static inline uint8_t elf_sym__visibility(const GElf_Sym *sym) +{ + return GELF_ST_VISIBILITY(sym->st_other); +} + #ifndef STT_GNU_IFUNC #define STT_GNU_IFUNC 10 #endif @@ -111,7 +130,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym) return elf_sym__type(sym) == STT_NOTYPE && sym->st_name != 0 && sym->st_shndx != SHN_UNDEF && - sym->st_shndx != SHN_ABS; + sym->st_shndx != SHN_ABS && + elf_sym__visibility(sym) != STV_HIDDEN && + elf_sym__visibility(sym) != STV_INTERNAL; } static bool elf_sym__filter(GElf_Sym *sym) diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 1a2bd15c5b6e..400ee81a3043 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf TARGETS += efivarfs TARGETS += exec TARGETS += filesystems +TARGETS += filesystems/binderfs TARGETS += firmware TARGETS += ftrace TARGETS += futex diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 315a44fa32af..84fd6f1bf33e 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void) unsigned int start, end, possible_cpus = 0; char buff[128]; FILE *fp; - int n; + int len, n, i, j = 0; fp = fopen(fcpu, "r"); if (!fp) { @@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void) exit(1); } - while (fgets(buff, sizeof(buff), fp)) { - n = sscanf(buff, "%u-%u", &start, &end); - if (n == 0) { - printf("Failed to retrieve # possible CPUs!\n"); - exit(1); - } else if (n == 1) { - end = start; + if (!fgets(buff, sizeof(buff), fp)) { + printf("Failed to read %s!\n", fcpu); + exit(1); + } + + len = strlen(buff); + for (i = 0; i <= len; i++) { + if (buff[i] == ',' || buff[i] == '\0') { + buff[i] = '\0'; + n = sscanf(&buff[j], "%u-%u", &start, &end); + if (n <= 0) { + printf("Failed to retrieve # possible CPUs!\n"); + exit(1); + } else if (n == 1) { + end = start; + } + possible_cpus += end - start + 1; + j = i + 1; } - possible_cpus = start == 0 ? 
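The symbol-elf.c hunk above skips STV_HIDDEN and STV_INTERNAL symbols when deciding what counts as a label; the visibility is kept in the low two bits of st_other. A minimal check using the macros glibc's <elf.h> already provides:

/* Extract symbol visibility from st_other and filter hidden/internal. */
#include <elf.h>
#include <stdbool.h>
#include <stdio.h>

static bool visible_enough(unsigned char st_other)
{
	unsigned char vis = ELF32_ST_VISIBILITY(st_other);

	return vis != STV_HIDDEN && vis != STV_INTERNAL;
}

int main(void)
{
	printf("default:  %d\n", visible_enough(STV_DEFAULT));
	printf("hidden:   %d\n", visible_enough(STV_HIDDEN));
	printf("internal: %d\n", visible_enough(STV_INTERNAL));
	return 0;
}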
end + 1 : 0; - break; } + fclose(fp); return possible_cpus; diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index a0bd04befe87..91420fa83b08 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -1881,13 +1881,12 @@ static struct btf_raw_test raw_tests[] = { }, { - .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)", + .descr = "func proto (TYPEDEF=>FUNC_PROTO)", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ - BTF_CONST_ENC(4), /* [3] */ - BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */ - BTF_FUNC_PROTO_ENC(0, 2), /* [5] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ BTF_FUNC_PROTO_ARG_ENC(0, 1), BTF_FUNC_PROTO_ARG_ENC(0, 2), BTF_END_RAW, @@ -1901,8 +1900,6 @@ static struct btf_raw_test raw_tests[] = { .key_type_id = 1, .value_type_id = 1, .max_entries = 4, - .btf_load_err = true, - .err_str = "Invalid type_id", }, { diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 147e34cfceb7..02d7c871862a 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -474,6 +474,16 @@ static void test_lpm_delete(void) assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && errno == ENOENT); + key->prefixlen = 30; // unused prefix so far + inet_pton(AF_INET, "192.255.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == -1 && + errno == ENOENT); + + key->prefixlen = 16; // same prefix as the root node + inet_pton(AF_INET, "192.255.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == -1 && + errno == ENOENT); + /* assert initial lookup */ key->prefixlen = 32; inet_pton(AF_INET, "192.168.0.1", key->data); diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh index bab13dd025a6..0d26b5e3f966 100755 --- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh +++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh @@ -37,6 +37,10 @@ prerequisite() exit $ksft_skip fi + present_cpus=`cat $SYSFS/devices/system/cpu/present` + present_max=${present_cpus##*-} + echo "present_cpus = $present_cpus present_max = $present_max" + echo -e "\t Cpus in online state: $online_cpus" offline_cpus=`cat $SYSFS/devices/system/cpu/offline` @@ -151,6 +155,8 @@ online_cpus=0 online_max=0 offline_cpus=0 offline_max=0 +present_cpus=0 +present_max=0 while getopts e:ahp: opt; do case $opt in @@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then online_cpu_expect_success $online_max if [[ $offline_cpus -gt 0 ]]; then - echo -e "\t offline to online to offline: cpu $offline_max" - online_cpu_expect_success $offline_max - offline_cpu_expect_success $offline_max + echo -e "\t offline to online to offline: cpu $present_max" + online_cpu_expect_success $present_max + offline_cpu_expect_success $present_max + online_cpu $present_max fi exit 0 else diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore new file mode 100644 index 000000000000..8a5d9bf63dd4 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/.gitignore @@ -0,0 +1 @@ +binderfs_test diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile new file mode 100644 index 000000000000..58cb659b56b4 --- /dev/null +++ 
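The bpf_util.h rework above copes with a possible-CPU mask containing several comma-separated ranges ("0-3,8-11") rather than a single 0-N range. The counting logic in isolation, taking the mask as a string instead of reading it from sysfs:

/*
 * Count CPUs in a possible-CPU mask string made of comma-separated
 * "a-b" ranges or single "a" entries.
 */
#include <stdio.h>
#include <string.h>

static unsigned int count_possible_cpus(const char *mask)
{
	unsigned int possible = 0, start, end;
	char buf[128];
	size_t len, i, j = 0;
	int n;

	strncpy(buf, mask, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	len = strlen(buf);

	/* Split on ',' (and the trailing '\0'), parse each chunk. */
	for (i = 0; i <= len; i++) {
		if (buf[i] == ',' || buf[i] == '\0') {
			buf[i] = '\0';
			n = sscanf(&buf[j], "%u-%u", &start, &end);
			if (n <= 0)
				return 0;
			if (n == 1)
				end = start;
			possible += end - start + 1;
			j = i + 1;
		}
	}
	return possible;
}

int main(void)
{
	printf("%u\n", count_possible_cpus("0-3,8-11"));	/* prints 8 */
	return 0;
}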
b/tools/testing/selftests/filesystems/binderfs/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +CFLAGS += -I../../../../../usr/include/ +TEST_GEN_PROGS := binderfs_test + +include ../../lib.mk diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c new file mode 100644 index 000000000000..8c2ed962e1c7 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE +#include <errno.h> +#include <fcntl.h> +#include <sched.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/mount.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <linux/android/binder.h> +#include <linux/android/binderfs.h> +#include "../../kselftest.h" + +static ssize_t write_nointr(int fd, const void *buf, size_t count) +{ + ssize_t ret; +again: + ret = write(fd, buf, count); + if (ret < 0 && errno == EINTR) + goto again; + + return ret; +} + +static void write_to_file(const char *filename, const void *buf, size_t count, + int allowed_errno) +{ + int fd, saved_errno; + ssize_t ret; + + fd = open(filename, O_WRONLY | O_CLOEXEC); + if (fd < 0) + ksft_exit_fail_msg("%s - Failed to open file %s\n", + strerror(errno), filename); + + ret = write_nointr(fd, buf, count); + if (ret < 0) { + if (allowed_errno && (errno == allowed_errno)) { + close(fd); + return; + } + + goto on_error; + } + + if ((size_t)ret != count) + goto on_error; + + close(fd); + return; + +on_error: + saved_errno = errno; + close(fd); + errno = saved_errno; + + if (ret < 0) + ksft_exit_fail_msg("%s - Failed to write to file %s\n", + strerror(errno), filename); + + ksft_exit_fail_msg("Failed to write to file %s\n", filename); +} + +static void change_to_userns(void) +{ + int ret; + uid_t uid; + gid_t gid; + /* {g,u}id_map files only allow a max of 4096 bytes written to them */ + char idmap[4096]; + + uid = getuid(); + gid = getgid(); + + ret = unshare(CLONE_NEWUSER); + if (ret < 0) + ksft_exit_fail_msg("%s - Failed to unshare user namespace\n", + strerror(errno)); + + write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT); + + ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid); + if (ret < 0 || (size_t)ret >= sizeof(idmap)) + ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n", + strerror(errno)); + + write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0); + + ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid); + if (ret < 0 || (size_t)ret >= sizeof(idmap)) + ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n", + strerror(errno)); + + write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0); + + ret = setgid(0); + if (ret) + ksft_exit_fail_msg("%s - Failed to setgid(0)\n", + strerror(errno)); + + ret = setuid(0); + if (ret) + ksft_exit_fail_msg("%s - Failed to setgid(0)\n", + strerror(errno)); +} + +static void change_to_mountns(void) +{ + int ret; + + ret = unshare(CLONE_NEWNS); + if (ret < 0) + ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n", + strerror(errno)); + + ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); + if (ret < 0) + ksft_exit_fail_msg("%s - Failed to mount / as private\n", + strerror(errno)); +} + +static void rmdir_protect_errno(const char *dir) +{ + int saved_errno = errno; + (void)rmdir(dir); + errno = saved_errno; +} + +static void __do_binderfs_test(void) +{ + int fd, ret, saved_errno; + size_t 
len; + ssize_t wret; + bool keep = false; + struct binderfs_device device = { 0 }; + struct binder_version version = { 0 }; + + change_to_mountns(); + + ret = mkdir("/dev/binderfs", 0755); + if (ret < 0) { + if (errno != EEXIST) + ksft_exit_fail_msg( + "%s - Failed to create binderfs mountpoint\n", + strerror(errno)); + + keep = true; + } + + ret = mount(NULL, "/dev/binderfs", "binder", 0, 0); + if (ret < 0) { + if (errno != ENODEV) + ksft_exit_fail_msg("%s - Failed to mount binderfs\n", + strerror(errno)); + + keep ? : rmdir_protect_errno("/dev/binderfs"); + ksft_exit_skip( + "The Android binderfs filesystem is not available\n"); + } + + /* binderfs mount test passed */ + ksft_inc_pass_cnt(); + + memcpy(device.name, "my-binder", strlen("my-binder")); + + fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC); + if (fd < 0) + ksft_exit_fail_msg( + "%s - Failed to open binder-control device\n", + strerror(errno)); + + ret = ioctl(fd, BINDER_CTL_ADD, &device); + saved_errno = errno; + close(fd); + errno = saved_errno; + if (ret < 0) { + keep ? : rmdir_protect_errno("/dev/binderfs"); + ksft_exit_fail_msg( + "%s - Failed to allocate new binder device\n", + strerror(errno)); + } + + ksft_print_msg( + "Allocated new binder device with major %d, minor %d, and name %s\n", + device.major, device.minor, device.name); + + /* binder device allocation test passed */ + ksft_inc_pass_cnt(); + + fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY); + if (fd < 0) { + keep ? : rmdir_protect_errno("/dev/binderfs"); + ksft_exit_fail_msg("%s - Failed to open my-binder device\n", + strerror(errno)); + } + + ret = ioctl(fd, BINDER_VERSION, &version); + saved_errno = errno; + close(fd); + errno = saved_errno; + if (ret < 0) { + keep ? : rmdir_protect_errno("/dev/binderfs"); + ksft_exit_fail_msg( + "%s - Failed to open perform BINDER_VERSION request\n", + strerror(errno)); + } + + ksft_print_msg("Detected binder version: %d\n", + version.protocol_version); + + /* binder transaction with binderfs binder device passed */ + ksft_inc_pass_cnt(); + + ret = unlink("/dev/binderfs/my-binder"); + if (ret < 0) { + keep ? : rmdir_protect_errno("/dev/binderfs"); + ksft_exit_fail_msg("%s - Failed to delete binder device\n", + strerror(errno)); + } + + /* binder device removal passed */ + ksft_inc_pass_cnt(); + + ret = unlink("/dev/binderfs/binder-control"); + if (!ret) { + keep ? : rmdir_protect_errno("/dev/binderfs"); + ksft_exit_fail_msg("Managed to delete binder-control device\n"); + } else if (errno != EPERM) { + keep ? : rmdir_protect_errno("/dev/binderfs"); + ksft_exit_fail_msg( + "%s - Failed to delete binder-control device but exited with unexpected error code\n", + strerror(errno)); + } + + /* binder-control device removal failed as expected */ + ksft_inc_xfail_cnt(); + +on_error: + ret = umount2("/dev/binderfs", MNT_DETACH); + keep ?: rmdir_protect_errno("/dev/binderfs"); + if (ret < 0) + ksft_exit_fail_msg("%s - Failed to unmount binderfs\n", + strerror(errno)); + + /* binderfs unmount test passed */ + ksft_inc_pass_cnt(); +} + +static void binderfs_test_privileged() +{ + if (geteuid() != 0) + ksft_print_msg( + "Tests are not run as root. 
Skipping privileged tests\n"); + else + __do_binderfs_test(); +} + +static void binderfs_test_unprivileged() +{ + change_to_userns(); + __do_binderfs_test(); +} + +int main(int argc, char *argv[]) +{ + binderfs_test_privileged(); + binderfs_test_unprivileged(); + ksft_exit_pass(); +} diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config new file mode 100644 index 000000000000..02dd6cc9cf99 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/config @@ -0,0 +1,3 @@ +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDERFS=y +CONFIG_ANDROID_BINDER_IPC=y diff --git a/tools/testing/selftests/ir/Makefile b/tools/testing/selftests/ir/Makefile index f4ba8eb84b95..ad06489c22a5 100644 --- a/tools/testing/selftests/ir/Makefile +++ b/tools/testing/selftests/ir/Makefile @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 TEST_PROGS := ir_loopback.sh TEST_GEN_PROGS_EXTENDED := ir_loopback +APIDIR := ../../../include/uapi +CFLAGS += -Wall -O2 -I$(APIDIR) include ../lib.mk diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index f8f3e90700c0..1e6d14d2825c 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -21,6 +21,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls KSFT_KHDR_INSTALL := 1 include ../lib.mk -$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma +$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 802b4af18729..1080ff55a788 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -388,6 +388,7 @@ fib_carrier_unicast_test() set -e $IP link set dev dummy0 carrier off + sleep 1 set +e echo " Carrier down" diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh index 8db35b99457c..71d7fdc513c1 100755 --- a/tools/testing/selftests/net/xfrm_policy.sh +++ b/tools/testing/selftests/net/xfrm_policy.sh @@ -28,6 +28,19 @@ KEY_AES=0x0123456789abcdef0123456789012345 SPI1=0x1 SPI2=0x2 +do_esp_policy() { + local ns=$1 + local me=$2 + local remote=$3 + local lnet=$4 + local rnet=$5 + + # to encrypt packets as they go out (includes forwarded packets that need encapsulation) + ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow + # to fwd decrypted packets after esp processing: + ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow +} + do_esp() { local ns=$1 local me=$2 @@ -40,10 +53,59 @@ do_esp() { ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet - # to encrypt packets as they go out (includes forwarded packets that need encapsulation) - ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow - # to fwd decrypted packets after esp processing: - ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow + do_esp_policy $ns $me $remote $lnet $rnet +} + +# add policies 
with different netmasks, to make sure kernel carries +# the policies contained within new netmask over when search tree is +# re-built. +# peer netns that are supposed to be encapsulated via esp have addresses +# in the 10.0.1.0/24 and 10.0.2.0/24 subnets, respectively. +# +# Adding a policy for '10.0.1.0/23' will make it necessary to +# alter the prefix of 10.0.1.0 subnet. +# In case new prefix overlaps with existing node, the node and all +# policies it carries need to be merged with the existing one(s). +# +# Do that here. +do_overlap() +{ + local ns=$1 + + # adds new nodes to tree (neither network exists yet in policy database). + ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block + + # adds a new node in the 10.0.0.0/24 tree (dst node exists). + ip -net $ns xfrm policy add src 10.2.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block + + # adds a 10.2.0.0/23 node, but for different dst. + ip -net $ns xfrm policy add src 10.2.0.0/23 dst 10.0.1.0/24 dir fwd priority 200 action block + + # dst now overlaps with the 10.0.1.0/24 ESP policy in fwd. + # kernel must 'promote' existing one (10.0.0.0/24) to 10.0.0.0/23. + # But 10.0.0.0/23 also includes existing 10.0.1.0/24, so that node + # also has to be merged too, including source-sorted subtrees. + # old: + # 10.0.0.0/24 (node 1 in dst tree of the bin) + # 10.1.0.0/24 (node in src tree of dst node 1) + # 10.2.0.0/24 (node in src tree of dst node 1) + # 10.0.1.0/24 (node 2 in dst tree of the bin) + # 10.0.2.0/24 (node in src tree of dst node 2) + # 10.2.0.0/24 (node in src tree of dst node 2) + # + # The next 'policy add' adds dst '10.0.0.0/23', which means + # that dst node 1 and dst node 2 have to be merged including + # the sub-tree. As no duplicates are allowed, policies in + # the two '10.0.2.0/24' are also merged. + # + # after the 'add', internal search tree should look like this: + # 10.0.0.0/23 (node in dst tree of bin) + # 10.0.2.0/24 (node in src tree of dst node) + # 10.1.0.0/24 (node in src tree of dst node) + # 10.2.0.0/24 (node in src tree of dst node) + # + # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23. + ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block } do_esp_policy_get_check() { @@ -160,6 +222,41 @@ check_xfrm() { return $lret } +check_exceptions() +{ + logpostfix="$1" + local lret=0 + + # ping to .254 should be excluded from the tunnel (exception is in place). + check_xfrm 0 254 + if [ $? -ne 0 ]; then + echo "FAIL: expected ping to .254 to fail ($logpostfix)" + lret=1 + else + echo "PASS: ping to .254 bypassed ipsec tunnel ($logpostfix)" + fi + + # ping to .253 should use use ipsec due to direct policy exception. + check_xfrm 1 253 + if [ $? -ne 0 ]; then + echo "FAIL: expected ping to .253 to use ipsec tunnel ($logpostfix)" + lret=1 + else + echo "PASS: direct policy matches ($logpostfix)" + fi + + # ping to .2 should use ipsec. + check_xfrm 1 2 + if [ $? 
-ne 0 ]; then + echo "FAIL: expected ping to .2 to use ipsec tunnel ($logpostfix)" + lret=1 + else + echo "PASS: policy matches ($logpostfix)" + fi + + return $lret +} + #check for needed privileges if [ "$(id -u)" -ne 0 ];then echo "SKIP: Need root privileges" @@ -270,33 +367,45 @@ do_exception ns4 10.0.3.10 10.0.3.1 10.0.1.253 10.0.1.240/28 do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96 do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96 -# ping to .254 should now be excluded from the tunnel -check_xfrm 0 254 +check_exceptions "exceptions" if [ $? -ne 0 ]; then - echo "FAIL: expected ping to .254 to fail" ret=1 -else - echo "PASS: ping to .254 bypassed ipsec tunnel" fi -# ping to .253 should use use ipsec due to direct policy exception. -check_xfrm 1 253 -if [ $? -ne 0 ]; then - echo "FAIL: expected ping to .253 to use ipsec tunnel" - ret=1 -else - echo "PASS: direct policy matches" -fi +# insert block policies with adjacent/overlapping netmasks +do_overlap ns3 -# ping to .2 should use ipsec. -check_xfrm 1 2 +check_exceptions "exceptions and block policies" if [ $? -ne 0 ]; then - echo "FAIL: expected ping to .2 to use ipsec tunnel" ret=1 -else - echo "PASS: policy matches" fi +for n in ns3 ns4;do + ip -net $n xfrm policy set hthresh4 28 24 hthresh6 126 125 + sleep $((RANDOM%5)) +done + +check_exceptions "exceptions and block policies after hresh changes" + +# full flush of policy db, check everything gets freed incl. internal meta data +ip -net ns3 xfrm policy flush + +do_esp_policy ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24 +do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28 + +# move inexact policies to hash table +ip -net ns3 xfrm policy set hthresh4 16 16 + +sleep $((RANDOM%5)) +check_exceptions "exceptions and block policies after hthresh change in ns3" + +# restore original hthresh settings -- move policies back to tables +for n in ns3 ns4;do + ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128 + sleep $((RANDOM%5)) +done +check_exceptions "exceptions and block policies after hresh change to normal" + for i in 1 2 3 4;do ip netns del ns$i;done exit $ret diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 47ed6cef93fb..c9ff2b47bd1c 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for netfilter selftests -TEST_PROGS := nft_trans_stress.sh +TEST_PROGS := nft_trans_stress.sh nft_nat.sh include ../lib.mk diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config index 1017313e41a8..59caa8f71cd8 100644 --- a/tools/testing/selftests/netfilter/config +++ b/tools/testing/selftests/netfilter/config @@ -1,2 +1,2 @@ CONFIG_NET_NS=y -NF_TABLES_INET=y +CONFIG_NF_TABLES_INET=y diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh new file mode 100755 index 000000000000..8ec76681605c --- /dev/null +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -0,0 +1,762 @@ +#!/bin/bash +# +# This test is for basic NAT functionality: snat, dnat, redirect, masquerade. +# + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 +ret=0 + +nft --version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without nft tool" + exit $ksft_skip +fi + +ip -Version > /dev/null 2>&1 +if [ $? 
-ne 0 ];then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi + +ip netns add ns0 +ip netns add ns1 +ip netns add ns2 + +ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 +ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 + +ip -net ns0 link set lo up +ip -net ns0 link set veth0 up +ip -net ns0 addr add 10.0.1.1/24 dev veth0 +ip -net ns0 addr add dead:1::1/64 dev veth0 + +ip -net ns0 link set veth1 up +ip -net ns0 addr add 10.0.2.1/24 dev veth1 +ip -net ns0 addr add dead:2::1/64 dev veth1 + +for i in 1 2; do + ip -net ns$i link set lo up + ip -net ns$i link set eth0 up + ip -net ns$i addr add 10.0.$i.99/24 dev eth0 + ip -net ns$i route add default via 10.0.$i.1 + ip -net ns$i addr add dead:$i::99/64 dev eth0 + ip -net ns$i route add default via dead:$i::1 +done + +bad_counter() +{ + local ns=$1 + local counter=$2 + local expect=$3 + + echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2 + ip netns exec $ns nft list counter inet filter $counter 1>&2 +} + +check_counters() +{ + ns=$1 + local lret=0 + + cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84") + if [ $? -ne 0 ]; then + bad_counter $ns ns0in "packets 1 bytes 84" + lret=1 + fi + cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84") + if [ $? -ne 0 ]; then + bad_counter $ns ns0out "packets 1 bytes 84" + lret=1 + fi + + expect="packets 1 bytes 104" + cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter $ns ns0in6 "$expect" + lret=1 + fi + cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter $ns ns0out6 "$expect" + lret=1 + fi + + return $lret +} + +check_ns0_counters() +{ + local ns=$1 + local lret=0 + + cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") + if [ $? -ne 0 ]; then + bad_counter ns0 ns0in "packets 0 bytes 0" + lret=1 + fi + + cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") + if [ $? -ne 0 ]; then + bad_counter ns0 ns0in6 "packets 0 bytes 0" + lret=1 + fi + + cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") + if [ $? -ne 0 ]; then + bad_counter ns0 ns0out "packets 0 bytes 0" + lret=1 + fi + cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") + if [ $? -ne 0 ]; then + bad_counter ns0 ns0out6 "packets 0 bytes 0" + lret=1 + fi + + for dir in "in" "out" ; do + expect="packets 1 bytes 84" + cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns0 $ns$dir "$expect" + lret=1 + fi + + expect="packets 1 bytes 104" + cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns0 $ns$dir6 "$expect" + lret=1 + fi + done + + return $lret +} + +reset_counters() +{ + for i in 0 1 2;do + ip netns exec ns$i nft reset counters inet > /dev/null + done +} + +test_local_dnat6() +{ + local lret=0 +ip netns exec ns0 nft -f - <<EOF +table ip6 nat { + chain output { + type nat hook output priority 0; policy accept; + ip6 daddr dead:1::99 dnat to dead:2::99 + } +} +EOF + if [ $? -ne 0 ]; then + echo "SKIP: Could not add add ip6 dnat hook" + return $ksft_skip + fi + + # ping netns1, expect rewrite to netns2 + ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null + if [ $? 
-ne 0 ]; then + lret=1 + echo "ERROR: ping6 failed" + return $lret + fi + + expect="packets 0 bytes 0" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns0 ns1$dir "$expect" + lret=1 + fi + done + + expect="packets 1 bytes 104" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns0 ns2$dir "$expect" + lret=1 + fi + done + + # expect 0 count in ns1 + expect="packets 0 bytes 0" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + done + + # expect 1 packet in ns2 + expect="packets 1 bytes 104" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns0$dir "$expect" + lret=1 + fi + done + + test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2" + ip netns exec ns0 nft flush chain ip6 nat output + + return $lret +} + +test_local_dnat() +{ + local lret=0 +ip netns exec ns0 nft -f - <<EOF +table ip nat { + chain output { + type nat hook output priority 0; policy accept; + ip daddr 10.0.1.99 dnat to 10.0.2.99 + } +} +EOF + # ping netns1, expect rewrite to netns2 + ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null + if [ $? -ne 0 ]; then + lret=1 + echo "ERROR: ping failed" + return $lret + fi + + expect="packets 0 bytes 0" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns0 ns1$dir "$expect" + lret=1 + fi + done + + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns0 ns2$dir "$expect" + lret=1 + fi + done + + # expect 0 count in ns1 + expect="packets 0 bytes 0" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + done + + # expect 1 packet in ns2 + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns0$dir "$expect" + lret=1 + fi + done + + test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2" + + ip netns exec ns0 nft flush chain ip nat output + + reset_counters + ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null + if [ $? -ne 0 ]; then + lret=1 + echo "ERROR: ping failed" + return $lret + fi + + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns1$dir "$expect" + lret=1 + fi + done + expect="packets 0 bytes 0" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns0 ns2$dir "$expect" + lret=1 + fi + done + + # expect 1 count in ns1 + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? 
-ne 0 ]; then + bad_counter ns0 ns0$dir "$expect" + lret=1 + fi + done + + # expect 0 packet in ns2 + expect="packets 0 bytes 0" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns2$dir "$expect" + lret=1 + fi + done + + test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush" + + return $lret +} + + +test_masquerade6() +{ + local lret=0 + + ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null + + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping ns1 from ns2 via ipv6" + return 1 + lret=1 + fi + + expect="packets 1 bytes 104" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns2$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + reset_counters + +# add masquerading rule +ip netns exec ns0 nft -f - <<EOF +table ip6 nat { + chain postrouting { + type nat hook postrouting priority 0; policy accept; + meta oif veth0 masquerade + } +} +EOF + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading" + lret=1 + fi + + # ns1 should have seen packets from ns0, due to masquerade + expect="packets 1 bytes 104" + for dir in "in6" "out6" ; do + + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + # ns1 should not have seen packets from ns2, due to masquerade + expect="packets 0 bytes 0" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + ip netns exec ns0 nft flush chain ip6 nat postrouting + if [ $? -ne 0 ]; then + echo "ERROR: Could not flush ip6 nat postrouting" 1>&2 + lret=1 + fi + + test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2" + + return $lret +} + +test_masquerade() +{ + local lret=0 + + ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null + ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: canot ping ns1 from ns2" + lret=1 + fi + + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns2$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? 
-ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + reset_counters + +# add masquerading rule +ip netns exec ns0 nft -f - <<EOF +table ip nat { + chain postrouting { + type nat hook postrouting priority 0; policy accept; + meta oif veth0 masquerade + } +} +EOF + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading" + lret=1 + fi + + # ns1 should have seen packets from ns0, due to masquerade + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + # ns1 should not have seen packets from ns2, due to masquerade + expect="packets 0 bytes 0" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + ip netns exec ns0 nft flush chain ip nat postrouting + if [ $? -ne 0 ]; then + echo "ERROR: Could not flush nat postrouting" 1>&2 + lret=1 + fi + + test $lret -eq 0 && echo "PASS: IP masquerade for ns2" + + return $lret +} + +test_redirect6() +{ + local lret=0 + + ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null + + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannnot ping ns1 from ns2 via ipv6" + lret=1 + fi + + expect="packets 1 bytes 104" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns2$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + reset_counters + +# add redirect rule +ip netns exec ns0 nft -f - <<EOF +table ip6 nat { + chain prerouting { + type nat hook prerouting priority 0; policy accept; + meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect + } +} +EOF + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect" + lret=1 + fi + + # ns1 should have seen no packets from ns2, due to redirection + expect="packets 0 bytes 0" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + done + + # ns0 should have seen packets from ns2, due to masquerade + expect="packets 1 bytes 104" + for dir in "in6" "out6" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + done + + ip netns exec ns0 nft delete table ip6 nat + if [ $? 
-ne 0 ]; then + echo "ERROR: Could not delete ip6 nat table" 1>&2 + lret=1 + fi + + test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2" + + return $lret +} + +test_redirect() +{ + local lret=0 + + ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null + ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping ns1 from ns2" + lret=1 + fi + + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns2$dir "$expect" + lret=1 + fi + + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns2 ns1$dir "$expect" + lret=1 + fi + done + + reset_counters + +# add redirect rule +ip netns exec ns0 nft -f - <<EOF +table ip nat { + chain prerouting { + type nat hook prerouting priority 0; policy accept; + meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect + } +} +EOF + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping ns1 from ns2 with active ip redirect" + lret=1 + fi + + # ns1 should have seen no packets from ns2, due to redirection + expect="packets 0 bytes 0" + for dir in "in" "out" ; do + + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + done + + # ns0 should have seen packets from ns2, due to masquerade + expect="packets 1 bytes 84" + for dir in "in" "out" ; do + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter ns1 ns0$dir "$expect" + lret=1 + fi + done + + ip netns exec ns0 nft delete table ip nat + if [ $? 
-ne 0 ]; then + echo "ERROR: Could not delete nat table" 1>&2 + lret=1 + fi + + test $lret -eq 0 && echo "PASS: IP redirection for ns2" + + return $lret +} + + +# ip netns exec ns0 ping -c 1 -q 10.0.$i.99 +for i in 0 1 2; do +ip netns exec ns$i nft -f - <<EOF +table inet filter { + counter ns0in {} + counter ns1in {} + counter ns2in {} + + counter ns0out {} + counter ns1out {} + counter ns2out {} + + counter ns0in6 {} + counter ns1in6 {} + counter ns2in6 {} + + counter ns0out6 {} + counter ns1out6 {} + counter ns2out6 {} + + map nsincounter { + type ipv4_addr : counter + elements = { 10.0.1.1 : "ns0in", + 10.0.2.1 : "ns0in", + 10.0.1.99 : "ns1in", + 10.0.2.99 : "ns2in" } + } + + map nsincounter6 { + type ipv6_addr : counter + elements = { dead:1::1 : "ns0in6", + dead:2::1 : "ns0in6", + dead:1::99 : "ns1in6", + dead:2::99 : "ns2in6" } + } + + map nsoutcounter { + type ipv4_addr : counter + elements = { 10.0.1.1 : "ns0out", + 10.0.2.1 : "ns0out", + 10.0.1.99: "ns1out", + 10.0.2.99: "ns2out" } + } + + map nsoutcounter6 { + type ipv6_addr : counter + elements = { dead:1::1 : "ns0out6", + dead:2::1 : "ns0out6", + dead:1::99 : "ns1out6", + dead:2::99 : "ns2out6" } + } + + chain input { + type filter hook input priority 0; policy accept; + counter name ip saddr map @nsincounter + icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6 + } + chain output { + type filter hook output priority 0; policy accept; + counter name ip daddr map @nsoutcounter + icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6 + } +} +EOF +done + +sleep 3 +# test basic connectivity +for i in 1 2; do + ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null + if [ $? -ne 0 ];then + echo "ERROR: Could not reach other namespace(s)" 1>&2 + ret=1 + fi + + ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null + if [ $? -ne 0 ];then + echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2 + ret=1 + fi + check_counters ns$i + if [ $? -ne 0 ]; then + ret=1 + fi + + check_ns0_counters ns$i + if [ $? -ne 0 ]; then + ret=1 + fi + reset_counters +done + +if [ $ret -eq 0 ];then + echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2" +fi + +reset_counters +test_local_dnat +test_local_dnat6 + +reset_counters +test_masquerade +test_masquerade6 + +reset_counters +test_redirect +test_redirect6 + +for i in 0 1 2; do ip netns del ns$i;done + +exit $ret diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index 9050eeea5f5f..1de8bd8ccf5d 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile @@ -9,6 +9,3 @@ all: $(TEST_PROGS) top_srcdir = ../../../../.. 
KSFT_KHDR_INSTALL := 1 include ../../lib.mk - -clean: - rm -fr $(TEST_GEN_FILES) diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore index 82121a81681f..29bac5ef9a93 100644 --- a/tools/testing/selftests/proc/.gitignore +++ b/tools/testing/selftests/proc/.gitignore @@ -10,4 +10,5 @@ /proc-uptime-002 /read /self +/setns-dcache /thread-self diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile index 1c12c34cf85d..434d033ee067 100644 --- a/tools/testing/selftests/proc/Makefile +++ b/tools/testing/selftests/proc/Makefile @@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001 TEST_GEN_PROGS += proc-uptime-002 TEST_GEN_PROGS += read TEST_GEN_PROGS += self +TEST_GEN_PROGS += setns-dcache TEST_GEN_PROGS += thread-self include ../lib.mk diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c new file mode 100644 index 000000000000..60ab197a73fc --- /dev/null +++ b/tools/testing/selftests/proc/setns-dcache.c @@ -0,0 +1,129 @@ +/* + * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * Test that setns(CLONE_NEWNET) points to new /proc/net content even + * if old one is in dcache. + * + * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled. + */ +#undef NDEBUG +#include <assert.h> +#include <errno.h> +#include <sched.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <sys/socket.h> + +static pid_t pid = -1; + +static void f(void) +{ + if (pid > 0) { + kill(pid, SIGTERM); + } +} + +int main(void) +{ + int fd[2]; + char _ = 0; + int nsfd; + + atexit(f); + + /* Check for priviledges and syscall availability straight away. */ + if (unshare(CLONE_NEWNET) == -1) { + if (errno == ENOSYS || errno == EPERM) { + return 4; + } + return 1; + } + /* Distinguisher between two otherwise empty net namespaces. */ + if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) { + return 1; + } + + if (pipe(fd) == -1) { + return 1; + } + + pid = fork(); + if (pid == -1) { + return 1; + } + + if (pid == 0) { + if (unshare(CLONE_NEWNET) == -1) { + return 1; + } + + if (write(fd[1], &_, 1) != 1) { + return 1; + } + + pause(); + + return 0; + } + + if (read(fd[0], &_, 1) != 1) { + return 1; + } + + { + char buf[64]; + snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid); + nsfd = open(buf, O_RDONLY); + if (nsfd == -1) { + return 1; + } + } + + /* Reliably pin dentry into dcache. 
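For orientation, the remainder of this test can be summarised roughly as follows (a paraphrase of the code below, not additional behaviour; names as in the file):

    /*
     * 1. open("/proc/net/unix") in the original namespace and deliberately
     *    leak the fd, so the dentry stays pinned in the dcache;
     * 2. setns() into the child's freshly unshared (and therefore
     *    AF_UNIX-socket-free) network namespace;
     * 3. re-open "/proc/net/unix" and assert that only the header line
     *    ("Num RefCount Protocol ...") comes back, i.e. the pinned dentry
     *    from the old namespace was not served again.
     */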
*/ + (void)open("/proc/net/unix", O_RDONLY); + + if (setns(nsfd, CLONE_NEWNET) == -1) { + return 1; + } + + kill(pid, SIGTERM); + pid = 0; + + { + char buf[4096]; + ssize_t rv; + int fd; + + fd = open("/proc/net/unix", O_RDONLY); + if (fd == -1) { + return 1; + } + +#define S "Num RefCount Protocol Flags Type St Inode Path\n" + rv = read(fd, buf, sizeof(buf)); + + assert(rv == strlen(S)); + assert(memcmp(buf, S, strlen(S)) == 0); + } + + return 0; +} diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 496a9a8c773a..7e632b465ab4 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1608,7 +1608,16 @@ TEST_F(TRACE_poke, getpid_runs_normally) #ifdef SYSCALL_NUM_RET_SHARE_REG # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) #else -# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action) +# define EXPECT_SYSCALL_RETURN(val, action) \ + do { \ + errno = 0; \ + if (val < 0) { \ + EXPECT_EQ(-1, action); \ + EXPECT_EQ(-(val), errno); \ + } else { \ + EXPECT_EQ(val, action); \ + } \ + } while (0) #endif /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for @@ -1647,7 +1656,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee) /* Architecture-specific syscall changing routine. */ void change_syscall(struct __test_metadata *_metadata, - pid_t tracee, int syscall) + pid_t tracee, int syscall, int result) { int ret; ARCH_REGS regs; @@ -1706,7 +1715,7 @@ void change_syscall(struct __test_metadata *_metadata, #ifdef SYSCALL_NUM_RET_SHARE_REG TH_LOG("Can't modify syscall return on this architecture"); #else - regs.SYSCALL_RET = EPERM; + regs.SYSCALL_RET = result; #endif #ifdef HAVE_GETREGS @@ -1734,14 +1743,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee, case 0x1002: /* change getpid to getppid. */ EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); - change_syscall(_metadata, tracee, __NR_getppid); + change_syscall(_metadata, tracee, __NR_getppid, 0); break; case 0x1003: - /* skip gettid. */ + /* skip gettid with valid return code. */ EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); - change_syscall(_metadata, tracee, -1); + change_syscall(_metadata, tracee, -1, 45000); break; case 0x1004: + /* skip openat with error. 
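The tracer-side mechanics shown above reduce to: a syscall number of -1 skips the syscall, and the new result argument of change_syscall() is what the tracee then observes (via regs.SYSCALL_RET and the updated EXPECT_SYSCALL_RETURN()). A rough sketch of the two skip cases, using the names from these hunks:

    /* case 0x1003: skip gettid, fake a success value */
    change_syscall(_metadata, tracee, -1, 45000);
    /* tracee observes: syscall(__NR_gettid) == 45000 */

    /* case 0x1004 (below): skip openat, fake an error */
    change_syscall(_metadata, tracee, -1, -ESRCH);
    /* tracee observes: syscall(__NR_openat) == -1 with errno == ESRCH */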
*/ + EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee)); + change_syscall(_metadata, tracee, -1, -ESRCH); + break; + case 0x1005: /* do nothing (allow getppid) */ EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); break; @@ -1774,9 +1788,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, nr = get_syscall(_metadata, tracee); if (nr == __NR_getpid) - change_syscall(_metadata, tracee, __NR_getppid); + change_syscall(_metadata, tracee, __NR_getppid, 0); + if (nr == __NR_gettid) + change_syscall(_metadata, tracee, -1, 45000); if (nr == __NR_openat) - change_syscall(_metadata, tracee, -1); + change_syscall(_metadata, tracee, -1, -ESRCH); } FIXTURE_DATA(TRACE_syscall) { @@ -1793,8 +1809,10 @@ FIXTURE_SETUP(TRACE_syscall) BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), - BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; @@ -1842,15 +1860,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected) EXPECT_NE(self->mypid, syscall(__NR_getpid)); } -TEST_F(TRACE_syscall, ptrace_syscall_dropped) +TEST_F(TRACE_syscall, ptrace_syscall_errno) +{ + /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ + teardown_trace_fixture(_metadata, self->tracer); + self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, + true); + + /* Tracer should skip the open syscall, resulting in ESRCH. */ + EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); +} + +TEST_F(TRACE_syscall, ptrace_syscall_faked) { /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ teardown_trace_fixture(_metadata, self->tracer); self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, true); - /* Tracer should skip the open syscall, resulting in EPERM. */ - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); + /* Tracer should skip the gettid syscall, resulting fake pid. */ + EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); } TEST_F(TRACE_syscall, syscall_allowed) @@ -1883,7 +1912,21 @@ TEST_F(TRACE_syscall, syscall_redirected) EXPECT_NE(self->mypid, syscall(__NR_getpid)); } -TEST_F(TRACE_syscall, syscall_dropped) +TEST_F(TRACE_syscall, syscall_errno) +{ + long ret; + + ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); + ASSERT_EQ(0, ret); + + ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); + ASSERT_EQ(0, ret); + + /* openat has been skipped and an errno return. */ + EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); +} + +TEST_F(TRACE_syscall, syscall_faked) { long ret; @@ -1894,8 +1937,7 @@ TEST_F(TRACE_syscall, syscall_dropped) ASSERT_EQ(0, ret); /* gettid has been skipped and an altered return value stored. 
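Taken together with the filter installed in FIXTURE_SETUP(TRACE_syscall), the trace data values now dispatch as follows (a summary of the hunks above, not new behaviour):

    /*
     *   __NR_getpid  -> SECCOMP_RET_TRACE | 0x1002: rewritten to getppid
     *   __NR_gettid  -> SECCOMP_RET_TRACE | 0x1003: skipped, returns 45000
     *   __NR_openat  -> SECCOMP_RET_TRACE | 0x1004: skipped, returns -ESRCH
     *   __NR_getppid -> SECCOMP_RET_TRACE | 0x1005: traced but allowed to run
     *   anything else                              : SECCOMP_RET_ALLOW
     */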
*/ - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid)); - EXPECT_NE(self->mytid, syscall(__NR_gettid)); + EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); } TEST_F(TRACE_syscall, skip_after_RET_TRACE) diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index c02683cfb6c9..7656c7ce79d9 100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += -O3 -Wl,-no-as-needed -Wall -LDFLAGS += -lrt -lpthread -lm +LDLIBS += -lrt -lpthread -lm # these are all "safe" tests that don't modify # system time or require escalated privileges diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 9e350fd34504..9c486fad3f9f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu) /* Awaken to handle a signal, request we sleep again later. */ kvm_make_request(KVM_REQ_SLEEP, vcpu); } + + /* + * Make sure we will observe a potential reset request if we've + * observed a change to the power state. Pairs with the smp_wmb() in + * kvm_psci_vcpu_on(). + */ + smp_rmb(); } static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) @@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) vcpu_req_sleep(vcpu); + if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) + kvm_reset_vcpu(vcpu); + /* * Clear IRQ_PENDING requests that were made to guarantee * that a VCPU sees new virtual interrupts. diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index fbdf3ac2f001..30251e288629 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, vma_pagesize = vma_kernel_pagesize(vma); /* - * PUD level may not exist for a VM but PMD is guaranteed to - * exist. + * The stage2 has a minimum of 2 level table (For arm64 see + * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can + * use PMD_SIZE huge mappings (even when the PMD is folded into PGD). + * As for PUD huge maps, we must make sure that we have at least + * 3 levels, i.e, PMD is not folded. 
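Put differently (a paraphrase of the rule this comment states and of the corrected condition that follows):

    /*
     *   2 stage2 levels (PMD folded into PGD)          -> only PMD_SIZE huge mappings
     *   3+ stage2 levels, i.e. kvm_stage2_has_pmd(kvm) -> PUD_SIZE mappings may also be used
     */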
*/ if ((vma_pagesize == PMD_SIZE || - (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && + (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && !force_pte) { gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; } diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 9b73d3ad918a..34d08ee63747 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) { + struct vcpu_reset_state *reset_state; struct kvm *kvm = source_vcpu->kvm; struct kvm_vcpu *vcpu = NULL; - struct swait_queue_head *wq; unsigned long cpu_id; - unsigned long context_id; - phys_addr_t target_pc; cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; if (vcpu_mode_is_32bit(source_vcpu)) @@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) return PSCI_RET_INVALID_PARAMS; } - target_pc = smccc_get_arg2(source_vcpu); - context_id = smccc_get_arg3(source_vcpu); + reset_state = &vcpu->arch.reset_state; - kvm_reset_vcpu(vcpu); - - /* Gracefully handle Thumb2 entry point */ - if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { - target_pc &= ~((phys_addr_t) 1); - vcpu_set_thumb(vcpu); - } + reset_state->pc = smccc_get_arg2(source_vcpu); /* Propagate caller endianness */ - if (kvm_vcpu_is_be(source_vcpu)) - kvm_vcpu_set_be(vcpu); + reset_state->be = kvm_vcpu_is_be(source_vcpu); - *vcpu_pc(vcpu) = target_pc; /* * NOTE: We always update r0 (or x0) because for PSCI v0.1 * the general puspose registers are undefined upon CPU_ON. */ - smccc_set_retval(vcpu, context_id, 0, 0, 0); - vcpu->arch.power_off = false; - smp_mb(); /* Make sure the above is visible */ + reset_state->r0 = smccc_get_arg3(source_vcpu); + + WRITE_ONCE(reset_state->reset, true); + kvm_make_request(KVM_REQ_VCPU_RESET, vcpu); - wq = kvm_arch_vcpu_wq(vcpu); - swake_up_one(wq); + /* + * Make sure the reset request is observed if the change to + * power_state is observed. 
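The smp_wmb() that closes this comment pairs with the smp_rmb() added to vcpu_req_sleep() in the arm.c hunk above; the intended ordering, paraphrased from the two hunks:

    /*
     *   1. kvm_psci_vcpu_on(): record reset_state and raise KVM_REQ_VCPU_RESET;
     *   2. smp_wmb();
     *   3. clear vcpu->arch.power_off and wake the target vcpu;
     *   4. vcpu_req_sleep() on the target sees power_off cleared;
     *   5. smp_rmb();
     *   6. the request from step 1 is now guaranteed to be visible, and
     *      check_vcpu_requests() performs kvm_reset_vcpu().
     */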
+ */ + smp_wmb(); + + vcpu->arch.power_off = false; + kvm_vcpu_wake_up(vcpu); return PSCI_RET_SUCCESS; } diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c index 07aa900bac56..1f62f2b8065d 100644 --- a/virt/kvm/arm/vgic/vgic-debug.c +++ b/virt/kvm/arm/vgic/vgic-debug.c @@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v) return 0; } - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); print_irq_state(s, irq, vcpu); - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(kvm, irq); return 0; diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index c0c0b88af1d5..3bdb31eaed64 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm) struct vgic_dist *dist = &kvm->arch.vgic; INIT_LIST_HEAD(&dist->lpi_list_head); - spin_lock_init(&dist->lpi_list_lock); + raw_spin_lock_init(&dist->lpi_list_lock); } /* CREATION */ @@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) irq->intid = i + VGIC_NR_PRIVATE_IRQS; INIT_LIST_HEAD(&irq->ap_list); - spin_lock_init(&irq->irq_lock); + raw_spin_lock_init(&irq->irq_lock); irq->vcpu = NULL; irq->target_vcpu = vcpu0; kref_init(&irq->refcount); @@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; INIT_LIST_HEAD(&vgic_cpu->ap_list_head); - spin_lock_init(&vgic_cpu->ap_list_lock); + raw_spin_lock_init(&vgic_cpu->ap_list_lock); /* * Enable and configure all SGIs to be edge-triggered and @@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; INIT_LIST_HEAD(&irq->ap_list); - spin_lock_init(&irq->irq_lock); + raw_spin_lock_init(&irq->irq_lock); irq->intid = i; irq->vcpu = NULL; irq->target_vcpu = vcpu; @@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) irq->config = VGIC_CONFIG_LEVEL; } - /* - * GICv3 can only be created via the KVM_DEVICE_CREATE API and - * so we always know the emulation type at this point as it's - * either explicitly configured as GICv3, or explicitly - * configured as GICv2, or not configured yet which also - * implies GICv2. 
- */ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) irq->group = 1; else @@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int ret = 0, i; + int ret = 0, i, idx; if (vgic_initialized(kvm)) return 0; @@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm) if (ret) goto out; + /* Initialize groups on CPUs created before the VGIC type was known */ + kvm_for_each_vcpu(idx, vcpu, kvm) { + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + + for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { + struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + irq->group = 1; + else + irq->group = 0; + } + } + if (vgic_has_its(kvm)) { ret = vgic_v4_init(kvm); if (ret) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index eb2a390a6c86..ab3f47745d9c 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, INIT_LIST_HEAD(&irq->lpi_list); INIT_LIST_HEAD(&irq->ap_list); - spin_lock_init(&irq->irq_lock); + raw_spin_lock_init(&irq->irq_lock); irq->config = VGIC_CONFIG_EDGE; kref_init(&irq->refcount); @@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, irq->target_vcpu = vcpu; irq->group = 1; - spin_lock_irqsave(&dist->lpi_list_lock, flags); + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); /* * There could be a race with another vgic_add_lpi(), so we need to @@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, dist->lpi_list_count++; out_unlock: - spin_unlock_irqrestore(&dist->lpi_list_lock, flags); + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); /* * We "cache" the configuration table entries in our struct vgic_irq's. 
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, if (ret) return ret; - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { irq->priority = LPI_PROP_PRIORITY(prop); @@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, } } - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); if (irq->hw) return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); @@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) if (!intids) return -ENOMEM; - spin_lock_irqsave(&dist->lpi_list_lock, flags); + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { if (i == irq_count) break; @@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) continue; intids[i++] = irq->intid; } - spin_unlock_irqrestore(&dist->lpi_list_lock, flags); + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); *intid_ptr = intids; return i; @@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) int ret = 0; unsigned long flags; - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->target_vcpu = vcpu; - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); if (irq->hw) { struct its_vlpi_map map; @@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) } irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = pendmask & (1U << bit_nr); vgic_queue_irq_unlock(vcpu->kvm, irq, flags); vgic_put_irq(vcpu->kvm, irq); @@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, return irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING, true); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = true; vgic_queue_irq_unlock(kvm, irq, flags); diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 738b65d2d0e7..b535fffc7400 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = true; irq->source |= 1U << source_vcpu->vcpu_id; @@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); int target; - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->targets = (val >> (i * 8)) & cpu_mask; target = irq->targets ? 
__ffs(irq->targets) : 0; irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu, for (i = 0; i < len; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->source &= ~((val >> (i * 8)) & 0xff); if (!irq->source) irq->pending_latch = false; - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, for (i = 0; i < len; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->source |= (val >> (i * 8)) & 0xff; @@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, irq->pending_latch = true; vgic_queue_irq_unlock(vcpu->kvm, irq, flags); } else { - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } vgic_put_irq(vcpu->kvm, irq); } diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index b3d1f0985117..4a12322bf7df 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, if (!irq) return; - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); /* We only care about and preserve Aff0, Aff1 and Aff2. */ irq->mpidr = val & GENMASK(23, 0); irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, for (i = 0; i < len * 8; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (test_bit(i, &val)) { /* * pending_latch is set irrespective of irq type @@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, vgic_queue_irq_unlock(vcpu->kvm, irq, flags); } else { irq->pending_latch = false; - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } vgic_put_irq(vcpu->kvm, irq); @@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); /* * An access targetting Group0 SGIs can only generate @@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) irq->pending_latch = true; vgic_queue_irq_unlock(vcpu->kvm, irq, flags); } else { - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } vgic_put_irq(vcpu->kvm, irq); diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index ceeda7e04a4d..7de42fba05b5 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr, for (i = 0; i < len * 8; i++) { struct vgic_irq 
*irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->group = !!(val & BIT(i)); vgic_queue_irq_unlock(vcpu->kvm, irq, flags); @@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->enabled = true; vgic_queue_irq_unlock(vcpu->kvm, irq, flags); @@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->enabled = false; - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); unsigned long flags; - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq_is_pending(irq)) value |= (1U << i); - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw) vgic_hw_irq_spending(vcpu, irq, is_uaccess); else @@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw) vgic_hw_irq_cpending(vcpu, irq, is_uaccess); else irq->pending_latch = false; - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, unsigned long flags; struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw) { vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); @@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, if (irq->active) vgic_queue_irq_unlock(vcpu->kvm, irq, flags); else - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } /* @@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, for (i = 0; i < len; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock_irqsave(&irq->irq_lock, flags); + raw_spin_lock_irqsave(&irq->irq_lock, flags); /* Narrow the priority range to what we actually support */ irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); - spin_unlock_irqrestore(&irq->irq_lock, flags); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, continue; irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - 
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
				vgic_irq_set_phys_active(irq, false);
		}

-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
				vgic_irq_set_phys_active(irq, false);
		}

-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

@@ -347,9 +347,9 @@ retry:

	status = val & (1 << bit_nr);

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;

diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */
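Editor's note: the comment rewritten just above documents the ap_list_lock ordering rule that the rest of this file relies on. As an illustration only (the helper below is invented; the kvm_vcpu and vgic_cpu field names are the real ones touched by this patch, assuming an arm/arm64 build where ap_list_lock is now a raw_spinlock_t), the rule amounts to:

#include <linux/kvm_host.h>

/* Lock two VCPUs' ap_lists in vcpu_id order so concurrent callers cannot deadlock. */
static void demo_lock_both_ap_lists(struct kvm_vcpu *a, struct kvm_vcpu *b)
{
	struct kvm_vcpu *first  = a->vcpu_id < b->vcpu_id ? a : b;
	struct kvm_vcpu *second = a->vcpu_id < b->vcpu_id ? b : a;

	raw_spin_lock(&first->arch.vgic_cpu.ap_list_lock);
	raw_spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
			     SINGLE_DEPTH_NESTING);

	/* ... work on both ap_list_heads ... */

	raw_spin_unlock(&second->arch.vgic_cpu.ap_list_lock);
	raw_spin_unlock(&first->arch.vgic_cpu.ap_list_lock);
}

vgic_prune_ap_list() further down follows exactly this scheme when it migrates an interrupt between two VCPUs.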
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
	struct vgic_irq *irq = NULL;
	unsigned long flags;

-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
	irq = NULL;

out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
	if (irq->intid < VGIC_MIN_LPI)
		return;

-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+		raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
		return;
	};

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	kfree(irq);
}
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
	bool penda, pendb;
	int ret;

-	spin_lock(&irqa->irq_lock);
-	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&irqa->irq_lock);
+	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
-	spin_unlock(&irqb->irq_lock);
-	spin_unlock(&irqa->irq_lock);
+	raw_spin_unlock(&irqb->irq_lock);
+	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}
@@ -325,7 +325,7 @@ retry:
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

-	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
-	spin_lock(&irq->irq_lock);
+	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
-		spin_unlock(&irq->irq_lock);
-		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+		raw_spin_unlock(&irq->irq_lock);
+		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+					   flags);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}
@@ -382,8 +383,8 @@ retry:
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

-	spin_unlock(&irq->irq_lock);
-	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_unlock(&irq->irq_lock);
+	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);
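Editor's note: the vgic_queue_irq_unlock() hunks above preserve a subtle pattern that the raw-lock conversion does not change: the lock order is ap_list_lock before irq_lock, so the function must drop the irq_lock it was entered with, take the target VCPU's ap_list_lock, retake the irq_lock, and then re-check that nothing changed while no lock was held, retrying if it did. A condensed, self-contained sketch of that shape, with invented sketch_* names (this is not the kernel's code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_vcpu {
	raw_spinlock_t ap_list_lock;
	struct list_head ap_list_head;
};

struct sketch_irq {
	raw_spinlock_t lock;
	struct list_head node;
	struct sketch_vcpu *target;
	bool queued;
};

/* Called with irq->lock held and interrupts disabled (flags from the caller). */
static void sketch_queue(struct sketch_irq *irq, unsigned long flags)
{
	struct sketch_vcpu *vcpu;

retry:
	vcpu = irq->target;
	if (!vcpu || irq->queued) {
		/* Nothing to do: just release the lock we were handed. */
		raw_spin_unlock_irqrestore(&irq->lock, flags);
		return;
	}

	/* Lock order is ap_list_lock -> irq->lock, so drop irq->lock first. */
	raw_spin_unlock_irqrestore(&irq->lock, flags);

	raw_spin_lock_irqsave(&vcpu->ap_list_lock, flags);
	raw_spin_lock(&irq->lock);

	/* Re-check everything that may have changed while no lock was held. */
	if (irq->queued || irq->target != vcpu) {
		raw_spin_unlock(&irq->lock);
		raw_spin_unlock_irqrestore(&vcpu->ap_list_lock, flags);

		raw_spin_lock_irqsave(&irq->lock, flags);
		goto retry;
	}

	list_add_tail(&irq->node, &vcpu->ap_list_head);
	irq->queued = true;

	raw_spin_unlock(&irq->lock);
	raw_spin_unlock_irqrestore(&vcpu->ap_list_lock, flags);
}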
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
	if (!irq)
		return -EINVAL;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,

	BUG_ON(!irq);

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
	if (!irq->hw)
		goto out;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
-	spin_lock(&vgic_cpu->ap_list_lock);
+	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);
@@ -616,7 +617,7 @@ retry:
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

-		spin_unlock(&irq->irq_lock);
-		spin_unlock(&vgic_cpu->ap_list_lock);
+		raw_spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
			vcpuB = vcpu;
		}

-		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
-		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
-				 SINGLE_DEPTH_NESTING);
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+				      SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
			target_vcpu_needs_kick = true;
		}

-		spin_unlock(&irq->irq_lock);
-		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
		goto retry;
	}

-	spin_unlock(&vgic_cpu->ap_list_lock);
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		w = vgic_irq_get_lr_count(irq);
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
			break;
		}
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
				prio = irq->priority;
		}

-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

-	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
-	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)

	vgic_get_vmcr(vcpu, &vmcr);

-	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled &&
			  !irq->active &&
			  irq->priority < vmcr.pmr;
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

-	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}
-
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5ecea812cb6a..585845203db8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3000,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
	if (ops->init)
		ops->init(dev);

+	kvm_get_kvm(kvm);
	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
	if (ret < 0) {
+		kvm_put_kvm(kvm);
		mutex_lock(&kvm->lock);
		list_del(&dev->vm_node);
		mutex_unlock(&kvm->lock);
@@ -3009,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
		return ret;
	}

-	kvm_get_kvm(kvm);
	cd->fd = ret;
	return 0;
}
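Editor's note: the kvm_main.c hunk above moves kvm_get_kvm() in front of anon_inode_getfd() and adds a matching kvm_put_kvm() to the error path. The apparent point of the reordering is that once the file descriptor has been installed, its release path may drop a reference to the VM at any moment, so that reference has to exist before the fd can possibly be closed; if fd creation fails, the creator drops the reference again itself. A generic, self-contained illustration of the same ordering (the demo_* names are invented, not KVM's):

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
};

static void demo_free(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	/* a real user would add a .release that drops the reference taken below */
};

static int demo_publish(struct demo_obj *obj)
{
	int fd;

	kref_get(&obj->ref);	/* reference that will be owned by the new fd */
	fd = anon_inode_getfd("demo", &demo_fops, obj, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		kref_put(&obj->ref, demo_free);	/* the fd was never created */
		return fd;
	}

	/* From here on, closing the fd is responsible for dropping the reference. */
	return fd;
}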