diff options
645 files changed, 7692 insertions, 5202 deletions
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt index f4445e5a2bbb..1e097037349c 100644 --- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt +++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt @@ -22,6 +22,8 @@ Optional Properties: - pclkN, clkN: Pairs of parent of input clock and input clock to the devices in this power domain. Maximum of 4 pairs (N = 0 to 3) are supported currently. +- power-domains: phandle pointing to the parent power domain, for more details + see Documentation/devicetree/bindings/power/power_domain.txt Node of a device using power domains must have a power-domains property defined with a phandle to respective power domain. diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt index d70ec358736c..8d27f6b084c7 100644 --- a/Documentation/devicetree/bindings/arm/sti.txt +++ b/Documentation/devicetree/bindings/arm/sti.txt @@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties: Required root node property: compatible = "st,stih407"; +Boards with the ST STiH410 SoC shall have the following properties: +Required root node property: +compatible = "st,stih410"; + Boards with the ST STiH418 SoC shall have the following properties: Required root node property: compatible = "st,stih418"; diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt index cfcc52705ed8..6151999c5dca 100644 --- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt @@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in APM X-Gene SoC. Required properties for all the ethernet interfaces: -- compatible: Should be "apm,xgene-enet" +- compatible: Should state binding information from the following list, + - "apm,xgene-enet": RGMII based 1G interface + - "apm,xgene1-sgenet": SGMII based 1G interface + - "apm,xgene1-xgenet": XFI based 10G interface - reg: Address and length of the register set for the device. It contains the information of registers in the same order as described by reg-names - reg-names: Should contain the register set names diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 98c16672ab5f..0f8ed3710c66 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt @@ -19,6 +19,16 @@ Required properties: providing multiple PM domains (e.g. power controllers), but can be any value as specified by device tree binding documentation of particular provider. +Optional properties: + - power-domains : A phandle and PM domain specifier as defined by bindings of + the power controller specified by phandle. + Some power domains might be powered from another power domain (or have + other hardware specific dependencies). For representing such dependency + a standard PM domain consumer binding is used. When provided, all domains + created by the given provider should be subdomains of the domain + specified by this binding. More details about power domain specifier are + available in the next section. + Example: power: power-controller@12340000 { @@ -30,6 +40,25 @@ Example: The node above defines a power controller that is a PM domain provider and expects one cell as its phandle argument. 
+Example 2: + + parent: power-controller@12340000 { + compatible = "foo,power-controller"; + reg = <0x12340000 0x1000>; + #power-domain-cells = <1>; + }; + + child: power-controller@12340000 { + compatible = "foo,power-controller"; + reg = <0x12341000 0x1000>; + power-domains = <&parent 0>; + #power-domain-cells = <1>; + }; + +The nodes above define two power controllers: 'parent' and 'child'. +Domains created by the 'child' power controller are subdomains of '0' power +domain provided by the 'parent' power controller. + ==PM domain consumers== Required properties: diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt index 91d5ab0e60fc..91d5ab0e60fc 100644 --- a/Documentation/devicetree/bindings/serial/of-serial.txt +++ b/Documentation/devicetree/bindings/serial/8250.txt diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt new file mode 100644 index 000000000000..ebcbb62c0a76 --- /dev/null +++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt @@ -0,0 +1,19 @@ +ETRAX FS UART + +Required properties: +- compatible : "axis,etraxfs-uart" +- reg: offset and length of the register set for the device. +- interrupts: device interrupt + +Optional properties: +- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD + line respectively. + +Example: + +serial@b00260000 { + compatible = "axis,etraxfs-uart"; + reg = <0xb0026000 0x1000>; + interrupts = <68>; + status = "disabled"; +}; diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt index 56742bc70218..7d44eae7ab0b 100644 --- a/Documentation/devicetree/bindings/submitting-patches.txt +++ b/Documentation/devicetree/bindings/submitting-patches.txt @@ -12,6 +12,9 @@ I. For patch submitters devicetree@vger.kernel.org + and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify + all of the DT maintainers. + 3) The Documentation/ portion of the patch should come in the series before the code implementing the binding. diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 389ca1347a77..fae26d014aaf 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -20,6 +20,7 @@ amlogic Amlogic, Inc. ams AMS AG amstaos AMS-Taos Inc. apm Applied Micro Circuits Corporation (APM) +arasan Arasan Chip Systems arm ARM Ltd. armadeus ARMadeus Systems SARL asahi-kasei Asahi Kasei Corp. @@ -27,6 +28,7 @@ atmel Atmel Corporation auo AU Optronics Corporation avago Avago Technologies avic Shanghai AVIC Optoelectronics Co., Ltd. +axis Axis Communications AB bosch Bosch Sensortec GmbH brcm Broadcom Corporation buffalo Buffalo, Inc. diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt index f90e294d7631..a4d869744f59 100644 --- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt @@ -26,6 +26,11 @@ Optional properties: - atmel,disable : Should be present if you want to disable the watchdog. - atmel,idle-halt : Should be present if you want to stop the watchdog when entering idle state. 
+ CAUTION: This property should be used with care, it actually makes the + watchdog not counting when the CPU is in idle state, therefore the + watchdog reset time depends on mean CPU usage and will not reset at all + if the CPU stop working while it is in idle state, which is probably + not what you want. - atmel,dbg-halt : Should be present if you want to stop the watchdog when entering debug state. diff --git a/MAINTAINERS b/MAINTAINERS index 6239a305dff0..1de6afa8ee51 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1030,6 +1030,16 @@ F: arch/arm/mach-mxs/ F: arch/arm/boot/dts/imx* F: arch/arm/configs/imx*_defconfig +ARM/FREESCALE VYBRID ARM ARCHITECTURE +M: Shawn Guo <shawn.guo@linaro.org> +M: Sascha Hauer <kernel@pengutronix.de> +R: Stefan Agner <stefan@agner.ch> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +F: arch/arm/mach-imx/*vf610* +F: arch/arm/boot/dts/vf* + ARM/GLOMATION GESBC9312SX MACHINE SUPPORT M: Lennert Buytenhek <kernel@wantstofly.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -1176,7 +1186,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-mvebu/ -F: drivers/rtc/armada38x-rtc +F: drivers/rtc/rtc-armada38x.c ARM/Marvell Berlin SoC support M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> @@ -1188,6 +1198,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support M: Jason Cooper <jason@lakedaemon.net> M: Andrew Lunn <andrew@lunn.ch> M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> +M: Gregory Clement <gregory.clement@free-electrons.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-dove/ @@ -1351,6 +1362,7 @@ F: drivers/i2c/busses/i2c-rk3x.c F: drivers/*/*rockchip* F: drivers/*/*/*rockchip* F: sound/soc/rockchip/ +N: rockchip ARM/SAMSUNG EXYNOS ARM ARCHITECTURES M: Kukjin Kim <kgene@kernel.org> @@ -1664,8 +1676,8 @@ F: drivers/misc/eeprom/at24.c F: include/linux/platform_data/at24.h ATA OVER ETHERNET (AOE) DRIVER -M: "Ed L. Cashin" <ecashin@coraid.com> -W: http://support.coraid.com/support/linux +M: "Ed L. 
Cashin" <ed.cashin@acm.org> +W: http://www.openaoe.org/ S: Supported F: Documentation/aoe/ F: drivers/block/aoe/ @@ -1730,7 +1742,7 @@ S: Maintained F: drivers/net/ethernet/atheros/ ATM -M: Chas Williams <chas@cmf.nrl.navy.mil> +M: Chas Williams <3chas3@gmail.com> L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers) L: netdev@vger.kernel.org W: http://linux-atm.sourceforge.net @@ -2107,7 +2119,6 @@ F: drivers/net/ethernet/broadcom/bnx2x/ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE M: Christian Daudt <bcm@fixthebug.org> -M: Matt Porter <mporter@linaro.org> M: Florian Fainelli <f.fainelli@gmail.com> L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/mach-bcm @@ -2369,8 +2380,9 @@ F: arch/x86/include/asm/tce.h CAN NETWORK LAYER M: Oliver Hartkopp <socketcan@hartkopp.net> +M: Marc Kleine-Budde <mkl@pengutronix.de> L: linux-can@vger.kernel.org -W: http://gitorious.org/linux-can +W: https://github.com/linux-can T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained @@ -2386,7 +2398,7 @@ CAN NETWORK DRIVERS M: Wolfgang Grandegger <wg@grandegger.com> M: Marc Kleine-Budde <mkl@pengutronix.de> L: linux-can@vger.kernel.org -W: http://gitorious.org/linux-can +W: https://github.com/linux-can T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained @@ -3241,6 +3253,13 @@ S: Maintained F: Documentation/hwmon/dme1737 F: drivers/hwmon/dme1737.c +DMI/SMBIOS SUPPORT +M: Jean Delvare <jdelvare@suse.de> +S: Maintained +F: drivers/firmware/dmi-id.c +F: drivers/firmware/dmi_scan.c +F: include/linux/dmi.h + DOCKING STATION DRIVER M: Shaohua Li <shaohua.li@intel.com> L: linux-acpi@vger.kernel.org @@ -10196,6 +10215,13 @@ S: Maintained F: Documentation/usb/ohci.txt F: drivers/usb/host/ohci* +USB OTG FSM (Finite State Machine) +M: Peter Chen <Peter.Chen@freescale.com> +T: git git://github.com/hzpeterchen/linux-usb.git +L: linux-usb@vger.kernel.org +S: Maintained +F: drivers/usb/common/usb-otg-fsm.c + USB OVER IP DRIVER M: Valentina Manea <valentina.manea.m@gmail.com> M: Shuah Khan <shuah.kh@samsung.com> @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc6 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 114234e83caa..edda76fae83f 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { int err; - err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, + err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs, sizeof(sf->uc.uc_mcontext.regs.scratch)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); @@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) if (!err) set_current_blocked(&set); - err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), + err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), sizeof(sf->uc.uc_mcontext.regs.scratch)); return err; @@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn) /* Don't restart from sigreturn */ syscall_wont_restart(regs); + /* + * Ensure that sigreturn always returns to user mode (in case the + * regs saved on user stack got fudged between save and sigreturn) + * Otherwise it is easy to panic the kernel with a 
custom + * signal handler and/or restorer which clobberes the status32/ret + * to return to a bogus location in kernel mode. + */ + regs->status32 |= STATUS_U_MASK; + return regs->r0; badframe: @@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) /* * handler returns using sigreturn stub provided already by userpsace + * If not, nuke the process right away */ - BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); + if(!(ksig->ka.sa.sa_flags & SA_RESTORER)) + return 1; + regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; /* User Stack for signal handler will be above the frame just carved */ @@ -296,12 +308,12 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); - int ret; + int failed; /* Set up the stack frame */ - ret = setup_rt_frame(ksig, oldset, regs); + failed = setup_rt_frame(ksig, oldset, regs); - signal_setup_done(ret, ksig, 0); + signal_setup_done(failed, ksig, 0); } void do_signal(struct pt_regs *regs) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9f1f09a2bc9b..cf4c0c99aa25 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -619,6 +619,7 @@ config ARCH_PXA select GENERIC_CLOCKEVENTS select GPIO_PXA select HAVE_IDE + select IRQ_DOMAIN select MULTI_IRQ_HANDLER select PLAT_PXA select SPARSE_IRQ diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7f99cd652203..eb7bb511f853 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN) += berlin machine-$(CONFIG_ARCH_CLPS711X) += clps711x machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx machine-$(CONFIG_ARCH_DAVINCI) += davinci +machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor machine-$(CONFIG_ARCH_DOVE) += dove machine-$(CONFIG_ARCH_EBSA110) += ebsa110 machine-$(CONFIG_ARCH_EFM32) += efm32 diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 2c6248d9a9ef..c3255e0c90aa 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -301,3 +301,11 @@ cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; cd-inverted; }; + +&aes { + status = "okay"; +}; + +&sham { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts index 83d40f7655e5..6b8493720424 100644 --- a/arch/arm/boot/dts/am335x-bone.dts +++ b/arch/arm/boot/dts/am335x-bone.dts @@ -24,11 +24,3 @@ &mmc1 { vmmc-supply = <&ldo3_reg>; }; - -&sham { - status = "okay"; -}; - -&aes { - status = "okay"; -}; diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts index 7266a00aab2e..5c5667a3624d 100644 --- a/arch/arm/boot/dts/am335x-lxm.dts +++ b/arch/arm/boot/dts/am335x-lxm.dts @@ -328,6 +328,10 @@ dual_emac_res_vlan = <3>; }; +&phy_sel { + rmii-clock-ext; +}; + &mac { pinctrl-names = "default", "sleep"; pinctrl-0 = <&cpsw_default>; diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi index 712edce7d6fb..071b56aa0c7e 100644 --- a/arch/arm/boot/dts/am33xx-clocks.dtsi +++ b/arch/arm/boot/dts/am33xx-clocks.dtsi @@ -99,7 +99,7 @@ ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <0>; reg = <0x0664>; }; @@ -107,7 +107,7 @@ ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <1>; reg = <0x0664>; }; @@ -115,7 +115,7 @@ ehrpwm2_tbclk: 
ehrpwm2_tbclk@44e10664 { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <2>; reg = <0x0664>; }; diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi index c7dc9dab93a4..cfb49686ab6a 100644 --- a/arch/arm/boot/dts/am43xx-clocks.dtsi +++ b/arch/arm/boot/dts/am43xx-clocks.dtsi @@ -107,7 +107,7 @@ ehrpwm0_tbclk: ehrpwm0_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <0>; reg = <0x0664>; }; @@ -115,7 +115,7 @@ ehrpwm1_tbclk: ehrpwm1_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <1>; reg = <0x0664>; }; @@ -123,7 +123,7 @@ ehrpwm2_tbclk: ehrpwm2_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <2>; reg = <0x0664>; }; @@ -131,7 +131,7 @@ ehrpwm3_tbclk: ehrpwm3_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <4>; reg = <0x0664>; }; @@ -139,7 +139,7 @@ ehrpwm4_tbclk: ehrpwm4_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <5>; reg = <0x0664>; }; @@ -147,7 +147,7 @@ ehrpwm5_tbclk: ehrpwm5_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <6>; reg = <0x0664>; }; diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index fff0ee69aab4..e7f0a4ae271c 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -494,12 +494,12 @@ pinctrl_usart3_rts: usart3_rts-0 { atmel,pins = - <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC8 periph B */ + <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; }; pinctrl_usart3_cts: usart3_cts-0 { atmel,pins = - <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC10 periph B */ + <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; }; }; @@ -853,7 +853,7 @@ }; usb1: gadget@fffa4000 { - compatible = "atmel,at91rm9200-udc"; + compatible = "atmel,at91sam9260-udc"; reg = <0xfffa4000 0x4000>; interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; clocks = <&udc_clk>, <&udpck>; @@ -976,7 +976,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi index e247b0b5fdab..d55fdf2487ef 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi @@ -124,11 +124,12 @@ }; usb1: gadget@fffa4000 { - compatible = "atmel,at91rm9200-udc"; + compatible = "atmel,at91sam9261-udc"; reg = <0xfffa4000 0x4000>; interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&udc_clk>, <&udpck>; - clock-names = "usb_clk", "udc_clk", "udpck"; + clocks = <&udc_clk>, <&udpck>; + clock-names = "pclk", "hclk"; + atmel,matrix = <&matrix>; status = "disabled"; }; @@ -262,7 +263,7 @@ }; matrix: matrix@ffffee00 { - compatible = "atmel,at91sam9260-bus-matrix"; + compatible = "atmel,at91sam9260-bus-matrix", "syscon"; reg = <0xffffee00 0x200>; }; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 1f67bb4c144e..fce301c4e9d6 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -69,7 +69,7 @@ sram1: sram@00500000 { compatible = "mmio-sram"; - 
reg = <0x00300000 0x4000>; + reg = <0x00500000 0x4000>; }; ahb { @@ -856,7 +856,7 @@ }; usb1: gadget@fff78000 { - compatible = "atmel,at91rm9200-udc"; + compatible = "atmel,at91sam9263-udc"; reg = <0xfff78000 0x4000>; interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; clocks = <&udc_clk>, <&udpck>; @@ -905,7 +905,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index ee80aa9c0759..488af63d5174 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -1116,7 +1116,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; @@ -1301,7 +1300,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00800000 0x100000>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index c2666a7cb5b1..0c53a375ba99 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi @@ -894,7 +894,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 818dabdd8c0e..d221179d0f1a 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -1066,7 +1066,7 @@ reg = <0x00500000 0x80000 0xf803c000 0x400>; interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; - clocks = <&usb>, <&udphs_clk>; + clocks = <&utmi>, <&udphs_clk>; clock-names = "hclk", "pclk"; status = "disabled"; @@ -1130,7 +1130,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; @@ -1186,7 +1185,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00700000 0x100000>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "uhpck"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index d3a29c1b8417..afe678f6d2e9 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts @@ -36,6 +36,20 @@ >; }; + mmc_pins: pinmux_mmc_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */ + DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */ + DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */ + DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */ + DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */ + DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */ + DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT2 */ + DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */ + DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */ + >; + }; + usb0_pins: pinmux_usb0_pins { pinctrl-single,pins = < DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ @@ -137,7 +151,12 @@ }; &mmc1 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc_pins>; vmmc-supply = <&vmmcsd_fixed>; + bus-width = <4>; + cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; }; /* At least dm8168-evm rev c won't support multipoint, later may */ diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index 3c97b5f2addc..f35715bc6992 
100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -150,17 +150,27 @@ }; gpio1: gpio@48032000 { - compatible = "ti,omap3-gpio"; + compatible = "ti,omap4-gpio"; ti,hwmods = "gpio1"; + ti,gpio-always-on; reg = <0x48032000 0x1000>; - interrupts = <97>; + interrupts = <96>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; }; gpio2: gpio@4804c000 { - compatible = "ti,omap3-gpio"; + compatible = "ti,omap4-gpio"; ti,hwmods = "gpio2"; + ti,gpio-always-on; reg = <0x4804c000 0x1000>; - interrupts = <99>; + interrupts = <98>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; }; gpmc: gpmc@50000000 { diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 3290a96ba586..7563d7ce01bb 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -263,17 +263,15 @@ dcan1_pins_default: dcan1_pins_default { pinctrl-single,pins = < - 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ + 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ + 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ >; }; dcan1_pins_sleep: dcan1_pins_sleep { pinctrl-single,pins = < - 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (MUX_MODE15) /* wakeup0.off */ + 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ + 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ >; }; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 127608d79033..c4659a979c41 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1111,7 +1111,6 @@ "wkupclk", "refclk", "div-clk", "phy-div"; #phy-cells = <0>; - ti,hwmods = "pcie1-phy"; }; pcie2_phy: pciephy@4a095000 { @@ -1130,7 +1129,6 @@ "wkupclk", "refclk", "div-clk", "phy-div"; #phy-cells = <0>; - ti,hwmods = "pcie2-phy"; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index e0264d0bf7b9..40ed539ce474 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -119,17 +119,15 @@ dcan1_pins_default: dcan1_pins_default { pinctrl-single,pins = < - 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ + 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ + 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ >; }; dcan1_pins_sleep: dcan1_pins_sleep { pinctrl-single,pins = < - 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (MUX_MODE15) /* wakeup0.off */ + 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ + 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ >; }; diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 4bdcbd61ce47..99b09a44e269 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi @@ -243,10 +243,18 @@ ti,invert-autoidle-bit; }; + dpll_core_byp_mux: dpll_core_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x012c>; + }; + dpll_core_ck: dpll_core_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-core-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_core_byp_mux>; reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; }; @@ -309,10 +317,18 @@ clock-div = <1>; }; + 
dpll_dsp_byp_mux: dpll_dsp_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x0240>; + }; + dpll_dsp_ck: dpll_dsp_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>; reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; }; @@ -335,10 +351,18 @@ clock-div = <1>; }; + dpll_iva_byp_mux: dpll_iva_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x01ac>; + }; + dpll_iva_ck: dpll_iva_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>; reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; }; @@ -361,10 +385,18 @@ clock-div = <1>; }; + dpll_gpu_byp_mux: dpll_gpu_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x02e4>; + }; + dpll_gpu_ck: dpll_gpu_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>; reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; }; @@ -398,10 +430,18 @@ clock-div = <1>; }; + dpll_ddr_byp_mux: dpll_ddr_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x021c>; + }; + dpll_ddr_ck: dpll_ddr_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>; reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; }; @@ -416,10 +456,18 @@ ti,invert-autoidle-bit; }; + dpll_gmac_byp_mux: dpll_gmac_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x02b4>; + }; + dpll_gmac_ck: dpll_gmac_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>; reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; }; @@ -482,10 +530,18 @@ clock-div = <1>; }; + dpll_eve_byp_mux: dpll_eve_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x0290>; + }; + dpll_eve_ck: dpll_eve_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>; reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; }; @@ -1249,10 +1305,18 @@ clock-div = <1>; }; + dpll_per_byp_mux: dpll_per_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x014c>; + }; + dpll_per_ck: dpll_per_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_per_byp_mux>; reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; }; @@ -1275,10 +1339,18 @@ clock-div = <1>; }; + dpll_usb_byp_mux: dpll_usb_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x018c>; + }; + dpll_usb_ck: dpll_usb_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-j-type-clock"; - clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; + 
clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>; reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; }; diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 277b48b0b6f9..ac6b0ae42caf 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -18,6 +18,7 @@ */ #include "skeleton.dtsi" +#include "exynos4-cpu-thermal.dtsi" #include <dt-bindings/clock/exynos3250.h> / { @@ -193,6 +194,7 @@ interrupts = <0 216 0>; clocks = <&cmu CLK_TMU_APBIF>; clock-names = "tmu_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi new file mode 100644 index 000000000000..735cb2f10817 --- /dev/null +++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi @@ -0,0 +1,52 @@ +/* + * Device tree sources for Exynos4 thermal zone + * + * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <dt-bindings/thermal/thermal.h> + +/ { +thermal-zones { + cpu_thermal: cpu-thermal { + thermal-sensors = <&tmu 0>; + polling-delay-passive = <0>; + polling-delay = <0>; + trips { + cpu_alert0: cpu-alert-0 { + temperature = <70000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu_alert1: cpu-alert-1 { + temperature = <95000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu_alert2: cpu-alert-2 { + temperature = <110000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu_crit0: cpu-crit-0 { + temperature = <120000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "critical"; + }; + }; + cooling-maps { + map0 { + trip = <&cpu_alert0>; + }; + map1 { + trip = <&cpu_alert1>; + }; + }; + }; +}; +}; diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index 76173cacd450..77ea547768f4 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -38,6 +38,7 @@ i2c5 = &i2c_5; i2c6 = &i2c_6; i2c7 = &i2c_7; + i2c8 = &i2c_8; csis0 = &csis_0; csis1 = &csis_1; fimc0 = &fimc_0; @@ -104,6 +105,7 @@ compatible = "samsung,exynos4210-pd"; reg = <0x10023C20 0x20>; #power-domain-cells = <0>; + power-domains = <&pd_lcd0>; }; pd_cam: cam-power-domain@10023C00 { @@ -554,6 +556,22 @@ status = "disabled"; }; + i2c_8: i2c@138E0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "samsung,s3c2440-hdmiphy-i2c"; + reg = <0x138E0000 0x100>; + interrupts = <0 93 0>; + clocks = <&clock CLK_I2C_HDMI>; + clock-names = "i2c"; + status = "disabled"; + + hdmi_i2c_phy: hdmiphy@38 { + compatible = "exynos4210-hdmiphy"; + reg = <0x38>; + }; + }; + spi_0: spi@13920000 { compatible = "samsung,exynos4210-spi"; reg = <0x13920000 0x100>; @@ -663,6 +681,33 @@ status = "disabled"; }; + tmu: tmu@100C0000 { + #include "exynos4412-tmu-sensor-conf.dtsi" + }; + + hdmi: hdmi@12D00000 { + compatible = "samsung,exynos4210-hdmi"; + reg = <0x12D00000 0x70000>; + interrupts = <0 92 0>; + clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy", + "mout_hdmi"; + clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, + <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, + <&clock CLK_MOUT_HDMI>; + phy = <&hdmi_i2c_phy>; + power-domains = <&pd_tv>; + samsung,syscon-phandle = 
<&pmu_system_controller>; + status = "disabled"; + }; + + mixer: mixer@12C10000 { + compatible = "samsung,exynos4210-mixer"; + interrupts = <0 91 0>; + reg = <0x12C10000 0x2100>, <0x12c00000 0x300>; + power-domains = <&pd_tv>; + status = "disabled"; + }; + ppmu_dmc0: ppmu_dmc0@106a0000 { compatible = "samsung,exynos-ppmu"; reg = <0x106a0000 0x2000>; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 3d6652a4b6cb..32c5fd8f6269 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -426,6 +426,25 @@ status = "okay"; }; + tmu@100C0000 { + status = "okay"; + }; + + thermal-zones { + cpu_thermal: cpu-thermal { + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 2 2>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 4 4>; + }; + }; + }; + }; + camera { pinctrl-names = "default"; pinctrl-0 = <>; diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index b57e6b82ea20..d4f2b11319dd 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts @@ -505,6 +505,63 @@ assigned-clock-rates = <0>, <160000000>; }; }; + + hdmi_en: voltage-regulator-hdmi-5v { + compatible = "regulator-fixed"; + regulator-name = "HDMI_5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpe0 1 0>; + enable-active-high; + }; + + hdmi_ddc: i2c-ddc { + compatible = "i2c-gpio"; + gpios = <&gpe4 2 0 &gpe4 3 0>; + i2c-gpio,delay-us = <100>; + #address-cells = <1>; + #size-cells = <0>; + + pinctrl-0 = <&i2c_ddc_bus>; + pinctrl-names = "default"; + status = "okay"; + }; + + mixer@12C10000 { + status = "okay"; + }; + + hdmi@12D00000 { + hpd-gpio = <&gpx3 7 0>; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_hpd>; + hdmi-en-supply = <&hdmi_en>; + vdd-supply = <&ldo3_reg>; + vdd_osc-supply = <&ldo4_reg>; + vdd_pll-supply = <&ldo3_reg>; + ddc = <&hdmi_ddc>; + status = "okay"; + }; + + i2c@138E0000 { + status = "okay"; + }; +}; + +&pinctrl_1 { + hdmi_hpd: hdmi-hpd { + samsung,pins = "gpx3-7"; + samsung,pin-pud = <0>; + }; +}; + +&pinctrl_0 { + i2c_ddc_bus: i2c-ddc-bus { + samsung,pins = "gpe4-2", "gpe4-3"; + samsung,pin-function = <2>; + samsung,pin-pud = <3>; + samsung,pin-drv = <0>; + }; }; &mdma1 { diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 67c832c9dcf1..be89f83f70e7 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi @@ -21,6 +21,7 @@ #include "exynos4.dtsi" #include "exynos4210-pinctrl.dtsi" +#include "exynos4-cpu-thermal.dtsi" / { compatible = "samsung,exynos4210", "samsung,exynos4"; @@ -35,10 +36,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@900 { + cpu0: cpu@900 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0x900>; + cooling-min-level = <4>; + cooling-max-level = <2>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@901 { @@ -153,16 +157,38 @@ reg = <0x03860000 0x1000>; }; - tmu@100C0000 { + tmu: tmu@100C0000 { compatible = "samsung,exynos4210-tmu"; interrupt-parent = <&combiner>; reg = <0x100C0000 0x100>; interrupts = <2 4>; clocks = <&clock CLK_TMU_APBIF>; clock-names = "tmu_apbif"; + samsung,tmu_gain = <15>; + samsung,tmu_reference_voltage = <7>; status = "disabled"; }; + thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tmu 0>; + 
+ trips { + cpu_alert0: cpu-alert-0 { + temperature = <85000>; /* millicelsius */ + }; + cpu_alert1: cpu-alert-1 { + temperature = <100000>; /* millicelsius */ + }; + cpu_alert2: cpu-alert-2 { + temperature = <110000>; /* millicelsius */ + }; + }; + }; + }; + g2d@12800000 { compatible = "samsung,s5pv210-g2d"; reg = <0x12800000 0x1000>; @@ -203,6 +229,14 @@ }; }; + mixer: mixer@12C10000 { + clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer", + "sclk_mixer"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>, + <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>; + }; + ppmu_lcd1: ppmu_lcd1@12240000 { compatible = "samsung,exynos-ppmu"; reg = <0x12240000 0x2000>; diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi index dd0a43ec56da..5be03288f1ee 100644 --- a/arch/arm/boot/dts/exynos4212.dtsi +++ b/arch/arm/boot/dts/exynos4212.dtsi @@ -26,10 +26,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@A00 { + cpu0: cpu@A00 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0xA00>; + cooling-min-level = <13>; + cooling-max-level = <7>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@A01 { diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index de80b5bba204..adb4f6a97a1d 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -249,6 +249,20 @@ regulator-always-on; }; + ldo8_reg: ldo@8 { + regulator-compatible = "LDO8"; + regulator-name = "VDD10_HDMI_1.0V"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + + ldo10_reg: ldo@10 { + regulator-compatible = "LDO10"; + regulator-name = "VDDQ_MIPIHSI_1.8V"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + ldo11_reg: LDO11 { regulator-name = "VDD18_ABB1_1.8V"; regulator-min-microvolt = <1800000>; @@ -411,6 +425,51 @@ ehci: ehci@12580000 { status = "okay"; }; + + tmu@100C0000 { + vtmu-supply = <&ldo10_reg>; + status = "okay"; + }; + + thermal-zones { + cpu_thermal: cpu-thermal { + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 7 7>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 13 13>; + }; + }; + }; + }; + + mixer: mixer@12C10000 { + status = "okay"; + }; + + hdmi@12D00000 { + hpd-gpio = <&gpx3 7 0>; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_hpd>; + vdd-supply = <&ldo8_reg>; + vdd_osc-supply = <&ldo10_reg>; + vdd_pll-supply = <&ldo8_reg>; + ddc = <&hdmi_ddc>; + status = "okay"; + }; + + hdmi_ddc: i2c@13880000 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_bus>; + }; + + i2c@138E0000 { + status = "okay"; + }; }; &pinctrl_1 { @@ -425,4 +484,9 @@ samsung,pin-pud = <0>; samsung,pin-drv = <0>; }; + + hdmi_hpd: hdmi-hpd { + samsung,pins = "gpx3-7"; + samsung,pin-pud = <1>; + }; }; diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..e3f7934d19d0 --- /dev/null +++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi @@ -0,0 +1,24 @@ +/* + * Device tree sources for Exynos4412 TMU sensor configuration + * + * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <dt-bindings/thermal/thermal_exynos.h> + +#thermal-sensor-cells = <0>; +samsung,tmu_gain = <8>; +samsung,tmu_reference_voltage = <16>; +samsung,tmu_noise_cancel_mode = <4>; +samsung,tmu_efuse_value = <55>; +samsung,tmu_min_efuse_value = <40>; +samsung,tmu_max_efuse_value = <100>; +samsung,tmu_first_point_trim = <25>; +samsung,tmu_second_point_trim = <85>; +samsung,tmu_default_temp_offset = <50>; +samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 21f748083586..173ffa479ad3 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -927,6 +927,21 @@ pulldown-ohm = <100000>; /* 100K */ io-channels = <&adc 2>; /* Battery temperature */ }; + + thermal-zones { + cpu_thermal: cpu-thermal { + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 7 7>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 13 13>; + }; + }; + }; + }; }; &pmu_system_controller { diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index 0f6ec93bb1d8..68ad43b391ae 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi @@ -26,10 +26,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@A00 { + cpu0: cpu@A00 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0xA00>; + cooling-min-level = <13>; + cooling-max-level = <7>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@A01 { diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi index f5e0ae780d6c..6a6abe14fd9b 100644 --- a/arch/arm/boot/dts/exynos4x12.dtsi +++ b/arch/arm/boot/dts/exynos4x12.dtsi @@ -19,6 +19,7 @@ #include "exynos4.dtsi" #include "exynos4x12-pinctrl.dtsi" +#include "exynos4-cpu-thermal.dtsi" / { aliases { @@ -297,4 +298,15 @@ clock-names = "tmu_apbif"; status = "disabled"; }; + + hdmi: hdmi@12D00000 { + compatible = "samsung,exynos4212-hdmi"; + }; + + mixer: mixer@12C10000 { + compatible = "samsung,exynos4212-mixer"; + clock-names = "mixer", "hdmi", "sclk_hdmi", "vp"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>; + }; }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9bb1b0b738f5..adbde1adad95 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -20,7 +20,7 @@ #include <dt-bindings/clock/exynos5250.h> #include "exynos5.dtsi" #include "exynos5250-pinctrl.dtsi" - +#include "exynos4-cpu-thermal.dtsi" #include <dt-bindings/clock/exynos-audss-clk.h> / { @@ -58,11 +58,14 @@ #address-cells = <1>; #size-cells = <0>; - cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a15"; reg = <0>; clock-frequency = <1700000000>; + cooling-min-level = <15>; + cooling-max-level = <9>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@1 { device_type = "cpu"; @@ -102,6 +105,12 @@ #power-domain-cells = <0>; }; + pd_disp1: disp1-power-domain@100440A0 { + compatible = "samsung,exynos4210-pd"; + reg = <0x100440A0 0x20>; + #power-domain-cells = <0>; + }; + clock: clock-controller@10010000 { compatible = "samsung,exynos5250-clock"; reg = <0x10010000 0x30000>; @@ -235,12 +244,32 @@ status = "disabled"; }; - tmu@10060000 { + tmu: tmu@10060000 { compatible = "samsung,exynos5250-tmu"; reg = <0x10060000 0x100>; interrupts = <0 65 0>; clocks = <&clock CLK_TMU>; clock-names = "tmu_apbif"; + #include 
"exynos4412-tmu-sensor-conf.dtsi" + }; + + thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tmu 0>; + + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 9 9>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 15 15>; + }; + }; + }; }; serial@12C00000 { @@ -719,6 +748,7 @@ hdmi: hdmi { compatible = "samsung,exynos4212-hdmi"; reg = <0x14530000 0x70000>; + power-domains = <&pd_disp1>; interrupts = <0 95 0>; clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, @@ -731,9 +761,11 @@ mixer { compatible = "samsung,exynos5250-mixer"; reg = <0x14450000 0x10000>; + power-domains = <&pd_disp1>; interrupts = <0 94 0>; - clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; - clock-names = "mixer", "sclk_hdmi"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>; + clock-names = "mixer", "hdmi", "sclk_hdmi"; }; dp_phy: video-phy@10040720 { @@ -743,6 +775,7 @@ }; dp: dp-controller@145B0000 { + power-domains = <&pd_disp1>; clocks = <&clock CLK_DP>; clock-names = "dp"; phys = <&dp_phy>; @@ -750,6 +783,7 @@ }; fimd: fimd@14400000 { + power-domains = <&pd_disp1>; clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; clock-names = "sclk_fimd", "fimd"; }; diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi new file mode 100644 index 000000000000..5d31fc140823 --- /dev/null +++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi @@ -0,0 +1,35 @@ +/* + * Device tree sources for default Exynos5420 thermal zone definition + * + * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +polling-delay-passive = <0>; +polling-delay = <0>; +trips { + cpu-alert-0 { + temperature = <85000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu-alert-1 { + temperature = <103000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu-alert-2 { + temperature = <110000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu-crit-0 { + temperature = <1200000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "critical"; + }; +}; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 9dc2e9773b30..c0e98cf3514f 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -740,8 +740,9 @@ compatible = "samsung,exynos5420-mixer"; reg = <0x14450000 0x10000>; interrupts = <0 94 0>; - clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; - clock-names = "mixer", "sclk_hdmi"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>; + clock-names = "mixer", "hdmi", "sclk_hdmi"; power-domains = <&disp_pd>; }; @@ -782,6 +783,7 @@ interrupts = <0 65 0>; clocks = <&clock CLK_TMU>; clock-names = "tmu_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_cpu1: tmu@10064000 { @@ -790,6 +792,7 @@ interrupts = <0 183 0>; clocks = <&clock CLK_TMU>; clock-names = "tmu_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_cpu2: tmu@10068000 { @@ -798,6 +801,7 @@ interrupts = <0 184 0>; clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_cpu3: tmu@1006c000 { @@ -806,6 +810,7 @@ interrupts = <0 185 0>; clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_gpu: tmu@100a0000 { @@ -814,6 +819,30 @@ interrupts = <0 215 0>; clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" + }; + + thermal-zones { + cpu0_thermal: cpu0-thermal { + thermal-sensors = <&tmu_cpu0>; + #include "exynos5420-trip-points.dtsi" + }; + cpu1_thermal: cpu1-thermal { + thermal-sensors = <&tmu_cpu1>; + #include "exynos5420-trip-points.dtsi" + }; + cpu2_thermal: cpu2-thermal { + thermal-sensors = <&tmu_cpu2>; + #include "exynos5420-trip-points.dtsi" + }; + cpu3_thermal: cpu3-thermal { + thermal-sensors = <&tmu_cpu3>; + #include "exynos5420-trip-points.dtsi" + }; + gpu_thermal: gpu-thermal { + thermal-sensors = <&tmu_gpu>; + #include "exynos5420-trip-points.dtsi" + }; }; watchdog: watchdog@101D0000 { diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..7b2fba0ae92b --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi @@ -0,0 +1,24 @@ +/* + * Device tree sources for Exynos5440 TMU sensor configuration + * + * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <dt-bindings/thermal/thermal_exynos.h> + +#thermal-sensor-cells = <0>; +samsung,tmu_gain = <5>; +samsung,tmu_reference_voltage = <16>; +samsung,tmu_noise_cancel_mode = <4>; +samsung,tmu_efuse_value = <0x5d2d>; +samsung,tmu_min_efuse_value = <16>; +samsung,tmu_max_efuse_value = <76>; +samsung,tmu_first_point_trim = <25>; +samsung,tmu_second_point_trim = <70>; +samsung,tmu_default_temp_offset = <25>; +samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi new file mode 100644 index 000000000000..48adfa8f4300 --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi @@ -0,0 +1,25 @@ +/* + * Device tree sources for default Exynos5440 thermal zone definition + * + * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +polling-delay-passive = <0>; +polling-delay = <0>; +trips { + cpu-alert-0 { + temperature = <100000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "active"; + }; + cpu-crit-0 { + temperature = <1050000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "critical"; + }; +}; diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 8f3373cd7b87..59d9416b3b03 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi @@ -219,6 +219,7 @@ interrupts = <0 58 0>; clocks = <&clock CLK_B_125>; clock-names = "tmu_apbif"; + #include "exynos5440-tmu-sensor-conf.dtsi" }; tmuctrl_1: tmuctrl@16011C { @@ -227,6 +228,7 @@ interrupts = <0 58 0>; clocks = <&clock CLK_B_125>; clock-names = "tmu_apbif"; + #include "exynos5440-tmu-sensor-conf.dtsi" }; tmuctrl_2: tmuctrl@160120 { @@ -235,6 +237,22 @@ interrupts = <0 58 0>; clocks = <&clock CLK_B_125>; clock-names = "tmu_apbif"; + #include "exynos5440-tmu-sensor-conf.dtsi" + }; + + thermal-zones { + cpu0_thermal: cpu0-thermal { + thermal-sensors = <&tmuctrl_0>; + #include "exynos5440-trip-points.dtsi" + }; + cpu1_thermal: cpu1-thermal { + thermal-sensors = <&tmuctrl_1>; + #include "exynos5440-trip-points.dtsi" + }; + cpu2_thermal: cpu2-thermal { + thermal-sensors = <&tmuctrl_2>; + #include "exynos5440-trip-points.dtsi" + }; }; sata@210000 { diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index f1cd2147421d..a626e6dd8022 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -35,6 +35,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio3 22 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_usb_h1_vbus: regulator@1 { @@ -45,6 +46,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio1 29 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_audio: regulator@2 { diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index fda4932faefd..945887d3fdb3 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -52,6 +52,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio4 0 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_usb_otg2_vbus: regulator@1 { @@ -62,6 +63,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio4 2 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_aud3v: regulator@2 { diff --git a/arch/arm/boot/dts/omap3.dtsi 
b/arch/arm/boot/dts/omap3.dtsi index f4f78c40b564..3fdc84fddb70 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -92,6 +92,8 @@ ti,hwmods = "aes"; reg = <0x480c5000 0x50>; interrupts = <0>; + dmas = <&sdma 65 &sdma 66>; + dma-names = "tx", "rx"; }; prm: prm@48306000 { @@ -550,6 +552,8 @@ ti,hwmods = "sham"; reg = <0x480c3000 0x64>; interrupts = <49>; + dmas = <&sdma 69>; + dma-names = "rx"; }; smartreflex_core: smartreflex@480cb000 { diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi b/arch/arm/boot/dts/omap5-core-thermal.dtsi index 19212ac6eef0..de8a3d456cf7 100644 --- a/arch/arm/boot/dts/omap5-core-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi @@ -13,7 +13,7 @@ core_thermal: core_thermal { polling-delay-passive = <250>; /* milliseconds */ - polling-delay = <1000>; /* milliseconds */ + polling-delay = <500>; /* milliseconds */ /* sensor ID */ thermal-sensors = <&bandgap 2>; diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi index 1b87aca88b77..bc3090f2e84b 100644 --- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi @@ -13,7 +13,7 @@ gpu_thermal: gpu_thermal { polling-delay-passive = <250>; /* milliseconds */ - polling-delay = <1000>; /* milliseconds */ + polling-delay = <500>; /* milliseconds */ /* sensor ID */ thermal-sensors = <&bandgap 1>; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index ddff674bd05e..4a485b63a141 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -1079,4 +1079,8 @@ }; }; +&cpu_thermal { + polling-delay = <500>; /* milliseconds */ +}; + /include/ "omap54xx-clocks.dtsi" diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index 58c27466f012..83b425fb3ac2 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi @@ -167,10 +167,18 @@ ti,index-starts-at-one; }; + dpll_core_byp_mux: dpll_core_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x012c>; + }; + dpll_core_ck: dpll_core_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-core-clock"; - clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin>, <&dpll_core_byp_mux>; reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; }; @@ -294,10 +302,18 @@ clock-div = <1>; }; + dpll_iva_byp_mux: dpll_iva_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x01ac>; + }; + dpll_iva_ck: dpll_iva_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; + clocks = <&sys_clkin>, <&dpll_iva_byp_mux>; reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; }; @@ -599,10 +615,19 @@ }; }; &cm_core_clocks { + + dpll_per_byp_mux: dpll_per_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x014c>; + }; + dpll_per_ck: dpll_per_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; + clocks = <&sys_clkin>, <&dpll_per_byp_mux>; reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; }; @@ -714,10 +739,18 @@ ti,index-starts-at-one; }; + dpll_usb_byp_mux: dpll_usb_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = 
<0x018c>; + }; + dpll_usb_ck: dpll_usb_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-j-type-clock"; - clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; + clocks = <&sys_clkin>, <&dpll_usb_byp_mux>; reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; }; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index d771f687a13b..eccc78d3220b 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -411,6 +411,7 @@ "mac_clk_rx", "mac_clk_tx", "clk_mac_ref", "clk_mac_refout", "aclk_mac", "pclk_mac"; + status = "disabled"; }; usb_host0_ehci: usb@ff500000 { diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 261311bdf65b..367af53c1b84 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -1248,7 +1248,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; @@ -1416,7 +1415,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00700000 0x100000>; interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "uhpck"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index d986b41b9654..4303874889c6 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -66,6 +66,7 @@ gpio4 = &pioE; tcb0 = &tcb0; tcb1 = &tcb1; + i2c0 = &i2c0; i2c2 = &i2c2; }; cpus { @@ -259,7 +260,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00600000 0x100000>; interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "uhpck"; status = "disabled"; }; @@ -461,8 +462,8 @@ lcdck: lcdck { #clock-cells = <0>; - reg = <4>; - clocks = <&smd>; + reg = <3>; + clocks = <&mck>; }; smdck: smdck { @@ -770,7 +771,7 @@ reg = <50>; }; - lcd_clk: lcd_clk { + lcdc_clk: lcdc_clk { #clock-cells = <0>; reg = <51>; }; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 252c3d1bda50..d9176e606173 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -660,7 +660,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0xfff01000 0x1000>; - interrupts = <0 156 4>; + interrupts = <0 155 4>; num-cs = <4>; clocks = <&spi_m_clk>; status = "disabled"; @@ -713,6 +713,9 @@ reg-shift = <2>; reg-io-width = <4>; clocks = <&l4_sp_clk>; + dmas = <&pdma 28>, + <&pdma 29>; + dma-names = "tx", "rx"; }; uart1: serial1@ffc03000 { @@ -722,6 +725,9 @@ reg-shift = <2>; reg-io-width = <4>; clocks = <&l4_sp_clk>; + dmas = <&pdma 30>, + <&pdma 31>; + dma-names = "tx", "rx"; }; rst: rstmgr@ffd05000 { diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts index ab7891c43231..75742f8f96f3 100644 --- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts +++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts @@ -56,6 +56,22 @@ model = "Olimex A10-OLinuXino-LIME"; compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; + cpus { + cpu0: cpu@0 { + /* + * The A10-Lime is known to be unstable + * when running at 1008 MHz + */ + operating-points = < + /* kHz uV */ + 912000 1350000 + 864000 1300000 + 624000 1250000 + >; + cooling-max-level = <2>; + }; + }; + soc@01c00000 { emac: ethernet@01c0b000 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi 
b/arch/arm/boot/dts/sun4i-a10.dtsi index 5c2925831f20..eebb7853e00b 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -75,7 +75,6 @@ clock-latency = <244144>; /* 8 32k periods */ operating-points = < /* kHz uV */ - 1056000 1500000 1008000 1400000 912000 1350000 864000 1300000 @@ -83,7 +82,7 @@ >; #cooling-cells = <2>; cooling-min-level = <0>; - cooling-max-level = <4>; + cooling-max-level = <3>; }; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index f8818f1edbbe..883cb4873688 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -47,7 +47,6 @@ clock-latency = <244144>; /* 8 32k periods */ operating-points = < /* kHz uV */ - 1104000 1500000 1008000 1400000 912000 1350000 864000 1300000 @@ -57,7 +56,7 @@ >; #cooling-cells = <2>; cooling-min-level = <0>; - cooling-max-level = <6>; + cooling-max-level = <5>; }; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 3a8530b79f1c..fdd181792b4b 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -105,7 +105,6 @@ clock-latency = <244144>; /* 8 32k periods */ operating-points = < /* kHz uV */ - 1008000 1450000 960000 1400000 912000 1400000 864000 1300000 @@ -116,7 +115,7 @@ >; #cooling-cells = <2>; cooling-min-level = <0>; - cooling-max-level = <7>; + cooling-max-level = <6>; }; cpu@1 { diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index f2670f638e97..811e72bbe642 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -70,6 +70,7 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y +CONFIG_ARM_AT91_ETHER=y CONFIG_MACB=y # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_DM9000=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b7e6b6fba5e0..06075b6d2463 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -99,7 +99,7 @@ CONFIG_PCI_RCAR_GEN2=y CONFIG_PCI_RCAR_GEN2_PCIE=y CONFIG_PCIEPORTBUS=y CONFIG_SMP=y -CONFIG_NR_CPUS=8 +CONFIG_NR_CPUS=16 CONFIG_HIGHPTE=y CONFIG_CMA=y CONFIG_ARM_APPENDED_DTB=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index a097cffa1231..8e108599e1af 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -377,6 +377,7 @@ CONFIG_PWM_TWL=m CONFIG_PWM_TWL_LED=m CONFIG_OMAP_USB2=m CONFIG_TI_PIPE3=y +CONFIG_TWL4030_USB=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index 41d856effe6c..510c747c65b4 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig @@ -3,8 +3,6 @@ CONFIG_SYSVIPC=y CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y CONFIG_SLAB=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 38840a812924..8f6a5702b696 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -4,6 +4,7 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_PERF_EVENTS=y CONFIG_ARCH_SUNXI=y CONFIG_SMP=y +CONFIG_NR_CPUS=8 CONFIG_AEABI=y CONFIG_HIGHMEM=y CONFIG_HIGHPTE=y diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index f489fdaa19b8..37fe607a4ede 100644 --- 
a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig @@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y -CONFIG_USB_ISP1760_HCD=y CONFIG_USB_STORAGE=y +CONFIG_USB_ISP1760=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 71e5fc7cfb18..1d1800f71c5b 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped @@ -58,14 +58,18 @@ # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ 7 +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif -#if __ARM_ARCH__>=7 +#if __ARM_MAX_ARCH__>=7 +.arch armv7-a +.fpu neon + .text .syntax unified @ ARMv7-capable assembler is expected to handle this #ifdef __thumb2__ @@ -74,8 +78,6 @@ .code 32 #endif -.fpu neon - .type _bsaes_decrypt8,%function .align 4 _bsaes_decrypt8: @@ -2095,9 +2097,11 @@ bsaes_xts_decrypt: vld1.8 {q8}, [r0] @ initial tweak adr r2, .Lxts_magic +#ifndef XTS_CHAIN_TWEAK tst r9, #0xf @ if not multiple of 16 it ne @ Thumb2 thing, sanity check in ARM subne r9, #0x10 @ subtract another 16 bytes +#endif subs r9, #0x80 blo .Lxts_dec_short diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index be068db960ee..a4d3856e7d24 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl @@ -701,14 +701,18 @@ $code.=<<___; # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ 7 +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif -#if __ARM_ARCH__>=7 +#if __ARM_MAX_ARCH__>=7 +.arch armv7-a +.fpu neon + .text .syntax unified @ ARMv7-capable assembler is expected to handle this #ifdef __thumb2__ @@ -717,8 +721,6 @@ $code.=<<___; .code 32 #endif -.fpu neon - .type _bsaes_decrypt8,%function .align 4 _bsaes_decrypt8: @@ -2076,9 +2078,11 @@ bsaes_xts_decrypt: vld1.8 {@XMM[8]}, [r0] @ initial tweak adr $magic, .Lxts_magic +#ifndef XTS_CHAIN_TWEAK tst $len, #0xf @ if not multiple of 16 it ne @ Thumb2 thing, sanity check in ARM subne $len, #0x10 @ subtract another 16 bytes +#endif subs $len, #0x80 blo .Lxts_dec_short diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index bf0fe99e8ca9..4cf48c3aca13 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) (__boundary - 1 < (end) - 1)? 
__boundary: (end); \ }) +#define kvm_pgd_index(addr) pgd_index(addr) + static inline bool kvm_page_empty(void *ptr) { struct page *ptr_page = virt_to_page(ptr); return page_count(ptr_page) == 1; } - #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) #define kvm_pud_table_empty(kvm, pudp) (0) #define KVM_PREALLOC_LEVEL 0 -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) +static inline void *kvm_get_hwpgd(struct kvm *kvm) { - return 0; + return kvm->arch.pgd; } -static inline void kvm_free_hwpgd(struct kvm *kvm) { } - -static inline void *kvm_get_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - return kvm->arch.pgd; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } struct kvm; diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index 80a6501b4d50..c3c45e628e33 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S @@ -18,8 +18,11 @@ #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ #endif -/* Keep in sync with mach-at91/include/mach/hardware.h */ +#ifdef CONFIG_MMU #define AT91_IO_P2V(x) ((x) - 0x01000000) +#else +#define AT91_IO_P2V(x) (x) +#endif #define AT91_DBGU_SR (0x14) /* Status Register */ #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e55408e96559..1d60bebea4b8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -246,12 +246,9 @@ static int __get_cpu_architecture(void) if (cpu_arch) cpu_arch += CPU_ARCH_ARMv3; } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { - unsigned int mmfr0; - /* Revised CPUID format. Read the Memory Model Feature * Register 0 and check for VMSAv7 or PMSAv7 */ - asm("mrc p15, 0, %0, c0, c1, 4" - : "=r" (mmfr0)); + unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0); if ((mmfr0 & 0x0000000f) >= 0x00000003 || (mmfr0 & 0x000000f0) >= 0x00000030) cpu_arch = CPU_ARCH_ARMv7; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859bc3e11..5656d79c5a44 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, phys_addr_t addr = start, end = start + size; phys_addr_t next; - pgd = pgdp + pgd_index(addr); + pgd = pgdp + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); if (!pgd_none(*pgd)) @@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm, phys_addr_t next; pgd_t *pgd; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); stage2_flush_puds(kvm, pgd, addr, next); @@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); } +/* Free the HW pgd, one page at a time */ +static void kvm_free_hwpgd(void *hwpgd) +{ + free_pages_exact(hwpgd, kvm_get_hwpgd_size()); +} + +/* Allocate the HW PGD, making sure that each page gets its own refcount */ +static void *kvm_alloc_hwpgd(void) +{ + unsigned int size = kvm_get_hwpgd_size(); + + return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); +} + /** * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. * @kvm: The KVM struct pointer for the VM. 
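The mmu.c hunk above moves the stage-2 hardware PGD over to alloc_pages_exact()/free_pages_exact(), so every page backing the table keeps its own reference count, which keeps refcount-based checks like the kvm_page_empty() helper visible in the arch/arm/include/asm/kvm_mmu.h hunk earlier working on concatenated tables. A minimal sketch of that relationship, assuming only what this patch shows (the example_* names below are illustrative, not kernel API):

/* Sketch: why per-page refcounts matter for the emptiness check. */
#include <linux/gfp.h>
#include <linux/mm.h>

/* A table page counts as empty when only the allocator's reference remains. */
static bool example_page_empty(void *table)
{
	return page_count(virt_to_page(table)) == 1;
}

static void *example_alloc_hwpgd(size_t size)
{
	/*
	 * alloc_pages_exact() splits the underlying high-order block into
	 * individually refcounted pages, unlike a single __get_free_pages()
	 * allocation where only the head page is tracked, so the check
	 * above stays valid for every page of the hardware PGD.
	 */
	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
}

static void example_free_hwpgd(void *hwpgd, size_t size)
{
	free_pages_exact(hwpgd, size);
}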
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) */ int kvm_alloc_stage2_pgd(struct kvm *kvm) { - int ret; pgd_t *pgd; + void *hwpgd; if (kvm->arch.pgd != NULL) { kvm_err("kvm_arch already initialized?\n"); return -EINVAL; } + hwpgd = kvm_alloc_hwpgd(); + if (!hwpgd) + return -ENOMEM; + + /* When the kernel uses more levels of page tables than the + * guest, we allocate a fake PGD and pre-populate it to point + * to the next-level page table, which will be the real + * initial page table pointed to by the VTTBR. + * + * When KVM_PREALLOC_LEVEL==2, we allocate a single page for + * the PMD and the kernel will use folded pud. + * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD + * pages. + */ if (KVM_PREALLOC_LEVEL > 0) { + int i; + /* * Allocate fake pgd for the page table manipulation macros to * work. This is not used by the hardware and we have no @@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) */ pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO); + + if (!pgd) { + kvm_free_hwpgd(hwpgd); + return -ENOMEM; + } + + /* Plug the HW PGD into the fake one. */ + for (i = 0; i < PTRS_PER_S2_PGD; i++) { + if (KVM_PREALLOC_LEVEL == 1) + pgd_populate(NULL, pgd + i, + (pud_t *)hwpgd + i * PTRS_PER_PUD); + else if (KVM_PREALLOC_LEVEL == 2) + pud_populate(NULL, pud_offset(pgd, 0) + i, + (pmd_t *)hwpgd + i * PTRS_PER_PMD); + } } else { /* * Allocate actual first-level Stage-2 page table used by the * hardware for Stage-2 page table walks. */ - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); + pgd = (pgd_t *)hwpgd; } - if (!pgd) - return -ENOMEM; - - ret = kvm_prealloc_hwpgd(kvm, pgd); - if (ret) - goto out_err; - kvm_clean_pgd(pgd); kvm->arch.pgd = pgd; return 0; -out_err: - if (KVM_PREALLOC_LEVEL > 0) - kfree(pgd); - else - free_pages((unsigned long)pgd, S2_PGD_ORDER); - return ret; } /** @@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) return; unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); - kvm_free_hwpgd(kvm); + kvm_free_hwpgd(kvm_get_hwpgd(kvm)); if (KVM_PREALLOC_LEVEL > 0) kfree(kvm->arch.pgd); - else - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); + kvm->arch.pgd = NULL; } @@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pgd_t *pgd; pud_t *pud; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); if (WARN_ON(pgd_none(*pgd))) { if (!cache) return NULL; @@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) pgd_t *pgd; phys_addr_t next; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { /* * Release kvm_mmu_lock periodically if the memory region is diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 5e34fb143309..aa4116e9452f 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -270,37 +270,35 @@ static void __init at91_pm_sram_init(void) phys_addr_t sram_pbase; unsigned long sram_base; struct device_node *node; - struct platform_device *pdev; + struct platform_device *pdev = NULL; - node = of_find_compatible_node(NULL, NULL, "mmio-sram"); - if (!node) { - pr_warn("%s: failed to find sram node!\n", __func__); - return; + for_each_compatible_node(node, NULL, "mmio-sram") { + pdev = of_find_device_by_node(node); + if (pdev) { + of_node_put(node); + break; + } } - pdev = of_find_device_by_node(node); if (!pdev) { pr_warn("%s: failed to find sram 
device!\n", __func__); - goto put_node; + return; } sram_pool = dev_get_gen_pool(&pdev->dev); if (!sram_pool) { pr_warn("%s: sram pool unavailable!\n", __func__); - goto put_node; + return; } sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); if (!sram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); - goto put_node; + return; } sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); - -put_node: - of_node_put(node); } #endif diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index d2c89963af2d..86c0aa819d25 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h @@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void) " mcr p15, 0, %0, c7, c0, 4\n\t" " str %5, [%1, %2]" : - : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR), + : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR), "r" (1), "r" (AT91RM9200_SDRAMC_SRR), "r" (lpr)); } diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S index 556151e85ec4..931f0e302c03 100644 --- a/arch/arm/mach-at91/pm_slowclock.S +++ b/arch/arm/mach-at91/pm_slowclock.S @@ -25,11 +25,6 @@ */ #undef SLOWDOWN_MASTER_CLOCK -#define MCKRDY_TIMEOUT 1000 -#define MOSCRDY_TIMEOUT 1000 -#define PLLALOCK_TIMEOUT 1000 -#define PLLBLOCK_TIMEOUT 1000 - pmc .req r0 sdramc .req r1 ramc1 .req r2 @@ -41,60 +36,42 @@ tmp2 .req r5 * Wait until master clock is ready (after switching master clock source) */ .macro wait_mckrdy - mov tmp2, #MCKRDY_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_MCKRDY beq 1b -2: .endm /* * Wait until master oscillator has stabilized. */ .macro wait_moscrdy - mov tmp2, #MOSCRDY_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_MOSCS beq 1b -2: .endm /* * Wait until PLLA has locked. */ .macro wait_pllalock - mov tmp2, #PLLALOCK_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_LOCKA beq 1b -2: .endm /* * Wait until PLLB has locked. 
*/ .macro wait_pllblock - mov tmp2, #PLLBLOCK_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_LOCKB beq 1b -2: .endm .text + .arm + /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, * void __iomem *ramc1, int memctrl) */ @@ -134,6 +111,16 @@ ddr_sr_enable: cmp memctrl, #AT91_MEMCTRL_DDRSDR bne sdr_sr_enable + /* LPDDR1 --> force DDR2 mode during self-refresh */ + ldr tmp1, [sdramc, #AT91_DDRSDRC_MDR] + str tmp1, .saved_sam9_mdr + bic tmp1, tmp1, #~AT91_DDRSDRC_MD + cmp tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR + ldreq tmp1, [sdramc, #AT91_DDRSDRC_MDR] + biceq tmp1, tmp1, #AT91_DDRSDRC_MD + orreq tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2 + streq tmp1, [sdramc, #AT91_DDRSDRC_MDR] + /* prepare for DDRAM self-refresh mode */ ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] str tmp1, .saved_sam9_lpr @@ -142,14 +129,26 @@ ddr_sr_enable: /* figure out if we use the second ram controller */ cmp ramc1, #0 - ldrne tmp2, [ramc1, #AT91_DDRSDRC_LPR] - strne tmp2, .saved_sam9_lpr1 - bicne tmp2, #AT91_DDRSDRC_LPCB - orrne tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH + beq ddr_no_2nd_ctrl + + ldr tmp2, [ramc1, #AT91_DDRSDRC_MDR] + str tmp2, .saved_sam9_mdr1 + bic tmp2, tmp2, #~AT91_DDRSDRC_MD + cmp tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR + ldreq tmp2, [ramc1, #AT91_DDRSDRC_MDR] + biceq tmp2, tmp2, #AT91_DDRSDRC_MD + orreq tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2 + streq tmp2, [ramc1, #AT91_DDRSDRC_MDR] + + ldr tmp2, [ramc1, #AT91_DDRSDRC_LPR] + str tmp2, .saved_sam9_lpr1 + bic tmp2, #AT91_DDRSDRC_LPCB + orr tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH /* Enable DDRAM self-refresh mode */ + str tmp2, [ramc1, #AT91_DDRSDRC_LPR] +ddr_no_2nd_ctrl: str tmp1, [sdramc, #AT91_DDRSDRC_LPR] - strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] b sdr_sr_done @@ -208,6 +207,7 @@ sdr_sr_done: /* Turn off the main oscillator */ ldr tmp1, [pmc, #AT91_CKGR_MOR] bic tmp1, tmp1, #AT91_PMC_MOSCEN + orr tmp1, tmp1, #AT91_PMC_KEY str tmp1, [pmc, #AT91_CKGR_MOR] /* Wait for interrupt */ @@ -216,6 +216,7 @@ sdr_sr_done: /* Turn on the main oscillator */ ldr tmp1, [pmc, #AT91_CKGR_MOR] orr tmp1, tmp1, #AT91_PMC_MOSCEN + orr tmp1, tmp1, #AT91_PMC_KEY str tmp1, [pmc, #AT91_CKGR_MOR] wait_moscrdy @@ -280,12 +281,17 @@ sdr_sr_done: */ cmp memctrl, #AT91_MEMCTRL_DDRSDR bne sdr_en_restore + /* Restore MDR in case of LPDDR1 */ + ldr tmp1, .saved_sam9_mdr + str tmp1, [sdramc, #AT91_DDRSDRC_MDR] /* Restore LPR on AT91 with DDRAM */ ldr tmp1, .saved_sam9_lpr str tmp1, [sdramc, #AT91_DDRSDRC_LPR] /* if we use the second ram controller */ cmp ramc1, #0 + ldrne tmp2, .saved_sam9_mdr1 + strne tmp2, [ramc1, #AT91_DDRSDRC_MDR] ldrne tmp2, .saved_sam9_lpr1 strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] @@ -319,5 +325,11 @@ ram_restored: .saved_sam9_lpr1: .word 0 +.saved_sam9_mdr: + .word 0 + +.saved_sam9_mdr1: + .word 0 + ENTRY(at91_slow_clock_sz) .word .-at91_slow_clock diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 3f32c47a6d74..d2e9f12d12f1 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) */ void exynos_cpu_power_down(int cpu) { - if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") || - of_machine_is_compatible("samsung,exynos5800"))) { + if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) { /* * Bypass power down for CPU0 during suspend. 
Check for * the SYS_PWR_REG value to decide if we are suspending diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 20f267121b3e..37266a826437 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -161,6 +161,34 @@ no_clk: of_genpd_add_provider_simple(np, &pd->pd); } + /* Assign the child power domains to their parents */ + for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { + struct generic_pm_domain *child_domain, *parent_domain; + struct of_phandle_args args; + + args.np = np; + args.args_count = 0; + child_domain = of_genpd_get_from_provider(&args); + if (!child_domain) + continue; + + if (of_parse_phandle_with_args(np, "power-domains", + "#power-domain-cells", 0, &args) != 0) + continue; + + parent_domain = of_genpd_get_from_provider(&args); + if (!parent_domain) + continue; + + if (pm_genpd_add_subdomain(parent_domain, child_domain)) + pr_warn("%s failed to add subdomain: %s\n", + parent_domain->name, child_domain->name); + else + pr_info("%s has as child subdomain: %s.\n", + parent_domain->name, child_domain->name); + of_node_put(np); + } + return 0; } arch_initcall(exynos4_pm_init_power_domain); diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 52e2b1a2fddb..318d127df147 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; static u32 exynos_irqwake_intmask = 0xffffffff; static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { - { 73, BIT(1) }, /* RTC alarm */ - { 74, BIT(2) }, /* RTC tick */ + { 105, BIT(1) }, /* RTC alarm */ + { 106, BIT(2) }, /* RTC tick */ { /* sentinel */ }, }; diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 4ad6e473cf83..9de3412af406 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void) * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad * (external OSC), and we need to clear the bit. */ - clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : - IMX6Q_GPR1_ENET_CLK_SEL_PAD; + clksel = clk_is_match(ptp_clk, enet_ref) ? + IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : + IMX6Q_GPR1_ENET_CLK_SEL_PAD; gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); if (!IS_ERR(gpr)) regmap_update_bits(gpr, IOMUXC_GPR1, diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 2a2f4d56e4c8..25f1beea453e 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -720,6 +720,8 @@ static const char * __init omap_get_family(void) return kasprintf(GFP_KERNEL, "OMAP4"); else if (soc_is_omap54xx()) return kasprintf(GFP_KERNEL, "OMAP5"); + else if (soc_is_am33xx() || soc_is_am335x()) + return kasprintf(GFP_KERNEL, "AM33xx"); else if (soc_is_am43xx()) return kasprintf(GFP_KERNEL, "AM43xx"); else if (soc_is_dra7xx()) diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 92afb723dcfc..355b08936871 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name) if (ret == -EBUSY) pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); - if (!ret) { + if (oh->clkdm) { /* * Set the clockdomain to HW_AUTO, assuming that the * previous state was HW_AUTO. 
*/ - if (oh->clkdm && hwsup) + if (hwsup) clkdm_allow_idle(oh->clkdm); - } else { - if (oh->clkdm) - clkdm_hwmod_disable(oh->clkdm, oh); + + clkdm_hwmod_disable(oh->clkdm, oh); } return ret; @@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh) INIT_LIST_HEAD(&oh->master_ports); INIT_LIST_HEAD(&oh->slave_ports); spin_lock_init(&oh->_lock); + lockdep_set_class(&oh->_lock, &oh->hwmod_key); oh->_state = _HWMOD_STATE_REGISTERED; diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 9d4bec6ee742..9611c91d9b82 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -674,6 +674,7 @@ struct omap_hwmod { u32 _sysc_cache; void __iomem *_mpu_rt_va; spinlock_t _lock; + struct lock_class_key hwmod_key; /* unique lock class */ struct list_head node; struct omap_hwmod_ocp_if *_mpu_port; unsigned int (*xlate_irq)(unsigned int); diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index e8692e7675b8..16fe7a1b7a35 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1466,55 +1466,18 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = { * */ -static struct omap_hwmod_class dra7xx_pcie_hwmod_class = { +static struct omap_hwmod_class dra7xx_pciess_hwmod_class = { .name = "pcie", }; /* pcie1 */ -static struct omap_hwmod dra7xx_pcie1_hwmod = { +static struct omap_hwmod dra7xx_pciess1_hwmod = { .name = "pcie1", - .class = &dra7xx_pcie_hwmod_class, + .class = &dra7xx_pciess_hwmod_class, .clkdm_name = "pcie_clkdm", .main_clk = "l4_root_clk_div", .prcm = { .omap4 = { - .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* pcie2 */ -static struct omap_hwmod dra7xx_pcie2_hwmod = { - .name = "pcie2", - .class = &dra7xx_pcie_hwmod_class, - .clkdm_name = "pcie_clkdm", - .main_clk = "l4_root_clk_div", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'PCIE PHY' class - * - */ - -static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = { - .name = "pcie-phy", -}; - -/* pcie1 phy */ -static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { - .name = "pcie1-phy", - .class = &dra7xx_pcie_phy_hwmod_class, - .clkdm_name = "l3init_clkdm", - .main_clk = "l4_root_clk_div", - .prcm = { - .omap4 = { .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, @@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { }, }; -/* pcie2 phy */ -static struct omap_hwmod dra7xx_pcie2_phy_hwmod = { - .name = "pcie2-phy", - .class = &dra7xx_pcie_phy_hwmod_class, - .clkdm_name = "l3init_clkdm", +/* pcie2 */ +static struct omap_hwmod dra7xx_pciess2_hwmod = { + .name = "pcie2", + .class = &dra7xx_pciess_hwmod_class, + .clkdm_name = "pcie_clkdm", .main_clk = "l4_root_clk_div", .prcm = { .omap4 = { @@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l3_main_1 -> pcie1 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = { +/* l3_main_1 -> pciess1 */ +static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = { .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_pcie1_hwmod, + .slave = &dra7xx_pciess1_hwmod, .clk = "l3_iclk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l4_cfg -> pcie1 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 
= { +/* l4_cfg -> pciess1 */ +static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = { .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie1_hwmod, + .slave = &dra7xx_pciess1_hwmod, .clk = "l4_root_clk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l3_main_1 -> pcie2 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = { +/* l3_main_1 -> pciess2 */ +static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = { .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_pcie2_hwmod, + .slave = &dra7xx_pciess2_hwmod, .clk = "l3_iclk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l4_cfg -> pcie2 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie2_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> pcie1 phy */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie1_phy_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> pcie2 phy */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = { +/* l4_cfg -> pciess2 */ +static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = { .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie2_phy_hwmod, + .slave = &dra7xx_pciess2_hwmod, .clk = "l4_root_clk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; @@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { &dra7xx_l4_cfg__mpu, &dra7xx_l4_cfg__ocp2scp1, &dra7xx_l4_cfg__ocp2scp3, - &dra7xx_l3_main_1__pcie1, - &dra7xx_l4_cfg__pcie1, - &dra7xx_l3_main_1__pcie2, - &dra7xx_l4_cfg__pcie2, - &dra7xx_l4_cfg__pcie1_phy, - &dra7xx_l4_cfg__pcie2_phy, + &dra7xx_l3_main_1__pciess1, + &dra7xx_l4_cfg__pciess1, + &dra7xx_l3_main_1__pciess2, + &dra7xx_l4_cfg__pciess2, &dra7xx_l3_main_1__qspi, &dra7xx_l4_per3__rtcss, &dra7xx_l4_cfg__sata, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 190fa43e7479..e642b079e9f3 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void) static void __init omap3_evm_legacy_init(void) { + hsmmc2_internal_input_clk(); legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); } diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index a08a617a6c11..d6d6bc39e05c 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) { saved_mask[0] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQSTATUS_MPU_OFFSET); + OMAP4_PRM_IRQENABLE_MPU_OFFSET); saved_mask[1] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); + OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 7d8eab857a93..f6d02e4cbcda 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c @@ -36,6 +36,7 @@ #include <linux/platform_data/video-pxafb.h> #include <mach/bitfield.h> #include <linux/platform_data/mmc-pxamci.h> +#include <linux/smc91x.h> #include "generic.h" #include "devices.h" diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 0eecd83c624e..89a7c06570d3 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c @@ -11,6 +11,7 @@ * it under the terms of the GNU 
General Public License version 2 as * published by the Free Software Foundation. */ +#include <linux/bitops.h> #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> @@ -40,7 +41,6 @@ #define ICHP_VAL_IRQ (1 << 31) #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) #define IPR_VALID (1 << 31) -#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f) #define MAX_INTERNAL_IRQS 128 @@ -51,6 +51,7 @@ static void __iomem *pxa_irq_base; static int pxa_internal_irq_nr; static bool cpu_has_ipr; +static struct irq_domain *pxa_irq_domain; static inline void __iomem *irq_base(int i) { @@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i) void pxa_mask_irq(struct irq_data *d) { void __iomem *base = irq_data_get_irq_chip_data(d); + irq_hw_number_t irq = irqd_to_hwirq(d); uint32_t icmr = __raw_readl(base + ICMR); - icmr &= ~(1 << IRQ_BIT(d->irq)); + icmr &= ~BIT(irq & 0x1f); __raw_writel(icmr, base + ICMR); } void pxa_unmask_irq(struct irq_data *d) { void __iomem *base = irq_data_get_irq_chip_data(d); + irq_hw_number_t irq = irqd_to_hwirq(d); uint32_t icmr = __raw_readl(base + ICMR); - icmr |= 1 << IRQ_BIT(d->irq); + icmr |= BIT(irq & 0x1f); __raw_writel(icmr, base + ICMR); } @@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs) } while (1); } -void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) +static int pxa_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) { - int irq, i, n; + void __iomem *base = irq_base(hw / 32); - BUG_ON(irq_nr > MAX_INTERNAL_IRQS); + /* initialize interrupt priority */ + if (cpu_has_ipr) + __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); + + irq_set_chip_and_handler(virq, &pxa_internal_irq_chip, + handle_level_irq); + irq_set_chip_data(virq, base); + set_irq_flags(virq, IRQF_VALID); + + return 0; +} + +static struct irq_domain_ops pxa_irq_ops = { + .map = pxa_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static __init void +pxa_init_irq_common(struct device_node *node, int irq_nr, + int (*fn)(struct irq_data *, unsigned int)) +{ + int n; pxa_internal_irq_nr = irq_nr; - cpu_has_ipr = !cpu_is_pxa25x(); - pxa_irq_base = io_p2v(0x40d00000); + pxa_irq_domain = irq_domain_add_legacy(node, irq_nr, + PXA_IRQ(0), 0, + &pxa_irq_ops, NULL); + if (!pxa_irq_domain) + panic("Unable to add PXA IRQ domain\n"); + irq_set_default_host(pxa_irq_domain); for (n = 0; n < irq_nr; n += 32) { void __iomem *base = irq_base(n >> 5); __raw_writel(0, base + ICMR); /* disable all IRQs */ __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ - for (i = n; (i < (n + 32)) && (i < irq_nr); i++) { - /* initialize interrupt priority */ - if (cpu_has_ipr) - __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i)); - - irq = PXA_IRQ(i); - irq_set_chip_and_handler(irq, &pxa_internal_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, base); - set_irq_flags(irq, IRQF_VALID); - } } - /* only unmasked interrupts kick us out of idle */ __raw_writel(1, irq_base(0) + ICCR); pxa_internal_irq_chip.irq_set_wake = fn; } +void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) +{ + BUG_ON(irq_nr > MAX_INTERNAL_IRQS); + + pxa_irq_base = io_p2v(0x40d00000); + cpu_has_ipr = !cpu_is_pxa25x(); + pxa_init_irq_common(NULL, irq_nr, fn); +} + #ifdef CONFIG_PM static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; @@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = { }; #ifdef CONFIG_OF -static struct irq_domain 
*pxa_irq_domain; - -static int pxa_irq_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - void __iomem *base = irq_base(hw / 32); - - /* initialize interrupt priority */ - if (cpu_has_ipr) - __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); - - irq_set_chip_and_handler(hw, &pxa_internal_irq_chip, - handle_level_irq); - irq_set_chip_data(hw, base); - set_irq_flags(hw, IRQF_VALID); - - return 0; -} - -static struct irq_domain_ops pxa_irq_ops = { - .map = pxa_irq_map, - .xlate = irq_domain_xlate_onecell, -}; - static const struct of_device_id intc_ids[] __initconst = { { .compatible = "marvell,pxa-intc", }, {} @@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) { struct device_node *node; struct resource res; - int n, ret; + int ret; node = of_find_matching_node(NULL, intc_ids); if (!node) { @@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) return; } - pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, - &pxa_irq_ops, NULL); - if (!pxa_irq_domain) - panic("Unable to add PXA IRQ domain\n"); - - irq_set_default_host(pxa_irq_domain); - - for (n = 0; n < pxa_internal_irq_nr; n += 32) { - void __iomem *base = irq_base(n >> 5); - - __raw_writel(0, base + ICMR); /* disable all IRQs */ - __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ - } - - /* only unmasked interrupts kick us out of idle */ - __raw_writel(1, irq_base(0) + ICCR); - - pxa_internal_irq_chip.irq_set_wake = fn; + pxa_init_irq_common(node, pxa_internal_irq_nr, fn); } #endif /* CONFIG_OF */ diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index 28da319d389f..eaee2c20b189 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -195,7 +195,7 @@ static struct resource smc91x_resources[] = { }; struct smc91x_platdata smc91x_platdata = { - .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT; + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; static struct platform_device smc91x_device = { diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 205f9bf3821e..ac2ae5c71ab4 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = { }; static struct platform_device can_regulator_device = { - .name = "reg-fixed-volage", + .name = "reg-fixed-voltage", .id = 0, .dev = { .platform_data = &can_regulator_pdata, diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 7b0cd3172354..af868d258e66 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -268,8 +268,8 @@ static int neponset_probe(struct platform_device *dev) .id = 0, .res = smc91x_resources, .num_res = ARRAY_SIZE(smc91x_resources), - .data = &smc91c_platdata, - .size_data = sizeof(smc91c_platdata), + .data = &smc91x_platdata, + .size_data = sizeof(smc91x_platdata), }; int ret, irq; diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 696fd0fe4806..1525d7b5f1b7 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c @@ -54,7 +54,7 @@ static struct platform_device smc91x_device = { .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { - .platform_data = &smc91c_platdata, + .platform_data = &smc91x_platdata, }, }; diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 483cb467bf65..a0f3b1cd497c 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h @@ 
-45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end; extern unsigned long socfpga_cpu1start_addr; -#define SOCFPGA_SCU_VIRT_BASE 0xfffec000 +#define SOCFPGA_SCU_VIRT_BASE 0xfee00000 #endif diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 383d61e138af..f5e597c207b9 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -23,6 +23,7 @@ #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/cacheflush.h> #include "core.h" @@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void) (u32 *) &socfpga_cpu1start_addr)) pr_err("SMP: Need cpu1-start-addr in device tree.\n"); + /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */ + smp_wmb(); + sync_cache_w(&socfpga_cpu1start_addr); + sys_manager_base_addr = of_iomap(np, 0); np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index b067390cef4e..b373acade338 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c @@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = { "st,stih415", "st,stih416", "st,stih407", + "st,stih410", "st,stih418", NULL }; diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index a77604fbaf25..81502b90dd91 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig @@ -1,10 +1,12 @@ menuconfig ARCH_SUNXI bool "Allwinner SoCs" if ARCH_MULTI_V7 select ARCH_REQUIRE_GPIOLIB + select ARCH_HAS_RESET_CONTROLLER select CLKSRC_MMIO select GENERIC_IRQ_CHIP select PINCTRL select SUN4I_TIMER + select RESET_CONTROLLER if ARCH_SUNXI @@ -20,10 +22,8 @@ config MACH_SUN5I config MACH_SUN6I bool "Allwinner A31 (sun6i) SoCs support" default ARCH_SUNXI - select ARCH_HAS_RESET_CONTROLLER select ARM_GIC select MFD_SUN6I_PRCM - select RESET_CONTROLLER select SUN5I_HSTIMER config MACH_SUN7I @@ -37,16 +37,12 @@ config MACH_SUN7I config MACH_SUN8I bool "Allwinner A23 (sun8i) SoCs support" default ARCH_SUNXI - select ARCH_HAS_RESET_CONTROLLER select ARM_GIC select MFD_SUN6I_PRCM - select RESET_CONTROLLER config MACH_SUN9I bool "Allwinner (sun9i) SoCs support" default ARCH_SUNXI - select ARCH_HAS_RESET_CONTROLLER select ARM_GIC - select RESET_CONTROLLER endif diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c6c7696b8db9..8f15f70622a6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np, } ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); - if (ret) - return; - - switch (assoc) { - case 16: - *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; - *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; - *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; - break; - case 8: - *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; - *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; - break; - default: - pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", - assoc); - break; + if (!ret) { + switch (assoc) { + case 16: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + case 8: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + default: + pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", + assoc); + break; + } } prefetch = l2x0_saved_regs.prefetch_ctrl; diff --git a/arch/arm/mm/dma-mapping.c 
b/arch/arm/mm/dma-mapping.c index 170a116d1b29..c27447653903 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn) */ if (sizeof(mask) != sizeof(dma_addr_t) && mask > (dma_addr_t)~0 && - dma_to_pfn(dev, ~0) < max_pfn) { + dma_to_pfn(dev, ~0) < max_pfn - 1) { if (warn) { dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", mask); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index a982dc3190df..6333d9c17875 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); + show_pte(current->mm, addr); info.si_signo = inf->sig; info.si_errno = 0; diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index 004e35cdcfff..cf30daff8932 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c @@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages, WARN_ON_ONCE(1); } - if (!is_module_address(start) || !is_module_address(end - 1)) + if (start < MODULES_VADDR || start >= MODULES_END) + return -EINVAL; + + if (end < MODULES_VADDR || start >= MODULES_END) return -EINVAL; data.set_mask = set_mask; diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index db10169a08de..8ca94d379bc3 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct of_device_id *match; const struct dmtimer_platform_data *pdata; + int ret; match = of_match_device(of_match_ptr(omap_timer_match), dev); pdata = match ? 
match->data : dev->platform_data; @@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev) } if (!timer->reserved) { - pm_runtime_get_sync(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "%s: pm_runtime_get_sync failed!\n", + __func__); + goto err_get_sync; + } __omap_dm_timer_init_regs(timer); pm_runtime_put(dev); } @@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev) dev_dbg(dev, "Device Probed.\n"); return 0; + +err_get_sync: + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + return ret; } /** @@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev) } spin_unlock_irqrestore(&dm_timer_lock, flags); + pm_runtime_disable(&pdev->dev); + return ret; } diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index f1ad9c2ab2e9..a857794432d6 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -622,7 +622,7 @@ }; sgenet0: ethernet@1f210000 { - compatible = "apm,xgene-enet"; + compatible = "apm,xgene1-sgenet"; status = "disabled"; reg = <0x0 0x1f210000 0x0 0xd100>, <0x0 0x1f200000 0x0 0Xc300>, @@ -636,7 +636,7 @@ }; xgenet: ethernet@1f610000 { - compatible = "apm,xgene-enet"; + compatible = "apm,xgene1-xgenet"; status = "disabled"; reg = <0x0 0x1f610000 0x0 0xd100>, <0x0 0x1f600000 0x0 0Xc300>, diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi index ea2b5666a16f..c9b89efe0f56 100644 --- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi +++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi @@ -8,7 +8,7 @@ */ /* SoC fixed clocks */ - soc_uartclk: refclk72738khz { + soc_uartclk: refclk7273800hz { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <7273800>; diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index cb9593079f29..d8c25b7b18fb 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, __ret; \ }) -#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) -#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) -#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) -#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) - -#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ - cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ - o1, o2, n1, n2) +#define _protect_cmpxchg_local(pcp, o, n) \ +({ \ + typeof(*raw_cpu_ptr(&(pcp))) __ret; \ + preempt_disable(); \ + __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ + preempt_enable(); \ + __ret; \ +}) + +#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) + +#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ +({ \ + int __ret; \ + preempt_disable(); \ + __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ + raw_cpu_ptr(&(ptr2)), \ + o1, o2, n1, n2); \ + preempt_enable(); \ + __ret; \ +}) #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 
94674eb7e7bb..54bb4ba97441 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -129,6 +129,9 @@ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * + * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time + * (see hyp-init.S). + * * Note that when using 4K pages, we concatenate two first level page tables * together. * @@ -138,7 +141,6 @@ #ifdef CONFIG_ARM64_64K_PAGES /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 64kB pages (TG0 = 1) * 2 level page tables (SL = 1) @@ -150,7 +152,6 @@ #else /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 4kB pages (TG0 = 0) * 3 level page tables (SL = 1) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..bbfb600fa822 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) + /* * If we are concatenating first level stage-2 page tables, we would have less * than or equal to 16 pointers in the fake PGD, because that's what the @@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define KVM_PREALLOC_LEVEL (0) #endif -/** - * kvm_prealloc_hwpgd - allocate inital table for VTTBR - * @kvm: The KVM struct pointer for the VM. - * @pgd: The kernel pseudo pgd - * - * When the kernel uses more levels of page tables than the guest, we allocate - * a fake PGD and pre-populate it to point to the next-level page table, which - * will be the real initial page table pointed to by the VTTBR. - * - * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and - * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we - * allocate 2 consecutive PUD pages. 
- */ -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) -{ - unsigned int i; - unsigned long hwpgd; - - if (KVM_PREALLOC_LEVEL == 0) - return 0; - - hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); - if (!hwpgd) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_S2_PGD; i++) { - if (KVM_PREALLOC_LEVEL == 1) - pgd_populate(NULL, pgd + i, - (pud_t *)hwpgd + i * PTRS_PER_PUD); - else if (KVM_PREALLOC_LEVEL == 2) - pud_populate(NULL, pud_offset(pgd, 0) + i, - (pmd_t *)hwpgd + i * PTRS_PER_PMD); - } - - return 0; -} - static inline void *kvm_get_hwpgd(struct kvm *kvm) { pgd_t *pgd = kvm->arch.pgd; @@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm) return pmd_offset(pud, 0); } -static inline void kvm_free_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - if (KVM_PREALLOC_LEVEL > 0) { - unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); - free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); - } + if (KVM_PREALLOC_LEVEL > 0) + return PTRS_PER_S2_PGD * PAGE_SIZE; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } static inline bool kvm_page_empty(void *ptr) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index a9eee33dfa62..101a42bde728 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, { unsigned int cpu = smp_processor_id(); + /* + * init_mm.pgd does not contain any user mappings and it is always + * active for kernel addresses in TTBR1. Just set the reserved TTBR0. + */ + if (next == &init_mm) { + cpu_set_reserved_ttbr0(); + return; + } + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) check_and_switch_context(next, tsk); } diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 09da25bc596f..4fde8c1df97f 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, return ret; } +#define _percpu_read(pcp) \ +({ \ + typeof(pcp) __retval; \ + preempt_disable(); \ + __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \ + sizeof(pcp)); \ + preempt_enable(); \ + __retval; \ +}) + +#define _percpu_write(pcp, val) \ +do { \ + preempt_disable(); \ + __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \ + sizeof(pcp)); \ + preempt_enable(); \ +} while(0) \ + +#define _pcp_protect(operation, pcp, val) \ +({ \ + typeof(pcp) __retval; \ + preempt_disable(); \ + __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ + (val), sizeof(pcp)); \ + preempt_enable(); \ + __retval; \ +}) + #define _percpu_add(pcp, val) \ - __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) + _pcp_protect(__percpu_add, pcp, val) -#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) +#define _percpu_add_return(pcp, val) _percpu_add(pcp, val) #define _percpu_and(pcp, val) \ - __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) + _pcp_protect(__percpu_and, pcp, val) #define _percpu_or(pcp, val) \ - __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) - -#define _percpu_read(pcp) (typeof(pcp)) \ - (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp))) - -#define _percpu_write(pcp, val) \ - __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)) + _pcp_protect(__percpu_or, pcp, val) #define _percpu_xchg(pcp, val) (typeof(pcp)) \ - (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), 
sizeof(pcp))) + _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)) #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84f8fb2..941c375616e2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include <asm/memory.h> -#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) +#define cpu_switch_mm(pgd,mm) \ +do { \ + BUG_ON(pgd == swapper_pg_dir); \ + cpu_do_switch_mm(virt_to_phys(pgd),mm); \ +} while (0) #define cpu_get_pgd() \ ({ \ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index c028fe37456f..53d9c354219f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb) static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { + __flush_tlb_pgtable(tlb->mm, addr); pgtable_page_dtor(pte); tlb_remove_entry(tlb, pte); } @@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) { + __flush_tlb_pgtable(tlb->mm, addr); tlb_remove_entry(tlb, virt_to_page(pmdp)); } #endif @@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, unsigned long addr) { + __flush_tlb_pgtable(tlb->mm, addr); tlb_remove_entry(tlb, virt_to_page(pudp)); } #endif diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 4abe9b945f77..c3bb05b98616 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end } /* + * Used to invalidate the TLB (walk caches) corresponding to intermediate page + * table levels (pgd/pud/pmd). + */ +static inline void __flush_tlb_pgtable(struct mm_struct *mm, + unsigned long uaddr) +{ + unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48); + + dsb(ishst); + asm("tlbi vae1is, %0" : : "r" (addr)); + dsb(ish); +} +/* * On AArch64, the cache coherency is handled via the set_pte_at() function. */ static inline void update_mmu_cache(struct vm_area_struct *vma, diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b42c7b480e1e..ab21e0d58278 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init); static void efi_set_pgd(struct mm_struct *mm) { - cpu_switch_mm(mm->pgd, mm); + if (mm == &init_mm) + cpu_set_reserved_ttbr0(); + else + cpu_switch_mm(mm->pgd, mm); + flush_tlb_all(); if (icache_is_aivivt()) __flush_icache_all(); @@ -354,3 +358,12 @@ void efi_virtmap_unload(void) efi_set_pgd(current->active_mm); preempt_enable(); } + +/* + * UpdateCapsule() depends on the system being shutdown via + * ResetSystem(). + */ +bool efi_poweroff_required(void) +{ + return efi_enabled(EFI_RUNTIME_SERVICES); +} diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8ce88e08c030..07f930540f4a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag) * zeroing of .bss would clobber it. 
*/ .pushsection .data..cacheline_aligned -ENTRY(__boot_cpu_mode) .align L1_CACHE_SHIFT +ENTRY(__boot_cpu_mode) .long BOOT_CPU_MODE_EL2 .long 0 .popsection diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fde9923af859..c6b1f3b96f45 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -21,6 +21,7 @@ #include <stdarg.h> #include <linux/compat.h> +#include <linux/efi.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -150,6 +151,13 @@ void machine_restart(char *cmd) local_irq_disable(); smp_send_stop(); + /* + * UpdateCapsule() depends on the system being reset via + * ResetSystem(). + */ + if (efi_enabled(EFI_RUNTIME_SERVICES)) + efi_reboot(reboot_mode, NULL); + /* Now call the architecture specific reboot code. */ if (arm_pm_restart) arm_pm_restart(reboot_mode, cmd); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 58e0c2bdde04..ef7d112f5ce0 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p) } early_param("coherent_pool", early_coherent_pool); -static void *__alloc_from_pool(size_t size, struct page **ret_page) +static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) { unsigned long val; void *ptr = NULL; @@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) *ret_page = phys_to_page(phys); ptr = (void *)val; + if (flags & __GFP_ZERO) + memset(ptr, 0, size); } return ptr; @@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, flags |= GFP_DMA; if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { struct page *page; + void *addr; size = PAGE_ALIGN(size); page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, @@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, return NULL; *dma_handle = phys_to_dma(dev, page_to_phys(page)); - return page_address(page); + addr = page_address(page); + if (flags & __GFP_ZERO) + memset(addr, 0, size); + return addr; } else { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); } @@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size, if (!coherent && !(flags & __GFP_WAIT)) { struct page *page = NULL; - void *addr = __alloc_from_pool(size, &page); + void *addr = __alloc_from_pool(size, &page, flags); if (addr) *dma_handle = phys_to_dma(dev, page_to_phys(page)); diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h index 78d4483ba40c..ec4db6df5e0d 100644 --- a/arch/c6x/include/asm/pgtable.h +++ b/arch/c6x/include/asm/pgtable.h @@ -67,6 +67,11 @@ extern unsigned long empty_zero_page; */ #define pgtable_cache_init() do { } while (0) +/* + * c6x is !MMU, so define the simpliest implementation + */ +#define pgprot_writecombine pgprot_noncached + #include <asm-generic/pgtable.h> #endif /* _ASM_C6X_PGTABLE_H */ diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h index 9359e5048442..d5779b0ec573 100644 --- a/arch/metag/include/asm/io.h +++ b/arch/metag/include/asm/io.h @@ -2,6 +2,7 @@ #define _ASM_METAG_IO_H #include <linux/types.h> +#include <asm/pgtable-bits.h> #define IO_SPACE_LIMIT 0 diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h new file mode 100644 index 000000000000..25ba6729f496 --- /dev/null +++ b/arch/metag/include/asm/pgtable-bits.h @@ -0,0 +1,104 @@ +/* + * Meta page table definitions. 
+ */ + +#ifndef _METAG_PGTABLE_BITS_H +#define _METAG_PGTABLE_BITS_H + +#include <asm/metag_mem.h> + +/* + * Definitions for MMU descriptors + * + * These are the hardware bits in the MMCU pte entries. + * Derived from the Meta toolkit headers. + */ +#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT +#define _PAGE_WRITE MMCU_ENTRY_WR_BIT +#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT +/* Write combine bit - this can cause writes to occur out of order */ +#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT +/* Sys coherent bit - this bit is never used by Linux */ +#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT +#define _PAGE_ALWAYS_ZERO_1 0x020 +#define _PAGE_CACHE_CTRL0 0x040 +#define _PAGE_CACHE_CTRL1 0x080 +#define _PAGE_ALWAYS_ZERO_2 0x100 +#define _PAGE_ALWAYS_ZERO_3 0x200 +#define _PAGE_ALWAYS_ZERO_4 0x400 +#define _PAGE_ALWAYS_ZERO_5 0x800 + +/* These are software bits that we stuff into the gaps in the hardware + * pte entries that are not used. Note, these DO get stored in the actual + * hardware, but the hardware just does not use them. + */ +#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 +#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 + +/* Pages owned, and protected by, the kernel. */ +#define _PAGE_KERNEL _PAGE_PRIV + +/* No cacheing of this page */ +#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) +/* burst cacheing - good for data streaming */ +#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) +/* One cache way per thread */ +#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) +/* Full on cacheing */ +#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) + +#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) + +/* which bits are used for cache control ... */ +#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ + _PAGE_WR_COMBINE) + +/* This is a mask of the bits that pte_modify is allowed to change. 
*/ +#define _PAGE_CHG_MASK (PAGE_MASK) + +#define _PAGE_SZ_SHIFT 1 +#define _PAGE_SZ_4K (0x0) +#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) +#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) + +#if defined(CONFIG_PAGE_SIZE_4K) +#define _PAGE_SZ (_PAGE_SZ_4K) +#elif defined(CONFIG_PAGE_SIZE_8K) +#define _PAGE_SZ (_PAGE_SZ_8K) +#elif defined(CONFIG_PAGE_SIZE_16K) +#define _PAGE_SZ (_PAGE_SZ_16K) +#endif +#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) + +#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) +# define _PAGE_SZHUGE (_PAGE_SZ_8K) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) +# define _PAGE_SZHUGE (_PAGE_SZ_16K) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) +# define _PAGE_SZHUGE (_PAGE_SZ_32K) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +# define _PAGE_SZHUGE (_PAGE_SZ_64K) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) +# define _PAGE_SZHUGE (_PAGE_SZ_128K) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) +# define _PAGE_SZHUGE (_PAGE_SZ_256K) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) +# define _PAGE_SZHUGE (_PAGE_SZ_512K) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) +# define _PAGE_SZHUGE (_PAGE_SZ_1M) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) +# define _PAGE_SZHUGE (_PAGE_SZ_2M) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) +# define _PAGE_SZHUGE (_PAGE_SZ_4M) +#endif + +#endif /* _METAG_PGTABLE_BITS_H */ diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h index d0604c0a8702..ffa3a3a2ecad 100644 --- a/arch/metag/include/asm/pgtable.h +++ b/arch/metag/include/asm/pgtable.h @@ -5,6 +5,7 @@ #ifndef _METAG_PGTABLE_H #define _METAG_PGTABLE_H +#include <asm/pgtable-bits.h> #include <asm-generic/pgtable-nopmd.h> /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ @@ -21,100 +22,6 @@ #endif /* - * Definitions for MMU descriptors - * - * These are the hardware bits in the MMCU pte entries. - * Derived from the Meta toolkit headers. - */ -#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT -#define _PAGE_WRITE MMCU_ENTRY_WR_BIT -#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT -/* Write combine bit - this can cause writes to occur out of order */ -#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT -/* Sys coherent bit - this bit is never used by Linux */ -#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT -#define _PAGE_ALWAYS_ZERO_1 0x020 -#define _PAGE_CACHE_CTRL0 0x040 -#define _PAGE_CACHE_CTRL1 0x080 -#define _PAGE_ALWAYS_ZERO_2 0x100 -#define _PAGE_ALWAYS_ZERO_3 0x200 -#define _PAGE_ALWAYS_ZERO_4 0x400 -#define _PAGE_ALWAYS_ZERO_5 0x800 - -/* These are software bits that we stuff into the gaps in the hardware - * pte entries that are not used. Note, these DO get stored in the actual - * hardware, but the hardware just does not use them. - */ -#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 -#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 - -/* Pages owned, and protected by, the kernel. 
*/ -#define _PAGE_KERNEL _PAGE_PRIV - -/* No cacheing of this page */ -#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) -/* burst cacheing - good for data streaming */ -#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) -/* One cache way per thread */ -#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) -/* Full on cacheing */ -#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) - -#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) - -/* which bits are used for cache control ... */ -#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ - _PAGE_WR_COMBINE) - -/* This is a mask of the bits that pte_modify is allowed to change. */ -#define _PAGE_CHG_MASK (PAGE_MASK) - -#define _PAGE_SZ_SHIFT 1 -#define _PAGE_SZ_4K (0x0) -#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) -#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) - -#if defined(CONFIG_PAGE_SIZE_4K) -#define _PAGE_SZ (_PAGE_SZ_4K) -#elif defined(CONFIG_PAGE_SIZE_8K) -#define _PAGE_SZ (_PAGE_SZ_8K) -#elif defined(CONFIG_PAGE_SIZE_16K) -#define _PAGE_SZ (_PAGE_SZ_16K) -#endif -#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) - -#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) -# define _PAGE_SZHUGE (_PAGE_SZ_8K) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) -# define _PAGE_SZHUGE (_PAGE_SZ_16K) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) -# define _PAGE_SZHUGE (_PAGE_SZ_32K) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -# define _PAGE_SZHUGE (_PAGE_SZ_64K) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) -# define _PAGE_SZHUGE (_PAGE_SZ_128K) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) -# define _PAGE_SZHUGE (_PAGE_SZ_256K) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -# define _PAGE_SZHUGE (_PAGE_SZ_512K) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) -# define _PAGE_SZHUGE (_PAGE_SZ_1M) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) -# define _PAGE_SZHUGE (_PAGE_SZ_2M) -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) -# define _PAGE_SZHUGE (_PAGE_SZ_4M) -#endif - -/* * The Linux memory management assumes a three-level page table setup. On * Meta, we use that, but "fold" the mid level into the top-level page * table. diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 0536bc021cc6..ef548510b951 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -348,8 +348,9 @@ C_ENTRY(_user_exception): * The LP register should point to the location where the called function * should return. [note that MAKE_SYS_CALL uses label 1] */ /* See if the system call number is valid */ + blti r12, 5f addi r11, r12, -__NR_syscalls; - bgei r11,5f; + bgei r11, 5f; /* Figure out which function to use for this system call. */ /* Note Microblaze barrel shift is optional, so don't rely on it */ add r12, r12, r12; /* convert num -> ptr */ @@ -375,7 +376,7 @@ C_ENTRY(_user_exception): /* The syscall number is invalid, return an error. 
*/ 5: - rtsd r15, 8; /* looks like a normal subroutine return */ + braid ret_from_trap addi r3, r0, -ENOSYS; /* Entry point used to return from a syscall/trap */ @@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap): bri 1b /* Maybe handle a signal */ -5: +5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; beqi r11, 4f; /* Signals to handle, handle them */ diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h index 20fb1cf2dab6..642462144872 100644 --- a/arch/nios2/include/asm/ptrace.h +++ b/arch/nios2/include/asm/ptrace.h @@ -15,7 +15,54 @@ #include <uapi/asm/ptrace.h> +/* This struct defines the way the registers are stored on the + stack during a system call. */ + #ifndef __ASSEMBLY__ +struct pt_regs { + unsigned long r8; /* r8-r15 Caller-saved GP registers */ + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long r1; /* Assembler temporary */ + unsigned long r2; /* Retval LS 32bits */ + unsigned long r3; /* Retval MS 32bits */ + unsigned long r4; /* r4-r7 Register arguments */ + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long orig_r2; /* Copy of r2 ?? */ + unsigned long ra; /* Return address */ + unsigned long fp; /* Frame pointer */ + unsigned long sp; /* Stack pointer */ + unsigned long gp; /* Global pointer */ + unsigned long estatus; + unsigned long ea; /* Exception return address (pc) */ + unsigned long orig_r7; +}; + +/* + * This is the extended stack used by signal handlers and the context + * switcher: it's pushed after the normal "struct pt_regs". + */ +struct switch_stack { + unsigned long r16; /* r16-r23 Callee-saved GP registers */ + unsigned long r17; + unsigned long r18; + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + unsigned long r23; + unsigned long fp; + unsigned long gp; + unsigned long ra; +}; + #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) #define instruction_pointer(regs) ((regs)->ra) diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h deleted file mode 100644 index 2c87614b0f6e..000000000000 --- a/arch/nios2/include/asm/ucontext.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch> - * Copyright (C) 2004 Microtronix Datacom Ltd - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#ifndef _ASM_NIOS2_UCONTEXT_H -#define _ASM_NIOS2_UCONTEXT_H - -typedef int greg_t; -#define NGREG 32 -typedef greg_t gregset_t[NGREG]; - -struct mcontext { - int version; - gregset_t gregs; -}; - -#define MCONTEXT_VERSION 2 - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct mcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 4f07ca3f8d10..e0bb972a50d7 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild @@ -1,4 +1,5 @@ include include/uapi/asm-generic/Kbuild.asm header-y += elf.h -header-y += ucontext.h + +generic-y += ucontext.h diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h index a5b91ae5cf56..6f06d3b2949e 100644 --- a/arch/nios2/include/uapi/asm/elf.h +++ b/arch/nios2/include/uapi/asm/elf.h @@ -50,9 +50,7 @@ typedef unsigned long elf_greg_t; -#define ELF_NGREG \ - ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \ - sizeof(elf_greg_t)) +#define ELF_NGREG 49 typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef unsigned long elf_fpregset_t; diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h index e83a7c9d1c36..71a330597adf 100644 --- a/arch/nios2/include/uapi/asm/ptrace.h +++ b/arch/nios2/include/uapi/asm/ptrace.h @@ -67,53 +67,9 @@ #define NUM_PTRACE_REG (PTR_TLBMISC + 1) -/* this struct defines the way the registers are stored on the - stack during a system call. - - There is a fake_regs in setup.c that has to match pt_regs.*/ - -struct pt_regs { - unsigned long r8; /* r8-r15 Caller-saved GP registers */ - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long r14; - unsigned long r15; - unsigned long r1; /* Assembler temporary */ - unsigned long r2; /* Retval LS 32bits */ - unsigned long r3; /* Retval MS 32bits */ - unsigned long r4; /* r4-r7 Register arguments */ - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long orig_r2; /* Copy of r2 ?? */ - unsigned long ra; /* Return address */ - unsigned long fp; /* Frame pointer */ - unsigned long sp; /* Stack pointer */ - unsigned long gp; /* Global pointer */ - unsigned long estatus; - unsigned long ea; /* Exception return address (pc) */ - unsigned long orig_r7; -}; - -/* - * This is the extended stack used by signal handlers and the context - * switcher: it's pushed after the normal "struct pt_regs". - */ -struct switch_stack { - unsigned long r16; /* r16-r23 Callee-saved GP registers */ - unsigned long r17; - unsigned long r18; - unsigned long r19; - unsigned long r20; - unsigned long r21; - unsigned long r22; - unsigned long r23; - unsigned long fp; - unsigned long gp; - unsigned long ra; +/* User structures for general purpose registers. */ +struct user_pt_regs { + __u32 regs[49]; }; #endif /* __ASSEMBLY__ */ diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h index 7b8bb41867d4..b67944a50927 100644 --- a/arch/nios2/include/uapi/asm/sigcontext.h +++ b/arch/nios2/include/uapi/asm/sigcontext.h @@ -15,14 +15,16 @@ * details. 
*/ -#ifndef _ASM_NIOS2_SIGCONTEXT_H -#define _ASM_NIOS2_SIGCONTEXT_H +#ifndef _UAPI__ASM_SIGCONTEXT_H +#define _UAPI__ASM_SIGCONTEXT_H -#include <asm/ptrace.h> +#include <linux/types.h> + +#define MCONTEXT_VERSION 2 struct sigcontext { - struct pt_regs regs; - unsigned long sc_mask; /* old sigmask */ + int version; + unsigned long gregs[32]; }; #endif diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 2d0ea25be171..dda41e4fe707 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs, struct ucontext *uc, int *pr2) { int temp; - greg_t *gregs = uc->uc_mcontext.gregs; + unsigned long *gregs = uc->uc_mcontext.gregs; int err; /* Always make any pending restarted system calls return -EINTR */ @@ -127,7 +127,7 @@ badframe: static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *)regs - 1; - greg_t *gregs = uc->uc_mcontext.gregs; + unsigned long *gregs = uc->uc_mcontext.gregs; int err = 0; err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 0d231adfe576..0c9b6afe69e9 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -126,7 +126,6 @@ good_area: break; } -survive: /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo @@ -220,11 +219,6 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_global_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index f213f5b4c423..d17437238a2c 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) if (likely(pgd != NULL)) { memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 actual_pgd += PTRS_PER_PGD; /* Populate first pmd with allocated memory. We mark it * with PxD_FLAG_ATTACHED as a signal to the system that this @@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 pgd -= PTRS_PER_PGD; #endif free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); @@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { -#ifdef CONFIG_64BIT if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - /* This is the permanent pmd attached to the pgd; - * cannot free it */ + /* + * This is the permanent pmd attached to the pgd; + * cannot free it. + * Increment the counter to compensate for the decrement + * done by generic mm code. 
+ */ + mm_inc_nr_pmds(mm); return; -#endif free_pages((unsigned long)pmd, PMD_ORDER); } @@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 /* preserve the gateway marker if this is the beginning of * the permanent pmd */ if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 5a8997d63899..8eefb12d1d33 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -55,8 +55,8 @@ #define ENTRY_COMP(_name_) .word sys_##_name_ #endif - ENTRY_SAME(restart_syscall) /* 0 */ - ENTRY_SAME(exit) +90: ENTRY_SAME(restart_syscall) /* 0 */ +91: ENTRY_SAME(exit) ENTRY_SAME(fork_wrapper) ENTRY_SAME(read) ENTRY_SAME(write) @@ -439,7 +439,10 @@ ENTRY_SAME(bpf) ENTRY_COMP(execveat) - /* Nothing yet */ + +.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) +.error "size of syscall table does not fit value of __NR_Linux_syscalls" +.endif #undef ENTRY_SAME #undef ENTRY_DIFF diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 03cd858a401c..4cbe23af400a 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -153,6 +153,7 @@ #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff #define PPC_INST_MFTMR 0x7c0002dc #define PPC_INST_MSGSND 0x7c00019c +#define PPC_INST_MSGCLR 0x7c0001dc #define PPC_INST_MSGSNDP 0x7c00011c #define PPC_INST_MTTMR 0x7c0003dc #define PPC_INST_NOP 0x60000000 @@ -309,6 +310,8 @@ ___PPC_RB(b) | __PPC_EH(eh)) #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ ___PPC_RB(b)) +#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \ + ___PPC_RB(b)) #define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ ___PPC_RB(b)) #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 1c874fb533bb..af56b5c6c81a 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -608,13 +608,16 @@ #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ +#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */ #define SRR1_WAKESYSERR 0x00300000 /* System error */ #define SRR1_WAKEEE 0x00200000 /* External interrupt */ #define SRR1_WAKEMT 0x00280000 /* mtctrl */ #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ +#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */ #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ #define SRR1_WAKERESET 0x00100000 /* System reset */ +#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */ #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, * may not be recoverable */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index f337666768a7..f83046878336 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, + { /* Power8NVL */ + .pvr_mask = 0xffff0000, + .pvr_value = 
0x004c0000, + .cpu_name = "POWER8NVL (raw)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .cpu_user_features2 = COMMON_USER2_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power8", + .oprofile_type = PPC_OPROFILE_INVALID, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .flush_tlb = __flush_tlb_power8, + .machine_check_early = __machine_check_early_realmode_p8, + .platform = "power8", + }, { /* Power8 DD1: Does not support doorbell IPIs */ .pvr_mask = 0xffffff00, .pvr_value = 0x004d0100, diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index f4217819cc31..2128f3a96c32 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -17,6 +17,7 @@ #include <asm/dbell.h> #include <asm/irq_regs.h> +#include <asm/kvm_ppc.h> #ifdef CONFIG_SMP void doorbell_setup_this_cpu(void) @@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs) may_hard_irq_enable(); + kvmppc_set_host_ipi(smp_processor_id(), 0); __this_cpu_inc(irq_stat.doorbell_irqs); smp_ipi_demux(); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c2df8150bd7a..9519e6bdc6d7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1408,7 +1408,7 @@ machine_check_handle_early: bne 9f /* continue in V mode if we are. */ 5: -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER /* * We are coming from kernel context. Check if we are coming from * guest. if yes, then we can continue. We will fall through diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index de4018a1bc4b..de747563d29d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) spin_lock(&vcpu->arch.vpa_update_lock); lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; if (lppaca) - yield_count = lppaca->yield_count; + yield_count = be32_to_cpu(lppaca->yield_count); spin_unlock(&vcpu->arch.vpa_update_lock); return yield_count; } @@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, bool preserve_top32) { + struct kvm *kvm = vcpu->kvm; struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 mask; + mutex_lock(&kvm->lock); spin_lock(&vc->lock); /* * If ILE (interrupt little-endian) has changed, update the * MSR_LE bit in the intr_msr for each vcpu in this vcore. 
*/ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { - struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *vcpu; int i; - mutex_lock(&kvm->lock); kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.vcore != vc) continue; @@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, else vcpu->arch.intr_msr &= ~MSR_LE; } - mutex_unlock(&kvm->lock); } /* @@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, mask &= 0xFFFFFFFF; vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); spin_unlock(&vc->lock); + mutex_unlock(&kvm->lock); } static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bb94e6f20c81..6cbf1630cb70 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Save HEIR (HV emulation assist reg) in emul_inst if this is an HEI (HV emulation interrupt, e40) */ li r3,KVM_INST_FETCH_FAILED + stw r3,VCPU_LAST_INST(r9) cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST bne 11f mfspr r3,SPRN_HEIR diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index fc34025ef822..38a45088f633 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -33,6 +33,8 @@ #include <asm/runlatch.h> #include <asm/code-patching.h> #include <asm/dbell.h> +#include <asm/kvm_ppc.h> +#include <asm/ppc-opcode.h> #include "powernv.h" @@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void) static void pnv_smp_cpu_kill_self(void) { unsigned int cpu; - unsigned long srr1; + unsigned long srr1, wmask; u32 idle_states; /* Standard hot unplug procedure */ @@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void) generic_set_cpu_dead(cpu); smp_wmb(); + wmask = SRR1_WAKEMASK; + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + wmask = SRR1_WAKEMASK_P8; + idle_states = pnv_get_supported_cpuidle_states(); /* We don't want to take decrementer interrupts while we are offline, * so clear LPCR:PECE1. We keep PECE2 enabled. @@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void) * having finished executing in a KVM guest, then srr1 * contains 0. 
*/ - if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { + if ((srr1 & wmask) == SRR1_WAKEEE) { icp_native_flush_interrupt(); local_paca->irq_happened &= PACA_IRQ_HARD_DIS; smp_mb(); + } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { + unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); + asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); + kvmppc_set_host_ipi(cpu, 0); } if (cpu_core_split_required()) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 90cf3dcbd9f2..8f35d525cede 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -25,10 +25,10 @@ static struct kobject *mobility_kobj; struct update_props_workarea { - u32 phandle; - u32 state; - u64 reserved; - u32 nprops; + __be32 phandle; + __be32 state; + __be64 reserved; + __be32 nprops; } __packed; #define NODE_ACTION_MASK 0xff000000 @@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope) return rc; } -static int delete_dt_node(u32 phandle) +static int delete_dt_node(__be32 phandle) { struct device_node *dn; - dn = of_find_node_by_phandle(phandle); + dn = of_find_node_by_phandle(be32_to_cpu(phandle)); if (!dn) return -ENOENT; @@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop, return 0; } -static int update_dt_node(u32 phandle, s32 scope) +static int update_dt_node(__be32 phandle, s32 scope) { struct update_props_workarea *upwa; struct device_node *dn; @@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope) char *prop_data; char *rtas_buf; int update_properties_token; + u32 nprops; u32 vd; update_properties_token = rtas_token("ibm,update-properties"); @@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope) if (!rtas_buf) return -ENOMEM; - dn = of_find_node_by_phandle(phandle); + dn = of_find_node_by_phandle(be32_to_cpu(phandle)); if (!dn) { kfree(rtas_buf); return -ENOENT; @@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope) break; prop_data = rtas_buf + sizeof(*upwa); + nprops = be32_to_cpu(upwa->nprops); /* On the first call to ibm,update-properties for a node the * the first property value descriptor contains an empty @@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope) */ if (*prop_data == 0) { prop_data++; - vd = *(u32 *)prop_data; + vd = be32_to_cpu(*(__be32 *)prop_data); prop_data += vd + sizeof(vd); - upwa->nprops--; + nprops--; } - for (i = 0; i < upwa->nprops; i++) { + for (i = 0; i < nprops; i++) { char *prop_name; prop_name = prop_data; prop_data += strlen(prop_name) + 1; - vd = *(u32 *)prop_data; + vd = be32_to_cpu(*(__be32 *)prop_data); prop_data += sizeof(vd); switch (vd) { @@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope) return 0; } -static int add_dt_node(u32 parent_phandle, u32 drc_index) +static int add_dt_node(__be32 parent_phandle, __be32 drc_index) { struct device_node *dn; struct device_node *parent_dn; int rc; - parent_dn = of_find_node_by_phandle(parent_phandle); + parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle)); if (!parent_dn) return -ENOENT; @@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index) int pseries_devicetree_update(s32 scope) { char *rtas_buf; - u32 *data; + __be32 *data; int update_nodes_token; int rc; @@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope) if (rc && rc != 1) break; - data = (u32 *)rtas_buf + 4; - while (*data & NODE_ACTION_MASK) { + data = (__be32 *)rtas_buf + 4; + while 
(be32_to_cpu(*data) & NODE_ACTION_MASK) { int i; - u32 action = *data & NODE_ACTION_MASK; - int node_count = *data & NODE_COUNT_MASK; + u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK; + u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; data++; for (i = 0; i < node_count; i++) { - u32 phandle = *data++; - u32 drc_index; + __be32 phandle = *data++; + __be32 drc_index; switch (action) { case DELETE_DT_NODE: diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index c9df40b5c0ac..c9c875d9ed31 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -211,7 +211,7 @@ do { \ extern unsigned long mmap_rnd_mask; -#define STACK_RND_MASK (mmap_rnd_mask) +#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) #define ARCH_DLINFO \ do { \ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d84559e31f32..f407bbf5ee94 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -515,15 +515,15 @@ struct s390_io_adapter { #define S390_ARCH_FAC_MASK_SIZE_U64 \ (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) -struct s390_model_fac { - /* facilities used in SIE context */ - __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; - /* subset enabled by kvm */ - __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; +struct kvm_s390_fac { + /* facility list requested by guest */ + __u64 list[S390_ARCH_FAC_LIST_SIZE_U64]; + /* facility mask supported by kvm & hosting machine */ + __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64]; }; struct kvm_s390_cpu_model { - struct s390_model_fac *fac; + struct kvm_s390_fac *fac; struct cpuid cpu_id; unsigned short ibc; }; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index f49b71954654..8fb3802f8fad 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, { int cpu = smp_processor_id(); + S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); if (prev == next) return; if (MACHINE_HAS_TLB_LC) @@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, atomic_dec(&prev->context.attach_count); if (MACHINE_HAS_TLB_LC) cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); - S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); } #define finish_arch_post_lock_switch finish_arch_post_lock_switch diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 7b2ac6e44166..53eacbd4f09b 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end #endif } -static inline void clear_page(void *page) -{ - register unsigned long reg1 asm ("1") = 0; - register void *reg2 asm ("2") = page; - register unsigned long reg3 asm ("3") = 4096; - asm volatile( - " mvcl 2,0" - : "+d" (reg2), "+d" (reg3) : "d" (reg1) - : "memory", "cc"); -} +#define clear_page(page) memset((page), 0, PAGE_SIZE) /* * copy_page uses the mvcl instruction with 0xb0 padding byte in order to diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 82c19899574f..6c79f1b44fe7 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -57,6 +57,44 @@ unsigned long ftrace_plt; +static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) +{ +#ifdef CC_USING_HOTPATCH + /* brcl 0,0 */ + insn->opc = 0xc004; + insn->disp = 0; +#else + /* stg 
r14,8(r15) */ + insn->opc = 0xe3e0; + insn->disp = 0xf0080024; +#endif +} + +static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn) +{ +#ifdef CONFIG_KPROBES + if (insn->opc == BREAKPOINT_INSTRUCTION) + return 1; +#endif + return 0; +} + +static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn) +{ +#ifdef CONFIG_KPROBES + insn->opc = BREAKPOINT_INSTRUCTION; + insn->disp = KPROBE_ON_FTRACE_NOP; +#endif +} + +static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn) +{ +#ifdef CONFIG_KPROBES + insn->opc = BREAKPOINT_INSTRUCTION; + insn->disp = KPROBE_ON_FTRACE_CALL; +#endif +} + int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) { @@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, return -EFAULT; if (addr == MCOUNT_ADDR) { /* Initial code replacement */ -#ifdef CC_USING_HOTPATCH - /* We expect to see brcl 0,0 */ - ftrace_generate_nop_insn(&orig); -#else - /* We expect to see stg r14,8(r15) */ - orig.opc = 0xe3e0; - orig.disp = 0xf0080024; -#endif + ftrace_generate_orig_insn(&orig); ftrace_generate_nop_insn(&new); - } else if (old.opc == BREAKPOINT_INSTRUCTION) { + } else if (is_kprobe_on_ftrace(&old)) { /* * If we find a breakpoint instruction, a kprobe has been * placed at the beginning of the function. We write the @@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, * bytes of the original instruction so that the kprobes * handler can execute a nop, if it reaches this breakpoint. */ - new.opc = orig.opc = BREAKPOINT_INSTRUCTION; - orig.disp = KPROBE_ON_FTRACE_CALL; - new.disp = KPROBE_ON_FTRACE_NOP; + ftrace_generate_kprobe_call_insn(&orig); + ftrace_generate_kprobe_nop_insn(&new); } else { /* Replace ftrace call with a nop. */ ftrace_generate_call_insn(&orig, rec->ip); @@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; - if (old.opc == BREAKPOINT_INSTRUCTION) { + if (is_kprobe_on_ftrace(&old)) { /* * If we find a breakpoint instruction, a kprobe has been * placed at the beginning of the function. We write the @@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * bytes of the original instruction so that the kprobes * handler can execute a brasl if it reaches this breakpoint. */ - new.opc = orig.opc = BREAKPOINT_INSTRUCTION; - orig.disp = KPROBE_ON_FTRACE_NOP; - new.disp = KPROBE_ON_FTRACE_CALL; + ftrace_generate_kprobe_nop_insn(&orig); + ftrace_generate_kprobe_call_insn(&new); } else { /* Replace nop with an ftrace call. 
*/ ftrace_generate_nop_insn(&orig); diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index cb2d51e779df..830066f936c8 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) insn->offset = (entry->target - entry->code) >> 1; } -static void jump_label_bug(struct jump_entry *entry, struct insn *insn) +static void jump_label_bug(struct jump_entry *entry, struct insn *expected, + struct insn *new) { unsigned char *ipc = (unsigned char *)entry->code; - unsigned char *ipe = (unsigned char *)insn; + unsigned char *ipe = (unsigned char *)expected; + unsigned char *ipn = (unsigned char *)new; pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); + pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", + ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); panic("Corrupted kernel text"); } @@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry, } if (init) { if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) - jump_label_bug(entry, &old); + jump_label_bug(entry, &orignop, &new); } else { if (memcmp((void *)entry->code, &old, sizeof(old))) - jump_label_bug(entry, &old); + jump_label_bug(entry, &old, &new); } probe_kernel_write((void *)entry->code, &new, sizeof(new)); } diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 36154a2f1814..2ca95862e336 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { + jump_label_apply_nops(me); vfree(me->arch.syminfo); me->arch.syminfo = NULL; return 0; diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index c3f8d157cb0d..e6a1578fc000 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); static struct attribute *cpumsf_pmu_events_attr[] = { CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), - CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), + NULL, NULL, }; @@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void) return -EINVAL; } - if (si.ad) + if (si.ad) { sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); + cpumsf_pmu_events_attr[1] = + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); + } sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); if (!sfdbg) diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 26108232fcaa..dc488e13b7e3 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id); -void cpu_relax(void) +void notrace cpu_relax(void) { if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) asm volatile("diag 0,0,0x44"); diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 6b09fdffbd2f..ca6294645dd3 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -177,6 +177,17 @@ restart_entry: lhi %r1,1 sigp %r1,%r0,SIGP_SET_ARCHITECTURE sam64 +#ifdef CONFIG_SMP + larl %r1,smp_cpu_mt_shift + icm %r1,15,0(%r1) + jz smt_done + llgfr %r1,%r1 +smt_loop: + sigp %r1,%r0,SIGP_SET_MULTI_THREADING + brc 8,smt_done /* accepted */ + brc 2,smt_loop /* busy, try again */ 
+smt_done: +#endif larl %r1,.Lnew_pgm_check_psw lpswe 0(%r1) pgm_check_entry: diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c3623927563..19e17bd7aec0 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: case KVM_CAP_S390_CSS_SUPPORT: - case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_ENABLE_CAP_VM: @@ -522,7 +521,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, sizeof(struct cpuid)); kvm->arch.model.ibc = proc->ibc; - memcpy(kvm->arch.model.fac->kvm, proc->fac_list, + memcpy(kvm->arch.model.fac->list, proc->fac_list, S390_ARCH_FAC_LIST_SIZE_BYTE); } else ret = -EFAULT; @@ -556,7 +555,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) } memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); proc->ibc = kvm->arch.model.ibc; - memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); + memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) ret = -EFAULT; kfree(proc); @@ -576,10 +575,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) } get_cpu_id((struct cpuid *) &mach->cpuid); mach->ibc = sclp_get_ibc(); - memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, - kvm_s390_fac_list_mask_size() * sizeof(u64)); + memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, + S390_ARCH_FAC_LIST_SIZE_BYTE); memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, - S390_ARCH_FAC_LIST_SIZE_U64); + S390_ARCH_FAC_LIST_SIZE_BYTE); if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) ret = -EFAULT; kfree(mach); @@ -778,15 +777,18 @@ long kvm_arch_vm_ioctl(struct file *filp, static int kvm_s390_query_ap_config(u8 *config) { u32 fcn_code = 0x04000000UL; - u32 cc; + u32 cc = 0; + memset(config, 0, 128); asm volatile( "lgr 0,%1\n" "lgr 2,%2\n" ".long 0xb2af0000\n" /* PQAP(QCI) */ - "ipm %0\n" + "0: ipm %0\n" "srl %0,28\n" - : "=r" (cc) + "1:\n" + EX_TABLE(0b, 1b) + : "+r" (cc) : "r" (fcn_code), "r" (config) : "cc", "0", "2", "memory" ); @@ -839,9 +841,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm) kvm_s390_set_crycb_format(kvm); - /* Disable AES/DEA protected key functions by default */ - kvm->arch.crypto.aes_kw = 0; - kvm->arch.crypto.dea_kw = 0; + /* Enable AES/DEA protected key functions by default */ + kvm->arch.crypto.aes_kw = 1; + kvm->arch.crypto.dea_kw = 1; + get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, + sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); + get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, + sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); return 0; } @@ -886,40 +892,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) /* * The architectural maximum amount of facilities is 16 kbit. To store * this amount, 2 kbyte of memory is required. Thus we need a full - * page to hold the active copy (arch.model.fac->sie) and the current - * facilities set (arch.model.fac->kvm). Its address size has to be + * page to hold the guest facility list (arch.model.fac->list) and the + * facility mask (arch.model.fac->mask). Its address size has to be * 31 bits and word aligned. 
*/ kvm->arch.model.fac = - (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!kvm->arch.model.fac) goto out_nofac; - memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, - S390_ARCH_FAC_LIST_SIZE_U64); - - /* - * If this KVM host runs *not* in a LPAR, relax the facility bits - * of the kvm facility mask by all missing facilities. This will allow - * to determine the right CPU model by means of the remaining facilities. - * Live guest migration must prohibit the migration of KVMs running in - * a LPAR to non LPAR hosts. - */ - if (!MACHINE_IS_LPAR) - for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) - kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; - - /* - * Apply the kvm facility mask to limit the kvm supported/tolerated - * facility list. - */ + /* Populate the facility mask initially. */ + memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, + S390_ARCH_FAC_LIST_SIZE_BYTE); for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { if (i < kvm_s390_fac_list_mask_size()) - kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; + kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; else - kvm->arch.model.fac->kvm[i] = 0UL; + kvm->arch.model.fac->mask[i] = 0UL; } + /* Populate the facility list initially. */ + memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, + S390_ARCH_FAC_LIST_SIZE_BYTE); + kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; @@ -1165,8 +1160,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) mutex_lock(&vcpu->kvm->lock); vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; - memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, - S390_ARCH_FAC_LIST_SIZE_BYTE); vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; mutex_unlock(&vcpu->kvm->lock); @@ -1212,7 +1205,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); } - vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; + vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; spin_lock_init(&vcpu->arch.local_int.lock); vcpu->arch.local_int.float_int = &kvm->arch.float_int; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 985c2114d7ef..c34109aa552d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) /* test availability of facility in a kvm intance */ static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) { - return __test_facility(nr, kvm->arch.model.fac->kvm); + return __test_facility(nr, kvm->arch.model.fac->mask) && + __test_facility(nr, kvm->arch.model.fac->list); } /* are cpu states controlled by user space */ diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index bdd9b5b17e03..351116939ea2 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) * We need to shift the lower 32 facility bits (bit 0-31) from a u64 * into a u32 memory representation. They will remain bits 0-31. 
*/ - fac = *vcpu->kvm->arch.model.fac->sie >> 32; + fac = *vcpu->kvm->arch.model.fac->list >> 32; rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), &fac, sizeof(fac)); if (rc) diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 753a56731951..f0b85443e060 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); return (void __iomem *) addr + offset; } -EXPORT_SYMBOL_GPL(pci_iomap_range); +EXPORT_SYMBOL(pci_iomap_range); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) { @@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) } spin_unlock(&zpci_iomap_lock); } -EXPORT_SYMBOL_GPL(pci_iounmap); +EXPORT_SYMBOL(pci_iounmap); static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) @@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); } -static void zpci_map_resources(struct zpci_dev *zdev) +static void zpci_map_resources(struct pci_dev *pdev) { - struct pci_dev *pdev = zdev->pdev; resource_size_t len; int i; @@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev) } } -static void zpci_unmap_resources(struct zpci_dev *zdev) +static void zpci_unmap_resources(struct pci_dev *pdev) { - struct pci_dev *pdev = zdev->pdev; resource_size_t len; int i; @@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev) zdev->pdev = pdev; pdev->dev.groups = zpci_attr_groups; - zpci_map_resources(zdev); + zpci_map_resources(pdev); for (i = 0; i < PCI_BAR_COUNT; i++) { res = &pdev->resource[i]; @@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev) return 0; } +void pcibios_release_device(struct pci_dev *pdev) +{ + zpci_unmap_resources(pdev); +} + int pcibios_enable_device(struct pci_dev *pdev, int mask) { struct zpci_dev *zdev = get_zdev(pdev); @@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) zdev->pdev = pdev; zpci_debug_init_device(zdev); zpci_fmb_enable_device(zdev); - zpci_map_resources(zdev); return pci_enable_resources(pdev, mask); } @@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev) { struct zpci_dev *zdev = get_zdev(pdev); - zpci_unmap_resources(zdev); zpci_fmb_disable_device(zdev); zpci_debug_exit_device(zdev); zdev->pdev = NULL; @@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev) #ifdef CONFIG_HIBERNATE_CALLBACKS static int zpci_restore(struct device *dev) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct pci_dev *pdev = to_pci_dev(dev); + struct zpci_dev *zdev = get_zdev(pdev); int ret = 0; if (zdev->state != ZPCI_FN_STATE_ONLINE) @@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev) if (ret) goto out; - zpci_map_resources(zdev); + zpci_map_resources(pdev); zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, zdev->start_dma + zdev->iommu_size - 1, (u64) zdev->dma_table); @@ -709,12 +711,14 @@ out: static int zpci_freeze(struct device *dev) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct pci_dev *pdev = to_pci_dev(dev); + struct zpci_dev *zdev = get_zdev(pdev); if (zdev->state != ZPCI_FN_STATE_ONLINE) return 0; zpci_unregister_ioat(zdev, 0); + zpci_unmap_resources(pdev); return clp_disable_fh(zdev); } diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 8aa271b3d1ad..b1bb2b72302c 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ 
-64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, if (copy_from_user(buf, user_buffer, length)) goto out; - memcpy_toio(io_addr, buf, length); - ret = 0; + ret = zpci_memcpy_toio(io_addr, buf, length); out: if (buf != local_buf) kfree(buf); @@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, goto out; io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); - ret = -EFAULT; - if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) + if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) { + ret = -EFAULT; goto out; - - memcpy_fromio(buf, io_addr, length); - - if (copy_to_user(user_buffer, buf, length)) + } + ret = zpci_memcpy_fromio(buf, io_addr, length); + if (ret) goto out; + if (copy_to_user(user_buffer, buf, length)) + ret = -EFAULT; - ret = 0; out: if (buf != local_buf) kfree(buf); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 96ac69c5eba0..efb00ec75805 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -86,6 +86,9 @@ config ARCH_DEFCONFIG default "arch/sparc/configs/sparc32_defconfig" if SPARC32 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 +config ARCH_PROC_KCORE_TEXT + def_bool y + config IOMMU_HELPER bool default y if SPARC64 diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 4f6725ff4c33..f5b6537306f0 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, unsigned long reg_val); #endif + +#define HV_FAST_M7_GET_PERFREG 0x43 +#define HV_FAST_M7_SET_PERFREG 0x44 + +#ifndef __ASSEMBLY__ +unsigned long sun4v_m7_get_perfreg(unsigned long reg_num, + unsigned long *reg_val); +unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, + unsigned long reg_val); +#endif + /* Function numbers for HV_CORE_TRAP. 
*/ #define HV_CORE_SET_VER 0x00 #define HV_CORE_PUTCHAR 0x01 @@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, #define HV_GRP_SDIO 0x0108 #define HV_GRP_SDIO_ERR 0x0109 #define HV_GRP_REBOOT_DATA 0x0110 +#define HV_GRP_M7_PERF 0x0114 #define HV_GRP_NIAG_PERF 0x0200 #define HV_GRP_FIRE_PERF 0x0201 #define HV_GRP_N2_CPU 0x0202 diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 9b672be70dda..50d4840d9aeb 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr) { } -#define ioread8(X) readb(X) -#define ioread16(X) readw(X) -#define ioread16be(X) __raw_readw(X) -#define ioread32(X) readl(X) -#define ioread32be(X) __raw_readl(X) -#define iowrite8(val,X) writeb(val,X) -#define iowrite16(val,X) writew(val,X) -#define iowrite16be(val,X) __raw_writew(val,X) -#define iowrite32(val,X) writel(val,X) -#define iowrite32be(val,X) __raw_writel(val,X) +#define ioread8 readb +#define ioread16 readw +#define ioread16be __raw_readw +#define ioread32 readl +#define ioread32be __raw_readl +#define iowrite8 writeb +#define iowrite16 writew +#define iowrite16be __raw_writew +#define iowrite32 writel +#define iowrite32be __raw_writel /* Create a virtual mapping cookie for an IO port range */ void __iomem *ioport_map(unsigned long port, unsigned int nr); diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h index c100dc27a0a9..176fa0ad19f1 100644 --- a/arch/sparc/include/asm/starfire.h +++ b/arch/sparc/include/asm/starfire.h @@ -12,7 +12,6 @@ extern int this_is_starfire; void check_if_starfire(void); -int starfire_hard_smp_processor_id(void); void starfire_hookup(int); unsigned int starfire_translate(unsigned long imap, unsigned int upaid); diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 88d322b67fac..07cc49e541f4 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs, void do_privop(struct pt_regs *regs); void do_privact(struct pt_regs *regs); void do_cee(struct pt_regs *regs); -void do_cee_tl1(struct pt_regs *regs); -void do_dae_tl1(struct pt_regs *regs); -void do_iae_tl1(struct pt_regs *regs); void do_div0_tl1(struct pt_regs *regs); -void do_fpdis_tl1(struct pt_regs *regs); void do_fpieee_tl1(struct pt_regs *regs); void do_fpother_tl1(struct pt_regs *regs); void do_ill_tl1(struct pt_regs *regs); diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 5c55145bfbf0..662500fa555f 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -48,6 +48,7 @@ static struct api_info api_table[] = { { .group = HV_GRP_VT_CPU, }, { .group = HV_GRP_T5_CPU, }, { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, + { .group = HV_GRP_M7_PERF, }, }; static DEFINE_SPINLOCK(hvapi_lock); diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index caedf8320416..afbaba52d2f1 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg) retl nop ENDPROC(sun4v_t5_set_perfreg) + +ENTRY(sun4v_m7_get_perfreg) + mov %o1, %o4 + mov HV_FAST_M7_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_m7_get_perfreg) + +ENTRY(sun4v_m7_set_perfreg) + mov HV_FAST_M7_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_m7_set_perfreg) diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 
7e967c8018c8..eb978c77c76a 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = { .pcr_nmi_disable = PCR_N4_PICNPT, }; +static u64 m7_pcr_read(unsigned long reg_num) +{ + unsigned long val; + + (void) sun4v_m7_get_perfreg(reg_num, &val); + + return val; +} + +static void m7_pcr_write(unsigned long reg_num, u64 val) +{ + (void) sun4v_m7_set_perfreg(reg_num, val); +} + +static const struct pcr_ops m7_pcr_ops = { + .read_pcr = m7_pcr_read, + .write_pcr = m7_pcr_write, + .read_pic = n4_pic_read, + .write_pic = n4_pic_write, + .nmi_picl_value = n4_picl_value, + .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | + PCR_N4_UTRACE | PCR_N4_TOE | + (26 << PCR_N4_SL_SHIFT)), + .pcr_nmi_disable = PCR_N4_PICNPT, +}; static unsigned long perf_hsvc_group; static unsigned long perf_hsvc_major; @@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void) perf_hsvc_group = HV_GRP_T5_CPU; break; + case SUN4V_CHIP_SPARC_M7: + perf_hsvc_group = HV_GRP_M7_PERF; + break; + default: return -ENODEV; } @@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void) pcr_ops = &n5_pcr_ops; break; + case SUN4V_CHIP_SPARC_M7: + pcr_ops = &m7_pcr_ops; + break; + default: ret = -ENODEV; break; diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 46a5e4508752..86eebfa3b158 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = { .num_pic_regs = 4, }; +static void sparc_m7_write_pmc(int idx, u64 val) +{ + u64 pcr; + + pcr = pcr_ops->read_pcr(idx); + /* ensure ov and ntc are reset */ + pcr &= ~(PCR_N4_OV | PCR_N4_NTC); + + pcr_ops->write_pic(idx, val & 0xffffffff); + + pcr_ops->write_pcr(idx, pcr); +} + +static const struct sparc_pmu sparc_m7_pmu = { + .event_map = niagara4_event_map, + .cache_map = &niagara4_cache_map, + .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), + .read_pmc = sparc_vt_read_pmc, + .write_pmc = sparc_m7_write_pmc, + .upper_shift = 5, + .lower_shift = 5, + .event_mask = 0x7ff, + .user_bit = PCR_N4_UTRACE, + .priv_bit = PCR_N4_STRACE, + + /* We explicitly don't support hypervisor tracing. */ + .hv_bit = 0, + + .irq_bit = PCR_N4_TOE, + .upper_nop = 0, + .lower_nop = 0, + .flags = 0, + .max_hw_events = 4, + .num_pcrs = 4, + .num_pic_regs = 4, +}; static const struct sparc_pmu *sparc_pmu __read_mostly; static u64 event_encoding(u64 event_id, int idx) @@ -960,6 +996,8 @@ out: cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; } +static void sparc_pmu_start(struct perf_event *event, int flags); + /* On this PMU each PIC has it's own PCR control register. 
*/ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) { @@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) struct perf_event *cp = cpuc->event[i]; struct hw_perf_event *hwc = &cp->hw; int idx = hwc->idx; - u64 enc; if (cpuc->current_idx[i] != PIC_NO_INDEX) continue; - sparc_perf_event_set_period(cp, hwc, idx); cpuc->current_idx[i] = idx; - enc = perf_event_get_enc(cpuc->events[i]); - cpuc->pcr[idx] &= ~mask_for_index(idx); - if (hwc->state & PERF_HES_STOPPED) - cpuc->pcr[idx] |= nop_for_index(idx); - else - cpuc->pcr[idx] |= event_encoding(enc, idx); + sparc_pmu_start(cp, PERF_EF_RELOAD); } out: for (i = 0; i < cpuc->n_events; i++) { @@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) int i; local_irq_save(flags); - perf_pmu_disable(event->pmu); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event[i]) { @@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) } } - perf_pmu_enable(event->pmu); local_irq_restore(flags); } @@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) unsigned long flags; local_irq_save(flags); - perf_pmu_disable(event->pmu); n0 = cpuc->n_events; if (n0 >= sparc_pmu->max_hw_events) @@ -1394,7 +1422,6 @@ nocheck: ret = 0; out: - perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } @@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void) sparc_pmu = &niagara4_pmu; return true; } + if (!strcmp(sparc_pmu_type, "sparc-m7")) { + sparc_pmu = &sparc_m7_pmu; + return true; + } return false; } diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 0be7bf978cb1..46a59643bb1c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self) printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", gp->tpc, gp->o7, gp->i7, gp->rpc); } + + touch_nmi_watchdog(); } memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); @@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void) (cpu == this_cpu ? '*' : ' '), cpu, pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); + + touch_nmi_watchdog(); } memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index da6f1a7fc4db..61139d9924ca 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) scheduler_ipi(); } -/* This is a nop because we capture all other cpus - * anyways when making the PROM active. 
- */ +static void stop_this_cpu(void *dummy) +{ + prom_stopself(); +} + void smp_send_stop(void) { + int cpu; + + if (tlb_type == hypervisor) { + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; +#ifdef CONFIG_SUN_LDOMS + if (ldom_domaining_enabled) { + unsigned long hv_err; + hv_err = sun4v_cpu_stop(cpu); + if (hv_err) + printk(KERN_ERR "sun4v_cpu_stop() " + "failed err=%lu\n", hv_err); + } else +#endif + prom_stopcpu_cpuid(cpu); + } + } else + smp_call_function(stop_this_cpu, NULL, 0); } /** diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c index 82281a566bb8..167fdfd9c837 100644 --- a/arch/sparc/kernel/starfire.c +++ b/arch/sparc/kernel/starfire.c @@ -28,11 +28,6 @@ void check_if_starfire(void) this_is_starfire = 1; } -int starfire_hard_smp_processor_id(void) -{ - return upa_readl(0x1fff40000d0UL); -} - /* * Each Starfire board has 32 registers which perform translation * and delivery of traditional interrupt packets into the extended diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index c85403d0496c..30e7ddb27a3a 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second long err; /* No need for backward compatibility. We can start fresh... */ - if (call <= SEMCTL) { + if (call <= SEMTIMEDOP) { switch (call) { case SEMOP: err = sys_semtimedop(first, ptr, diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index a27651e866e7..0e699745d643 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } user_instruction_dump ((unsigned int __user *) regs->tpc); } + if (panic_on_oops) + panic("Fatal exception"); if (regs->tstate & TSTATE_PRIV) do_exit(SIGKILL); do_exit(SIGSEGV); @@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs) die_if_kernel("TL0: Cache Error Exception", regs); } -void do_cee_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: Cache Error Exception", regs); -} - -void do_dae_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: Data Access Exception", regs); -} - -void do_iae_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: Instruction Access Exception", regs); -} - void do_div0_tl1(struct pt_regs *regs) { exception_enter(); @@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs) die_if_kernel("TL1: DIV0 Exception", regs); } -void do_fpdis_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: FPU Disabled", regs); -} - void do_fpieee_tl1(struct pt_regs *regs) { exception_enter(); diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S index b7f6334e159f..857ad4f8905f 100644 --- a/arch/sparc/lib/memmove.S +++ b/arch/sparc/lib/memmove.S @@ -8,9 +8,11 @@ .text ENTRY(memmove) /* o0=dst o1=src o2=len */ - mov %o0, %g1 + brz,pn %o2, 99f + mov %o0, %g1 + cmp %o0, %o1 - bleu,pt %xcc, memcpy + bleu,pt %xcc, 2f add %o1, %o2, %g7 cmp %g7, %o0 bleu,pt %xcc, memcpy @@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ stb %g7, [%o0] bne,pt %icc, 1b sub %o0, 1, %o0 - +99: retl mov %g1, %o0 + + /* We can't just call memcpy for these memmove cases. 
On some + * chips the memcpy uses cache initializing stores and when dst + * and src are close enough, those can clobber the source data + * before we've loaded it in. + */ +2: or %o0, %o1, %g7 + or %o2, %g7, %g7 + andcc %g7, 0x7, %g0 + bne,pn %xcc, 4f + nop + +3: ldx [%o1], %g7 + add %o1, 8, %o1 + subcc %o2, 8, %o2 + add %o0, 8, %o0 + bne,pt %icc, 3b + stx %g7, [%o0 - 0x8] + ba,a,pt %xcc, 99b + +4: ldub [%o1], %g7 + add %o1, 1, %o1 + subcc %o2, 1, %o2 + add %o0, 1, %o0 + bne,pt %icc, 4b + stb %g7, [%o0 - 0x1] + ba,a,pt %xcc, 99b ENDPROC(memmove) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3ea267c53320..4ca0d6ba5ec8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2820,7 +2820,7 @@ static int __init report_memory(void) return 0; } -device_initcall(report_memory); +arch_initcall(report_memory); #ifdef CONFIG_SMP #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index 7083c16cccba..bb1376381985 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -14,13 +14,6 @@ static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; -struct kaslr_setup_data { - __u64 next; - __u32 type; - __u32 len; - __u8 data[1]; -} kaslr_setup_data; - #define I8254_PORT_CONTROL 0x43 #define I8254_PORT_COUNTER0 0x40 #define I8254_CMD_READBACK 0xC0 @@ -302,29 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum, return slots_fetch_random(); } -static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) -{ - struct setup_data *data; - - kaslr_setup_data.type = SETUP_KASLR; - kaslr_setup_data.len = 1; - kaslr_setup_data.next = 0; - kaslr_setup_data.data[0] = enabled; - - data = (struct setup_data *)(unsigned long)params->hdr.setup_data; - - while (data && data->next) - data = (struct setup_data *)(unsigned long)data->next; - - if (data) - data->next = (unsigned long)&kaslr_setup_data; - else - params->hdr.setup_data = (unsigned long)&kaslr_setup_data; - -} - -unsigned char *choose_kernel_location(struct boot_params *params, - unsigned char *input, +unsigned char *choose_kernel_location(unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) @@ -335,17 +306,14 @@ unsigned char *choose_kernel_location(struct boot_params *params, #ifdef CONFIG_HIBERNATION if (!cmdline_find_option_bool("kaslr")) { debug_putstr("KASLR disabled by default...\n"); - add_kaslr_setup_data(params, 0); goto out; } #else if (cmdline_find_option_bool("nokaslr")) { debug_putstr("KASLR disabled by cmdline...\n"); - add_kaslr_setup_data(params, 0); goto out; } #endif - add_kaslr_setup_data(params, 1); /* Record the various known unsafe memory ranges. */ mem_avoid_init((unsigned long)input, input_size, diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 5903089c818f..a950864a64da 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -401,8 +401,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, * the entire decompressed kernel plus relocation table, or the * entire decompressed kernel plus .bss and .brk sections. */ - output = choose_kernel_location(real_mode, input_data, input_len, - output, + output = choose_kernel_location(input_data, input_len, output, output_len > run_size ? 
output_len : run_size); diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index ee3576b2666b..04477d68403f 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -57,8 +57,7 @@ int cmdline_find_option_bool(const char *option); #if CONFIG_RANDOMIZE_BASE /* aslr.c */ -unsigned char *choose_kernel_location(struct boot_params *params, - unsigned char *input, +unsigned char *choose_kernel_location(unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size); @@ -66,8 +65,7 @@ unsigned char *choose_kernel_location(struct boot_params *params, bool has_cpuflag(int flag); #else static inline -unsigned char *choose_kernel_location(struct boot_params *params, - unsigned char *input, +unsigned char *choose_kernel_location(unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 947c6bf52c33..54f60ab41c63 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); if (!src) return -ENOMEM; - assoc = (src + req->cryptlen + auth_tag_len); + assoc = (src + req->cryptlen); scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); scatterwalk_map_and_copy(assoc, req->assoc, 0, req->assoclen, 0); @@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) scatterwalk_done(&src_sg_walk, 0, 0); scatterwalk_done(&assoc_sg_walk, 0, 0); } else { - scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); + scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); kfree(src); } return retval; diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 0dbc08282291..72ba21a8b5fc 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk) preempt_disable(); tsk->thread.fpu_counter = 0; __drop_fpu(tsk); - clear_used_math(); + clear_stopped_child_used_math(tsk); preempt_enable(); } diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 95e11f79f123..f97fbe3abb67 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -51,8 +51,6 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; -extern bool kaslr_enabled; - static inline phys_addr_t get_max_mapped(void) { return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index fa1195dae425..164e3f8d3c3d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); +extern bool mp_should_keep_irq(struct device *dev); + struct pci_raw_ops { int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val); diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 44e6dd7e36a2..225b0988043a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -7,7 +7,6 @@ #define SETUP_DTB 2 #define SETUP_PCI 3 #define 
SETUP_EFI 4 -#define SETUP_KASLR 5 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3d525c6124f6..803b684676ff 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) } /* + * ACPI offers an alternative platform interface model that removes + * ACPI hardware requirements for platforms that do not implement + * the PC Architecture. + * + * We initialize the Hardware-reduced ACPI model here: + */ +static void __init acpi_reduced_hw_init(void) +{ + if (acpi_gbl_reduced_hardware) { + /* + * Override x86_init functions and bypass legacy pic + * in Hardware-reduced ACPI mode + */ + x86_init.timers.timer_init = x86_init_noop; + x86_init.irqs.pre_vector_init = x86_init_noop; + legacy_pic = &null_legacy_pic; + } +} + +/* * If your system is blacklisted here, but you find that acpi=force * works for you, please contact linux-acpi@vger.kernel.org */ @@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void) */ early_acpi_process_madt(); + /* + * Hardware-reduced ACPI mode initialization: + */ + acpi_reduced_hw_init(); + return 0; } diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index c2fd21fed002..017149cded07 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -37,10 +37,12 @@ static const struct apic apic_numachip; static unsigned int get_apic_id(unsigned long x) { unsigned long value; - unsigned int id; + unsigned int id = (x >> 24) & 0xff; - rdmsrl(MSR_FAM10H_NODE_ID, value); - id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U); + if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { + rdmsrl(MSR_FAM10H_NODE_ID, value); + id |= (value << 2) & 0xff00; + } return id; } @@ -155,10 +157,18 @@ static int __init numachip_probe(void) static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) { - if (c->phys_proc_id != node) { - c->phys_proc_id = node; - per_cpu(cpu_llc_id, smp_processor_id()) = node; + u64 val; + u32 nodes = 1; + + this_cpu_write(cpu_llc_id, node); + + /* Account for nodes per socket in multi-core-module processors */ + if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { + rdmsrl(MSR_FAM10H_NODE_ID, val); + nodes = ((val >> 3) & 7) + 1; } + + c->phys_proc_id = node / nodes; } static int __init numachip_system_init(void) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1d74d161687c..2babb393915e 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -364,12 +364,21 @@ system_call_fastpath: * Has incomplete stack frame and undefined top of stack. */ ret_from_sys_call: - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) - jnz int_ret_from_sys_call_fixup /* Go the the slow path */ - LOCKDEP_SYS_EXIT DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF + + /* + * We must check ti flags with interrupts (or at least preemption) + * off because we must *never* return to userspace without + * processing exit work that is enqueued if we're preempted here. + * In particular, returning to userspace with any of the one-shot + * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is + * very bad. 
+ */ + testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + jnz int_ret_from_sys_call_fixup /* Go the the slow path */ + CFI_REMEMBER_STATE /* * sysretq will re-enable interrupts: @@ -386,7 +395,7 @@ ret_from_sys_call: int_ret_from_sys_call_fixup: FIXUP_TOP_OF_STACK %r11, -ARGOFFSET - jmp int_ret_from_sys_call + jmp int_ret_from_sys_call_irqs_off /* Do syscall tracing */ tracesys: @@ -432,6 +441,7 @@ tracesys_phase2: GLOBAL(int_ret_from_sys_call) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF +int_ret_from_sys_call_irqs_off: movl $_TIF_ALLWORK_MASK,%edi /* edi: mask to check */ GLOBAL(int_with_check) diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 9bbb9b35c144..d1ac80b72c72 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -47,13 +47,21 @@ do { \ #ifdef CONFIG_RANDOMIZE_BASE static unsigned long module_load_offset; +static int randomize_modules = 1; /* Mutex protects the module_load_offset. */ static DEFINE_MUTEX(module_kaslr_mutex); +static int __init parse_nokaslr(char *p) +{ + randomize_modules = 0; + return 0; +} +early_param("nokaslr", parse_nokaslr); + static unsigned long int get_module_load_offset(void) { - if (kaslr_enabled) { + if (randomize_modules) { mutex_lock(&module_kaslr_mutex); /* * Calculate the module_load_offset the first time this diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 98dc9317286e..0a2421cca01f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -122,8 +122,6 @@ unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; -bool __read_mostly kaslr_enabled = false; - #ifdef CONFIG_DMI RESERVE_BRK(dmi_alloc, 65536); #endif @@ -427,11 +425,6 @@ static void __init reserve_initrd(void) } #endif /* CONFIG_BLK_DEV_INITRD */ -static void __init parse_kaslr_setup(u64 pa_data, u32 data_len) -{ - kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data)); -} - static void __init parse_setup_data(void) { struct setup_data *data; @@ -457,9 +450,6 @@ static void __init parse_setup_data(void) case SETUP_EFI: parse_efi_setup(pa_data, data_len); break; - case SETUP_KASLR: - parse_kaslr_setup(pa_data, data_len); - break; default: break; } @@ -842,14 +832,10 @@ static void __init trim_low_memory_range(void) static int dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) { - if (kaslr_enabled) - pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", - (unsigned long)&_text - __START_KERNEL, - __START_KERNEL, - __START_KERNEL_map, - MODULES_VADDR-1); - else - pr_emerg("Kernel Offset: disabled\n"); + pr_emerg("Kernel Offset: 0x%lx from 0x%lx " + "(relocation range: 0x%lx-0x%lx)\n", + (unsigned long)&_text - __START_KERNEL, __START_KERNEL, + __START_KERNEL_map, MODULES_VADDR-1); return 0; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9d2073e2ecc9..4ff5d162ff9f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) goto exit; conditional_sti(regs); - if (!user_mode(regs)) + if (!user_mode_vm(regs)) die("bounds", regs, error_code); if (!cpu_feature_enabled(X86_FEATURE_MPX)) { @@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) * then it's very likely the result of an icebp/int01 trap. * User wants a sigtrap for that. */ - if (!dr6 && user_mode(regs)) + if (!dr6 && user_mode_vm(regs)) user_icebp = 1; /* Catch kmemcheck conditions first of all! 
*/ diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 34f66e58a896..cdc6cf903078 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) * thread's fpu state, reconstruct fxstate from the fsave * header. Sanitize the copied state etc. */ - struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; + struct fpu *fpu = &tsk->thread.fpu; struct user_i387_ia32_struct env; int err = 0; @@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) */ drop_fpu(tsk); - if (__copy_from_user(xsave, buf_fx, state_size) || + if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) || __copy_from_user(&env, buf, sizeof(env))) { + fpu_finit(fpu); err = -1; } else { sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); - set_used_math(); } + set_used_math(); if (use_eager_fpu()) { preempt_disable(); math_state_restore(); diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index cc31f7c06d3d..9541ba34126b 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s, return -EOPNOTSUPP; if (len != 1) { + memset(val, 0, len); pr_pic_unimpl("non byte read\n"); return 0; } diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index b1947e0f3e10..46d4449772bc 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, struct kvm_ioapic *ioapic, int vector, int trigger_mode) { int i; + struct kvm_lapic *apic = vcpu->arch.apic; for (i = 0; i < IOAPIC_NUM_PINS; i++) { union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; @@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); spin_lock(&ioapic->lock); - if (trigger_mode != IOAPIC_LEVEL_TRIG) + if (trigger_mode != IOAPIC_LEVEL_TRIG || + kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) continue; ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index bd4e34de24c7..4ee827d7bf36 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) { - if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && - kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { + if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { int trigger_mode; if (apic_test_vector(vector, apic->regs + APIC_TMR)) trigger_mode = IOAPIC_LEVEL_TRIG; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f7b20b417a3a..ae4f6d35d19c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) { unsigned long *msr_bitmap; - if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { + if (is_guest_mode(vcpu)) + msr_bitmap = vmx_msr_bitmap_nested; + else if (irqchip_in_kernel(vcpu->kvm) && + apic_x2apic_mode(vcpu->arch.apic)) { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic; else @@ -2476,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ vmx->nested.nested_vmx_secondary_ctls_high |= - SECONDARY_EXEC_ENABLE_EPT | - SECONDARY_EXEC_UNRESTRICTED_GUEST; + SECONDARY_EXEC_ENABLE_EPT; 
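The ioapic.c and lapic.c hunks above rework how the in-kernel IOAPIC reacts to a local-APIC EOI once the guest enables directed EOI: the lapic now always notifies the ioapic, and the ioapic itself skips clearing level-triggered entries whenever APIC_SPIV_DIRECTED_EOI is set, leaving the EOI to the guest's own IOAPIC EOI-register write. A minimal self-contained sketch of that decision follows; the reduction to a pure function and the standalone SPIV bit definition are simplifications for illustration, not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Bit 12 of the local APIC spurious-interrupt vector register enables
 * directed EOI (mirrors the kernel's APIC_SPIV_DIRECTED_EOI definition). */
#define APIC_SPIV_DIRECTED_EOI (1u << 12)

enum trigger_mode { EDGE_TRIG, LEVEL_TRIG };

/* Should a lapic EOI also clear the matching level-triggered IOAPIC entry?
 * With directed EOI enabled the guest EOIs the IOAPIC itself, so the entry
 * stays pending until that explicit write. */
static bool clear_ioapic_on_lapic_eoi(unsigned int spiv, enum trigger_mode mode)
{
	if (mode != LEVEL_TRIG)
		return false;	/* edge-triggered: nothing latched to clear */
	if (spiv & APIC_SPIV_DIRECTED_EOI)
		return false;	/* guest will write the IOAPIC EOI register */
	return true;
}

int main(void)
{
	printf("%d\n", clear_ioapic_on_lapic_eoi(0, LEVEL_TRIG));	/* 1 */
	printf("%d\n", clear_ioapic_on_lapic_eoi(APIC_SPIV_DIRECTED_EOI,
						 LEVEL_TRIG));		/* 0 */
	return 0;
}

The vmx.c hunk resumes below with the remaining nested-EPT capability bits.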
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | VMX_EPT_INVEPT_BIT; @@ -2491,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) } else vmx->nested.nested_vmx_ept_caps = 0; + if (enable_unrestricted_guest) + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_UNRESTRICTED_GUEST; + /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, vmx->nested.nested_vmx_misc_low, @@ -9218,9 +9224,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) } if (cpu_has_vmx_msr_bitmap() && - exec_control & CPU_BASED_USE_MSR_BITMAPS && - nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) { - vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested)); + exec_control & CPU_BASED_USE_MSR_BITMAPS) { + nested_vmx_merge_msr_bitmap(vcpu, vmcs12); + /* MSR_BITMAP will be set by following vmx_set_efer. */ } else exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bd7a70be41b3..32bf19ef3115 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: - case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_IOEVENTFD_NO_LENGTH: case KVM_CAP_PIT2: diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 3d2612b68694..2fb384724ebb 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void) } } -/* - * Some device drivers assume dev->irq won't change after calling - * pci_disable_device(). So delay releasing of IRQ resource to driver - * unbinding time. Otherwise it will break PM subsystem and drivers - * like xen-pciback etc. 
- */ -static int pci_irq_notifier(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct pci_dev *dev = to_pci_dev(data); - - if (action != BUS_NOTIFY_UNBOUND_DRIVER) - return NOTIFY_DONE; - - if (pcibios_disable_irq) - pcibios_disable_irq(dev); - - return NOTIFY_OK; -} - -static struct notifier_block pci_irq_nb = { - .notifier_call = pci_irq_notifier, - .priority = INT_MIN, -}; - int __init pcibios_init(void) { if (!raw_pci_ops) { @@ -550,9 +525,6 @@ int __init pcibios_init(void) if (pci_bf_sort >= pci_force_bf) pci_sort_breadthfirst(); - - bus_register_notifier(&pci_bus_type, &pci_irq_nb); - return 0; } @@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) return 0; } +void pcibios_disable_device (struct pci_dev *dev) +{ + if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) + pcibios_disable_irq(dev); +} + int pci_ext_cfg_avail(void) { if (raw_pci_ext_ops) diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index efb849323c74..852aa4c92da0 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) static void intel_mid_pci_irq_disable(struct pci_dev *dev) { - if (dev->irq_managed && dev->irq > 0) { + if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && + dev->irq > 0) { mp_unmap_irq(dev->irq); dev->irq_managed = 0; - dev->irq = 0; } } diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index e71b3dbd87b8..5dc6ca5e1741 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev) return 0; } +bool mp_should_keep_irq(struct device *dev) +{ + if (dev->power.is_prepared) + return true; +#ifdef CONFIG_PM + if (dev->power.runtime_status == RPM_SUSPENDING) + return true; +#endif + + return false; +} + static void pirq_disable_irq(struct pci_dev *dev) { - if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) { + if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && + dev->irq_managed && dev->irq) { mp_unmap_irq(dev->irq); dev->irq = 0; dev->irq_managed = 0; diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S index 31776d0efc8c..d7ec4e251c0a 100644 --- a/arch/x86/vdso/vdso32/sigreturn.S +++ b/arch/x86/vdso/vdso32/sigreturn.S @@ -17,6 +17,7 @@ .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function + nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ ALIGN __kernel_sigreturn: .LSTART_sigreturn: diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 740ae3026a14..9f93af56a5fc 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -563,7 +563,7 @@ static bool alloc_p2m(unsigned long pfn) if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) p2m_init(p2m); else - p2m_init_identity(p2m, pfn); + p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1)); spin_lock_irqsave(&p2m_update_lock, flags); diff --git a/block/blk-merge.c b/block/blk-merge.c index fc1ff3b1ea1f..fd3fee81c23c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { struct bio_vec *bprev; - bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; + bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1]; if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) return false; } diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d53a764b05ea..be3290cc0644 100644 --- a/block/blk-mq-tag.c +++ 
b/block/blk-mq-tag.c @@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data, /* * We're out of tags on this hardware queue, kick any * pending IO submits before going to sleep waiting for - * some to complete. + * some to complete. Note that hctx can be NULL here for + * reserved tag allocation. */ - blk_mq_run_hw_queue(hctx, false); + if (hctx) + blk_mq_run_hw_queue(hctx, false); /* * Retry tag allocation after running the hardware queue, diff --git a/block/blk-mq.c b/block/blk-mq.c index 4f4bea21052e..b7b8933ec241 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) */ if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) - goto err_map; + goto err_mq_usage; setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); blk_queue_rq_timeout(q, 30000); @@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) blk_mq_init_cpu_queues(q, set->nr_hw_queues); if (blk_mq_init_hw_queues(q, set)) - goto err_hw; + goto err_mq_usage; mutex_lock(&all_q_mutex); list_add_tail(&q->all_q_node, &all_q_list); @@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) return q; -err_hw: +err_mq_usage: blk_cleanup_queue(q); err_hctxs: kfree(map); diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 657964e8ab7e..37fb19047603 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -65,6 +65,7 @@ struct lpss_private_data; struct lpss_device_desc { unsigned int flags; + const char *clk_con_id; unsigned int prv_offset; size_t prv_size_override; void (*setup)(struct lpss_private_data *pdata); @@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = { static struct lpss_device_desc lpt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, + .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; @@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = { static struct lpss_device_desc byt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, + .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; @@ -313,7 +316,7 @@ out: return PTR_ERR(clk); pdata->clk = clk; - clk_register_clkdev(clk, NULL, devname); + clk_register_clkdev(clk, dev_desc->clk_con_id, devname); return 0; } diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index e7f718d6918a..b1def411c0b8 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev) if (!pin || !dev->irq_managed || dev->irq <= 0) return; + /* Keep IOAPIC pin configuration when suspending */ + if (dev->dev.power.is_prepared) + return; +#ifdef CONFIG_PM + if (dev->dev.power.runtime_status == RPM_SUSPENDING) + return; +#endif + entry = acpi_pci_irq_lookup(dev, pin); if (!entry) return; @@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev) if (gsi >= 0) { acpi_unregister_gsi(gsi); dev->irq_managed = 0; - dev->irq = 0; } } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4c35f0822d06..ef150ebb4c30 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4737,7 +4737,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) return NULL; /* libsas case */ - if (!ap->scsi_host) { + if (ap->flags & ATA_FLAG_SAS_HOST) { tag = ata_sas_allocate_tag(ap); if (tag < 0) 
return NULL; @@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) tag = qc->tag; if (likely(ata_tag_valid(tag))) { qc->tag = ATA_TAG_POISON; - if (!ap->scsi_host) + if (ap->flags & ATA_FLAG_SAS_HOST) ata_sas_free_tag(tag, ap); } } diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index f9054cd36a72..5389579c5120 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -869,6 +869,8 @@ try_offline_again: */ ata_msleep(ap, 1); + sata_set_spd(link); + /* * Now, bring the host controller online again, this can take time * as PHY reset and communication establishment, 1st D2H FIS and diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index beb8b27d4621..a13587b5c2be 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops; extern struct regcache_ops regcache_lzo_ops; extern struct regcache_ops regcache_flat_ops; +static inline const char *regmap_name(const struct regmap *map) +{ + if (map->dev) + return dev_name(map->dev); + + return map->name; +} + #endif diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index d453a2c98ad0..81751a49d8bf 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (pos == 0) { memmove(blk + offset * map->cache_word_size, blk, rbnode->blklen * map->cache_word_size); - bitmap_shift_right(present, present, offset, blklen); + bitmap_shift_left(present, present, offset, blklen); } /* update the rbnode block, its size and the base register */ diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index f373c35f9e1d..87db9893b463 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -218,7 +218,7 @@ int regcache_read(struct regmap *map, ret = map->cache_ops->read(map, reg, value); if (ret == 0) - trace_regmap_reg_read_cache(map->dev, reg, *value); + trace_regmap_reg_read_cache(map, reg, *value); return ret; } @@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map) dev_dbg(map->dev, "Syncing %s cache\n", map->cache_ops->name); name = map->cache_ops->name; - trace_regcache_sync(map->dev, name, "start"); + trace_regcache_sync(map, name, "start"); if (!map->cache_dirty) goto out; @@ -346,7 +346,7 @@ out: regmap_async_complete(map); - trace_regcache_sync(map->dev, name, "stop"); + trace_regcache_sync(map, name, "stop"); return ret; } @@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, name = map->cache_ops->name; dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); - trace_regcache_sync(map->dev, name, "start region"); + trace_regcache_sync(map, name, "start region"); if (!map->cache_dirty) goto out; @@ -401,7 +401,7 @@ out: regmap_async_complete(map); - trace_regcache_sync(map->dev, name, "stop region"); + trace_regcache_sync(map, name, "stop region"); return ret; } @@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min, map->lock(map->lock_arg); - trace_regcache_drop_region(map->dev, min, max); + trace_regcache_drop_region(map, min, max); ret = map->cache_ops->drop(map, min, max); @@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable) map->lock(map->lock_arg); WARN_ON(map->cache_bypass && enable); map->cache_only = enable; - trace_regmap_cache_only(map->dev, enable); + trace_regmap_cache_only(map, enable); 
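The regcache.c changes above switch every trace call from map->dev to the struct regmap itself, which pairs with the regmap_name() helper added to drivers/base/regmap/internal.h earlier in this series: a map created without a struct device can still be labelled by its own name rather than dereferencing a NULL dev. A simplified, self-contained userspace rendition of that fallback follows; the plain structs and printf calls stand in for the kernel types and tracepoints.

#include <stdio.h>

/* Stand-ins for the kernel's struct device / struct regmap fields that the
 * real regmap_name() helper consults. */
struct device { const char *name; };
struct regmap { struct device *dev; const char *name; };

/* Same fallback as the helper added to internal.h above: prefer the owning
 * device's name, otherwise fall back to the map's own name. */
static const char *regmap_name(const struct regmap *map)
{
	if (map->dev)
		return map->dev->name;
	return map->name;
}

int main(void)
{
	struct device spi0 = { .name = "spi0.0" };
	struct regmap with_dev = { .dev = &spi0, .name = "codec" };
	struct regmap no_dev = { .dev = NULL, .name = "dummy-nodev-map" };

	printf("%s\n", regmap_name(&with_dev));	/* spi0.0 */
	printf("%s\n", regmap_name(&no_dev));	/* dummy-nodev-map */
	return 0;
}

The regcache.c hunk resumes below with the remainder of regcache_cache_only().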
map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_only); @@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable) map->lock(map->lock_arg); WARN_ON(map->cache_only && enable); map->cache_bypass = enable; - trace_regmap_cache_bypass(map->dev, enable); + trace_regmap_cache_bypass(map, enable); map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_bypass); @@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block, for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); - if (!regcache_reg_present(cache_present, i)) + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) continue; val = regcache_get_val(map, block, i); @@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block, for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); - if (!regcache_reg_present(cache_present, i)) { + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) { ret = regcache_sync_block_raw_flush(map, &data, base, regtmp); if (ret != 0) diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 6299a50a5960..a6c3f75b4b01 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, goto err_alloc; } - ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, + ret = request_threaded_irq(irq, NULL, regmap_irq_thread, + irq_flags | IRQF_ONESHOT, chip->name, d); if (ret != 0) { dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f99b098ddabf..dbfe6a69c3da 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, if (map->async && map->bus->async_write) { struct regmap_async *async; - trace_regmap_async_write_start(map->dev, reg, val_len); + trace_regmap_async_write_start(map, reg, val_len); spin_lock_irqsave(&map->async_lock, flags); async = list_first_entry_or_null(&map->async_free, @@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, return ret; } - trace_regmap_hw_write_start(map->dev, reg, - val_len / map->format.val_bytes); + trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); /* If we're doing a single register write we can probably just * send the work_buf directly, otherwise try to do a gather @@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, kfree(buf); } - trace_regmap_hw_write_done(map->dev, reg, - val_len / map->format.val_bytes); + trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); return ret; } @@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, map->format.format_write(map, reg, val); - trace_regmap_hw_write_start(map->dev, reg, 1); + trace_regmap_hw_write_start(map, reg, 1); ret = map->bus->write(map->bus_context, map->work_buf, map->format.buf_size); - trace_regmap_hw_write_done(map->dev, reg, 1); + trace_regmap_hw_write_done(map, reg, 1); return ret; } @@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg, dev_info(map->dev, "%x <= %x\n", reg, val); #endif - trace_regmap_reg_write(map->dev, reg, val); + trace_regmap_reg_write(map, reg, val); return map->reg_write(context, reg, val); } @@ -1773,7 +1771,7 @@ 
static int _regmap_raw_multi_reg_write(struct regmap *map, for (i = 0; i < num_regs; i++) { int reg = regs[i].reg; int val = regs[i].def; - trace_regmap_hw_write_start(map->dev, reg, 1); + trace_regmap_hw_write_start(map, reg, 1); map->format.format_reg(u8, reg, map->reg_shift); u8 += reg_bytes + pad_bytes; map->format.format_val(u8, val, 0); @@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, for (i = 0; i < num_regs; i++) { int reg = regs[i].reg; - trace_regmap_hw_write_done(map->dev, reg, 1); + trace_regmap_hw_write_done(map, reg, 1); } return ret; } @@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, */ u8[0] |= map->read_flag_mask; - trace_regmap_hw_read_start(map->dev, reg, - val_len / map->format.val_bytes); + trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); ret = map->bus->read(map->bus_context, map->work_buf, map->format.reg_bytes + map->format.pad_bytes, val, val_len); - trace_regmap_hw_read_done(map->dev, reg, - val_len / map->format.val_bytes); + trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); return ret; } @@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, dev_info(map->dev, "%x => %x\n", reg, *val); #endif - trace_regmap_reg_read(map->dev, reg, *val); + trace_regmap_reg_read(map, reg, *val); if (!map->cache_bypass) regcache_write(map, reg, *val); @@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret) struct regmap *map = async->map; bool wake; - trace_regmap_async_io_complete(map->dev); + trace_regmap_async_io_complete(map); spin_lock(&map->async_lock); list_move(&async->list, &map->async_free); @@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map) if (!map->bus || !map->bus->async_write) return 0; - trace_regmap_async_complete_start(map->dev); + trace_regmap_async_complete_start(map); wait_event(map->async_waitq, regmap_async_is_done(map)); @@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map) map->async_ret = 0; spin_unlock_irqrestore(&map->async_lock, flags); - trace_regmap_async_complete_done(map->dev); + trace_regmap_async_complete_done(map); return ret; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 4bc2a5cb9935..a98c41f72c63 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -803,10 +803,6 @@ static int __init nbd_init(void) return -EINVAL; } - nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); - if (!nbd_dev) - return -ENOMEM; - part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); @@ -828,6 +824,10 @@ static int __init nbd_init(void) if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; + nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); + if (!nbd_dev) + return -ENOMEM; + for (i = 0; i < nbds_max; i++) { struct gendisk *disk = alloc_disk(1 << part_shift); if (!disk) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index ceb32dd52a6c..e23be20a3417 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) } get_device(dev->device); + INIT_LIST_HEAD(&dev->node); INIT_WORK(&dev->probe_work, nvme_async_probe); schedule_work(&dev->probe_work); return 0; diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 1d278ccd751f..e096e9cddb40 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -140,24 +140,24 @@ static int 
tpm_dev_add_device(struct tpm_chip *chip) { int rc; - rc = device_add(&chip->dev); + rc = cdev_add(&chip->cdev, chip->dev.devt, 1); if (rc) { dev_err(&chip->dev, - "unable to device_register() %s, major %d, minor %d, err=%d\n", + "unable to cdev_add() %s, major %d, minor %d, err=%d\n", chip->devname, MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); + device_unregister(&chip->dev); return rc; } - rc = cdev_add(&chip->cdev, chip->dev.devt, 1); + rc = device_add(&chip->dev); if (rc) { dev_err(&chip->dev, - "unable to cdev_add() %s, major %d, minor %d, err=%d\n", + "unable to device_register() %s, major %d, minor %d, err=%d\n", chip->devname, MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); - device_unregister(&chip->dev); return rc; } @@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip) * tpm_chip_register() - create a character device for the TPM chip * @chip: TPM chip to use. * - * Creates a character device for the TPM chip and adds sysfs interfaces for - * the device, PPI and TCPA. As the last step this function adds the - * chip to the list of TPM chips available for use. + * Creates a character device for the TPM chip and adds sysfs attributes for + * the device. As the last step this function adds the chip to the list of TPM + * chips available for in-kernel use. * - * NOTE: This function should be only called after the chip initialization - * is complete. - * - * Called from tpm_<specific>.c probe function only for devices - * the driver has determined it should claim. Prior to calling - * this function the specific probe function has called pci_enable_device - * upon errant exit from this function specific probe function should call - * pci_disable_device + * This function should be only called after the chip initialization is + * complete. */ int tpm_chip_register(struct tpm_chip *chip) { int rc; - rc = tpm_dev_add_device(chip); - if (rc) - return rc; - /* Populate sysfs for TPM1 devices. */ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { rc = tpm_sysfs_add_device(chip); @@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip) chip->bios_dir = tpm_bios_log_setup(chip->devname); } + rc = tpm_dev_add_device(chip); + if (rc) + return rc; + /* Make the chip available. 
*/ spin_lock(&driver_lock); list_add_rcu(&chip->list, &tpm_chip_list); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index b1e53e3aece5..42ffa5e7a1e0 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm; struct ibmvtpm_crq crq; - u64 *word = (u64 *) &crq; + __be64 *word = (__be64 *)&crq; int rc; ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); @@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); crq.valid = (u8)IBMVTPM_VALID_CMD; crq.msg = (u8)VTPM_TPM_COMMAND; - crq.len = (u16)count; - crq.data = ibmvtpm->rtce_dma_handle; + crq.len = cpu_to_be16(count); + crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), - cpu_to_be64(word[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), + be64_to_cpu(word[1])); if (rc != H_SUCCESS) { dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); rc = 0; diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index f595f14426bf..6af92890518f 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h @@ -22,9 +22,9 @@ struct ibmvtpm_crq { u8 valid; u8 msg; - u16 len; - u32 data; - u64 reserved; + __be16 len; + __be32 data; + __be64 reserved; } __attribute__((packed, aligned(8))); struct ibmvtpm_crq_queue { diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index fae2dbbf5745..72d7028f779b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -142,6 +142,7 @@ struct ports_device { * notification */ struct work_struct control_work; + struct work_struct config_work; struct list_head ports; @@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev) portdev = vdev->priv; + if (!use_multiport(portdev)) + schedule_work(&portdev->config_work); +} + +static void config_work_handler(struct work_struct *work) +{ + struct ports_device *portdev; + + portdev = container_of(work, struct ports_device, control_work); if (!use_multiport(portdev)) { + struct virtio_device *vdev; struct port *port; u16 rows, cols; + vdev = portdev->vdev; virtio_cread(vdev, struct virtio_console_config, cols, &cols); virtio_cread(vdev, struct virtio_console_config, rows, &rows); @@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev) virtio_device_ready(portdev->vdev); + INIT_WORK(&portdev->config_work, &config_work_handler); + INIT_WORK(&portdev->control_work, &control_work_handler); + if (multiport) { unsigned int nr_added_bufs; spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); - INIT_WORK(&portdev->control_work, &control_work_handler); nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); @@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev) /* Finish up work that's lined up */ if (use_multiport(portdev)) cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) unplug_port(port); @@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev) virtqueue_disable_cb(portdev->c_ivq); cancel_work_sync(&portdev->control_work); + cancel_work_sync(&portdev->config_work); /* * Once more: if control_work_handler() was running, it would * enable the 
cb as the last step. diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index db7f8bce7467..25006a8bb8e6 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, divider->flags); } -/* - * The reverse of DIV_ROUND_UP: The maximum number which - * divided by m is r - */ -#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) - static bool _is_valid_table_div(const struct clk_div_table *table, unsigned int div) { @@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table, unsigned long parent_rate, unsigned long rate, unsigned long flags) { - int up, down, div; + int up, down; + unsigned long up_rate, down_rate; - up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); + up = DIV_ROUND_UP(parent_rate, rate); + down = parent_rate / rate; if (flags & CLK_DIVIDER_POWER_OF_TWO) { - up = __roundup_pow_of_two(div); - down = __rounddown_pow_of_two(div); + up = __roundup_pow_of_two(up); + down = __rounddown_pow_of_two(down); } else if (table) { - up = _round_up_table(table, div); - down = _round_down_table(table, div); + up = _round_up_table(table, up); + down = _round_down_table(table, down); } - return (up - div) <= (div - down) ? up : down; + up_rate = DIV_ROUND_UP(parent_rate, up); + down_rate = DIV_ROUND_UP(parent_rate, down); + + return (rate - up_rate) <= (down_rate - rate) ? up : down; } static int _div_round(const struct clk_div_table *table, @@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, return i; } parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), - MULT_ROUND_UP(rate, i)); + rate * i); now = DIV_ROUND_UP(parent_rate, i); if (_is_best_div(rate, now, best, flags)) { bestdiv = i; @@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, bestdiv = readl(divider->reg) >> divider->shift; bestdiv &= div_mask(divider->width); bestdiv = _get_div(divider->table, bestdiv, divider->flags); - return bestdiv; + return DIV_ROUND_UP(*prate, bestdiv); } return divider_round_rate(hw, rate, prate, divider->table, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index eb0152961d3c..237f23f68bfc 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk) return rate; } -EXPORT_SYMBOL_GPL(clk_core_get_rate); /** * clk_get_rate - return the rate of clk @@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk) } /** + * clk_is_match - check if two clk's point to the same hardware clock + * @p: clk compared against q + * @q: clk compared against p + * + * Returns true if the two struct clk pointers both point to the same hardware + * clock node. Put differently, returns true if struct clk *p and struct clk *q + * share the same struct clk_core object. + * + * Returns false otherwise. Note that two NULL clks are treated as matching. + */ +bool clk_is_match(const struct clk *p, const struct clk *q) +{ + /* trivial case: identical struct clk's or both NULL */ + if (p == q) + return true; + + /* true if clk->core pointers match. 
Avoid derefing garbage */ + if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) + if (p->core == q->core) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(clk_is_match); + +/** * __clk_init - initialize the data structures in a struct clk * @dev: device initializing this clk, placeholder for now * @clk: clk being initialized diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index b0b562b9ce0e..e60feffc10a1 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -48,6 +48,17 @@ static struct clk_pll pll3 = { }, }; +static struct clk_regmap pll4_vote = { + .enable_reg = 0x34c0, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "pll4_vote", + .parent_names = (const char *[]){ "pll4" }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + static struct clk_pll pll8 = { .l_reg = 0x3144, .m_reg = 0x3148, @@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = { static struct clk_regmap *gcc_msm8960_clks[] = { [PLL3] = &pll3.clkr, + [PLL4_VOTE] = &pll4_vote, [PLL8] = &pll8.clkr, [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, @@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = { static struct clk_regmap *gcc_apq8064_clks[] = { [PLL3] = &pll3.clkr, + [PLL4_VOTE] = &pll4_vote, [PLL8] = &pll8.clkr, [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 121ffde25dc3..c9ff27b4648b 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = { .remove = lcc_ipq806x_remove, .driver = { .name = "lcc-ipq806x", - .owner = THIS_MODULE, .of_match_table = lcc_ipq806x_match_table, }, }; diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index a75a408cfccd..e2c863295f00 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c @@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = { .mnctr_en_bit = 8, .mnctr_reset_bit = 7, .mnctr_mode_shift = 5, - .n_val_shift = 16, - .m_val_shift = 16, + .n_val_shift = 24, + .m_val_shift = 8, .width = 8, }, .p = { @@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev) return PTR_ERR(regmap); /* Use the correct frequency plan depending on speed of PLL4 */ - val = regmap_read(regmap, 0x4, &val); + regmap_read(regmap, 0x4, &val); if (val == 0x12) { slimbus_src.freq_tbl = clk_tbl_aif_osr_492; mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; @@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = { .remove = lcc_msm8960_remove, .driver = { .name = "lcc-msm8960", - .owner = THIS_MODULE, .of_match_table = lcc_msm8960_match_table, }, }; diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 6ef89639a9f6..d21640634adf 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - v |= (1 << FAPLL_MAIN_PLLEN); + v |= FAPLL_MAIN_PLLEN; writel_relaxed(v, fd->base); return 0; @@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - v &= ~(1 << FAPLL_MAIN_PLLEN); + v &= ~FAPLL_MAIN_PLLEN; writel_relaxed(v, fd->base); } @@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - return v & (1 << 
FAPLL_MAIN_PLLEN); + return v & FAPLL_MAIN_PLLEN; } static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 68161f7a07d6..a0b036ccb118 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI config SH_TIMER_CMT bool "Renesas CMT timer driver" if COMPILE_TEST depends on GENERIC_CLOCKEVENTS + depends on HAS_IOMEM default SYS_SUPPORTS_SH_CMT help This enables build of a clocksource and clockevent driver for @@ -201,6 +202,7 @@ config SH_TIMER_CMT config SH_TIMER_MTU2 bool "Renesas MTU2 timer driver" if COMPILE_TEST depends on GENERIC_CLOCKEVENTS + depends on HAS_IOMEM default SYS_SUPPORTS_SH_MTU2 help This enables build of a clockevent driver for the Multi-Function @@ -210,6 +212,7 @@ config SH_TIMER_MTU2 config SH_TIMER_TMU bool "Renesas TMU timer driver" if COMPILE_TEST depends on GENERIC_CLOCKEVENTS + depends on HAS_IOMEM default SYS_SUPPORTS_SH_TMU help This enables build of a clocksource and clockevent driver for diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c index bba62f9deefb..ec57ba2bbd87 100644 --- a/drivers/clocksource/time-efm32.c +++ b/drivers/clocksource/time-efm32.c @@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np) clock_event_ddata.base = base; clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); - setup_irq(irq, &efm32_clock_event_irq); - clockevents_config_and_register(&clock_event_ddata.evtdev, DIV_ROUND_CLOSEST(rate, 1024), 0xf, 0xffff); + setup_irq(irq, &efm32_clock_event_irq); + return 0; err_get_irq: diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 02268448dc85..58597fbcc046 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -17,7 +17,6 @@ #include <linux/irq.h> #include <linux/irqreturn.h> #include <linux/reset.h> -#include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = { .dev_id = &sun5i_clockevent, }; -static u64 sun5i_timer_sched_read(void) -{ - return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); -} - static void __init sun5i_timer_init(struct device_node *node) { struct reset_control *rstc; @@ -172,16 +166,11 @@ static void __init sun5i_timer_init(struct device_node *node) writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, timer_base + TIMER_CTL_REG(1)); - sched_clock_register(sun5i_timer_sched_read, 32, rate); clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, rate, 340, 32, clocksource_mmio_readl_down); ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); - ret = setup_irq(irq, &sun5i_timer_irq); - if (ret) - pr_warn("failed to setup irq %d\n", irq); - /* Enable timer0 interrupt */ val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); @@ -191,6 +180,10 @@ static void __init sun5i_timer_init(struct device_node *node) clockevents_config_and_register(&sun5i_clockevent, rate, TIMER_SYNC_TICKS, 0xffffffff); + + ret = setup_irq(irq, &sun5i_timer_irq); + if (ret) + pr_warn("failed to setup irq %d\n", irq); } CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", sun5i_timer_init); diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c index 38e68618513a..980151f34707 100644 --- a/drivers/cpuidle/cpuidle-mvebu-v7.c +++ b/drivers/cpuidle/cpuidle-mvebu-v7.c @@ 
-37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev, deepidle = true; ret = mvebu_v7_cpu_suspend(deepidle); + cpu_pm_exit(); + if (ret) return ret; - cpu_pm_exit(); - return index; } @@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = { .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = mvebu_v7_enter_idle, - .exit_latency = 10, + .exit_latency = 100, .power_usage = 50, - .target_residency = 100, + .target_residency = 1000, .name = "MV CPU IDLE", .desc = "CPU power down", }, .states[2] = { .enter = mvebu_v7_enter_idle, - .exit_latency = 100, + .exit_latency = 1000, .power_usage = 5, - .target_residency = 1000, + .target_residency = 10000, .flags = MVEBU_V7_FLAG_DEEP_IDLE, .name = "MV CPU DEEP IDLE", .desc = "CPU and L2 Fabric power down", diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 4a5fd245014e..83aa55d6fa5d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -97,6 +97,12 @@ #define DRIVER_NAME "pl08xdmac" +#define PL80X_DMA_BUSWIDTHS \ + BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) + static struct amba_driver pl08x_amba_driver; struct pl08x_driver_data; @@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->memcpy.device_pause = pl08x_pause; pl08x->memcpy.device_resume = pl08x_resume; pl08x->memcpy.device_terminate_all = pl08x_terminate_all; + pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; + pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; + pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); + pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; /* Initialize slave engine */ dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); @@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->slave.device_pause = pl08x_pause; pl08x->slave.device_resume = pl08x_resume; pl08x->slave.device_terminate_all = pl08x_terminate_all; + pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; + pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; + pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; /* Get the platform data */ pl08x->pd = dev_get_platdata(&adev->dev); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 1e1a4c567542..0b4fc6fb48ce 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) } /* - * atc_get_current_descriptors - - * locate the descriptor which equal to physical address in DSCR - * @atchan: the channel we want to start - * @dscr_addr: physical descriptor address in DSCR + * atc_get_desc_by_cookie - get the descriptor of a cookie + * @atchan: the DMA channel + * @cookie: the cookie to get the descriptor for */ -static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, - u32 dscr_addr) +static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, + dma_cookie_t cookie) { - struct at_desc *desc, *_desc, *child, *desc_cur = NULL; + struct at_desc *desc, *_desc; - list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { - if (desc->lli.dscr == dscr_addr) { - desc_cur = desc; - break; - } + list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { + if (desc->txd.cookie == cookie) + return desc; + } - 
list_for_each_entry(child, &desc->tx_list, desc_node) { - if (child->lli.dscr == dscr_addr) { - desc_cur = child; - break; - } - } + list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { + if (desc->txd.cookie == cookie) + return desc; } - return desc_cur; + return NULL; } -/* - * atc_get_bytes_left - - * Get the number of bytes residue in dma buffer, - * @chan: the channel we want to start +/** + * atc_calc_bytes_left - calculates the number of bytes left according to the + * value read from CTRLA. + * + * @current_len: the number of bytes left before reading CTRLA + * @ctrla: the value of CTRLA + * @desc: the descriptor containing the transfer width + */ +static inline int atc_calc_bytes_left(int current_len, u32 ctrla, + struct at_desc *desc) +{ + return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); +} + +/** + * atc_calc_bytes_left_from_reg - calculates the number of bytes left according + * to the current value of CTRLA. + * + * @current_len: the number of bytes left before reading CTRLA + * @atchan: the channel to read CTRLA for + * @desc: the descriptor containing the transfer width + */ +static inline int atc_calc_bytes_left_from_reg(int current_len, + struct at_dma_chan *atchan, struct at_desc *desc) +{ + u32 ctrla = channel_readl(atchan, CTRLA); + + return atc_calc_bytes_left(current_len, ctrla, desc); +} + +/** + * atc_get_bytes_left - get the number of bytes residue for a cookie + * @chan: DMA channel + * @cookie: transaction identifier to check status of */ -static int atc_get_bytes_left(struct dma_chan *chan) +static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; struct at_desc *desc_first = atc_first_active(atchan); - struct at_desc *desc_cur; - int ret = 0, count = 0; + struct at_desc *desc; + int ret; + u32 ctrla, dscr; /* - * Initialize necessary values in the first time. - * remain_desc record remain desc length. + * If the cookie doesn't match to the currently running transfer then + * we can return the total length of the associated DMA transfer, + * because it is still queued. */ - if (atchan->remain_desc == 0) - /* First descriptor embedds the transaction length */ - atchan->remain_desc = desc_first->len; + desc = atc_get_desc_by_cookie(atchan, cookie); + if (desc == NULL) + return -EINVAL; + else if (desc != desc_first) + return desc->total_len; - /* - * This happens when current descriptor transfer complete. - * The residual buffer size should reduce current descriptor length. - */ - if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { - clear_bit(ATC_IS_BTC, &atchan->status); - desc_cur = atc_get_current_descriptors(atchan, - channel_readl(atchan, DSCR)); - if (!desc_cur) { - ret = -EINVAL; - goto out; - } + /* cookie matches to the currently running transfer */ + ret = desc_first->total_len; - count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) - << desc_first->tx_width; - if (atchan->remain_desc < count) { - ret = -EINVAL; - goto out; + if (desc_first->lli.dscr) { + /* hardware linked list transfer */ + + /* + * Calculate the residue by removing the length of the child + * descriptors already transferred from the total length. + * To get the current child descriptor we can use the value of + * the channel's DSCR register and compare it against the value + * of the hardware linked list structure of each child + * descriptor. 
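For readers following the at_hdmac residue rework in this hunk, the sketch below models the walk in plain C. The type and field names (model_desc, hw_dscr, cur_btsize, tx_width) are illustrative placeholders rather than the driver's real structures; the point is only the arithmetic: start from the total transfer length, subtract the length of every child descriptor the controller has already finished (found by comparing each descriptor's next-descriptor pointer against the current DSCR value), then refine the in-flight descriptor with the count reported in CTRLA, converted to bytes by the transfer-width shift exactly as atc_calc_bytes_left() does.

#include <stddef.h>

struct model_desc {
	unsigned int hw_dscr;       /* pointer to the *next* descriptor, 0 for the last one */
	size_t len;                 /* bytes covered by this descriptor */
	struct model_desc *next;
};

/*
 * Simplified residue model: the descriptor whose hw_dscr equals the value
 * currently held in the channel's DSCR register is the one in flight.
 */
size_t model_bytes_left(const struct model_desc *first, size_t total_len,
			unsigned int cur_dscr, unsigned int cur_btsize,
			unsigned int tx_width)
{
	const struct model_desc *d = first;
	size_t left = total_len;

	/* Descriptors fetched before the current one are fully done. */
	while (d && d->hw_dscr != cur_dscr) {
		left -= d->len;
		d = d->next;
	}

	/*
	 * For the in-flight descriptor, the CTRLA count is converted to bytes
	 * with the programmed transfer width and taken off the running total.
	 */
	return left - ((size_t)cur_btsize << tx_width);
}
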
+ */ + + ctrla = channel_readl(atchan, CTRLA); + rmb(); /* ensure CTRLA is read before DSCR */ + dscr = channel_readl(atchan, DSCR); + + /* for the first descriptor we can be more accurate */ + if (desc_first->lli.dscr == dscr) + return atc_calc_bytes_left(ret, ctrla, desc_first); + + ret -= desc_first->len; + list_for_each_entry(desc, &desc_first->tx_list, desc_node) { + if (desc->lli.dscr == dscr) + break; + + ret -= desc->len; } - atchan->remain_desc -= count; - ret = atchan->remain_desc; - } else { /* - * Get residual bytes when current - * descriptor transfer in progress. + * For the last descriptor in the chain we can calculate + * the remaining bytes using the channel's register. + * Note that the transfer width of the first and last + * descriptor may differ. */ - count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) - << (desc_first->tx_width); - ret = atchan->remain_desc - count; + if (!desc->lli.dscr) + ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); + } else { + /* single transfer */ + ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); } - /* - * Check fifo empty. - */ - if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) - atc_issue_pending(chan); -out: return ret; } @@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) /* Give information to tasklet */ set_bit(ATC_IS_ERROR, &atchan->status); } - if (pending & AT_DMA_BTC(i)) - set_bit(ATC_IS_BTC, &atchan->status); tasklet_schedule(&atchan->tasklet); ret = IRQ_HANDLED; } @@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, desc->lli.ctrlb = ctrlb; desc->txd.cookie = 0; + desc->len = xfer_count << src_width; atc_desc_chain(&first, &prev, desc); } /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; - first->len = len; + first->total_len = len; + + /* set transfer width for the calculation of the residue */ first->tx_width = src_width; + prev->tx_width = src_width; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ATC_SRC_WIDTH(mem_width) | len >> mem_width; desc->lli.ctrlb = ctrlb; + desc->len = len; atc_desc_chain(&first, &prev, desc); total_len += len; @@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ATC_DST_WIDTH(mem_width) | len >> reg_width; desc->lli.ctrlb = ctrlb; + desc->len = len; atc_desc_chain(&first, &prev, desc); total_len += len; @@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; - first->len = total_len; + first->total_len = total_len; + + /* set transfer width for the calculation of the residue */ first->tx_width = reg_width; + prev->tx_width = reg_width; /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | ATC_FC_MEM2PER | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); + desc->len = period_len; break; case DMA_DEV_TO_MEM: @@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | ATC_FC_PER2MEM | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); + desc->len = period_len; break; default: @@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First 
descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; - first->len = buf_len; + first->total_len = buf_len; first->tx_width = reg_width; return &first->txd; @@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan, spin_lock_irqsave(&atchan->lock, flags); /* Get number of bytes left in the active transactions */ - bytes = atc_get_bytes_left(chan); + bytes = atc_get_bytes_left(chan, cookie); spin_unlock_irqrestore(&atchan->lock, flags); @@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&atchan->lock, flags); atchan->descs_allocated = i; - atchan->remain_desc = 0; list_splice(&tmp_list, &atchan->free_list); dma_cookie_init(chan); spin_unlock_irqrestore(&atchan->lock, flags); @@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan) list_splice_init(&atchan->free_list, &list); atchan->descs_allocated = 0; atchan->status = 0; - atchan->remain_desc = 0; dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index d6bba6c636c2..2727ca560572 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -181,8 +181,9 @@ struct at_lli { * @at_lli: hardware lli structure * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list - * @len: total transaction bytecount + * @len: descriptor byte count * @tx_width: transfer width + * @total_len: total transaction byte count */ struct at_desc { /* FIRST values the hardware uses */ @@ -194,6 +195,7 @@ struct at_desc { struct list_head desc_node; size_t len; u32 tx_width; + size_t total_len; }; static inline struct at_desc * @@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) enum atc_status { ATC_IS_ERROR = 0, ATC_IS_PAUSED = 1, - ATC_IS_BTC = 2, ATC_IS_CYCLIC = 24, }; @@ -231,7 +232,6 @@ enum atc_status { * @save_cfg: configuration register that is saved on suspend/resume cycle * @save_dscr: for cyclic operations, preserve next descriptor address in * the cyclic list on suspend/resume cycle - * @remain_desc: to save remain desc length * @dma_sconfig: configuration for slave transfers, passed via * .device_config * @lock: serializes enqueue/dequeue operations to descriptors lists @@ -251,7 +251,6 @@ struct at_dma_chan { struct tasklet_struct tasklet; u32 save_cfg; u32 save_dscr; - u32 remain_desc; struct dma_slave_config dma_sconfig; spinlock_t lock; diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 6565a361e7e5..b2c3ae071429 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -26,6 +26,8 @@ #include "internal.h" +#define DRV_NAME "dw_dmac" + static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { @@ -284,7 +286,7 @@ static struct platform_driver dw_driver = { .remove = dw_remove, .shutdown = dw_shutdown, .driver = { - .name = "dw_dmac", + .name = DRV_NAME, .pm = &dw_dev_pm_ops, .of_match_table = of_match_ptr(dw_dma_of_id_table), .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), @@ -305,3 +307,4 @@ module_exit(dw_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 18c0a131e4e4..66a0efb9651d 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma) dev_err(sdma->dev, "Timeout waiting for CH0 
ready\n"); } + /* Set bits of CONFIG register with dynamic context switching */ + if (readl(sdma->regs + SDMA_H_CONFIG) == 0) + writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); + return ret ? 0 : -ETIMEDOUT; } @@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma) writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); - /* Set bits of CONFIG register with given context switching mode */ - writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); - /* Initializes channel's priorities */ sdma_set_channel_priority(&sdma->channel[0], 7); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 910ff8ab9c9c..d8135adb2238 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -645,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); + init_sdma_vm(dqm, q, qpd); retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval != 0) { @@ -652,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, return retval; } - init_sdma_vm(dqm, q, qpd); + retval = mqd->load_mqd(mqd, q->mqd, 0, + 0, NULL); + if (retval != 0) { + deallocate_sdma_queue(dqm, q->sdma_id); + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + return retval; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index e415a2a9207e..c7d298e62c96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, BUG_ON(!kq || !dev); BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); - pr_debug("kfd: In func %s initializing queue type %d size %d\n", + pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", __func__, KFD_QUEUE_TYPE_HIQ, queue_size); nop.opcode = IT_NOP; @@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); - if (prop.doorbell_ptr == NULL) + if (prop.doorbell_ptr == NULL) { + pr_err("amdkfd: error init doorbell"); goto err_get_kernel_doorbell; + } retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); - if (retval != 0) + if (retval != 0) { + pr_err("amdkfd: error init pq queues size (%d)\n", queue_size); goto err_pq_allocate_vidmem; + } kq->pq_kernel_addr = kq->pq->cpu_ptr; kq->pq_gpu_addr = kq->pq->gpu_addr; @@ -165,10 +169,8 @@ err_rptr_allocate_vidmem: err_eop_allocate_vidmem: kfd_gtt_sa_free(dev, kq->pq); err_pq_allocate_vidmem: - pr_err("kfd: error init pq\n"); kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); err_get_kernel_doorbell: - pr_err("kfd: error init doorbell"); return false; } @@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq) else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); + kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj); + kfd_gtt_sa_free(kq->dev, kq->rptr_mem); kfd_gtt_sa_free(kq->dev, kq->wptr_mem); kq->ops_asic_specific.uninitialize(kq); @@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq, queue_address = (unsigned int *)kq->pq_kernel_addr; queue_size_dwords = 
kq->queue->properties.queue_size / sizeof(uint32_t); - pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", + pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", __func__, rptr, wptr, queue_address); available_size = (rptr - 1 - wptr + queue_size_dwords) % @@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, } if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { - pr_err("kfd: failed to init kernel queue\n"); + pr_err("amdkfd: failed to init kernel queue\n"); kfree(kq); return NULL; } @@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) BUG_ON(!dev); - pr_err("kfd: starting kernel queue test\n"); + pr_err("amdkfd: starting kernel queue test\n"); kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); BUG_ON(!kq); @@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) buffer[i] = kq->nop_packet; kq->ops.submit_packet(kq); - pr_err("kfd: ending kernel queue test\n"); + pr_err("amdkfd: ending kernel queue test\n"); } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6b6b07ff720b..679b10e34fb5 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -43,9 +43,10 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv); +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); /* Avoid boilerplate. I'm tired of typing. */ #define DRM_ENUM_NAME_FN(fnname, list) \ @@ -524,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb) } EXPORT_SYMBOL(drm_framebuffer_reference); -static void drm_framebuffer_free_bug(struct kref *kref) -{ - BUG(); -} - -static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) -{ - DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount)); - kref_put(&fb->refcount, drm_framebuffer_free_bug); -} - /** * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr * @fb: fb to unregister @@ -1319,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane) return; } /* disconnect the plane from the fb and crtc: */ - __drm_framebuffer_unreference(plane->old_fb); + drm_framebuffer_unreference(plane->old_fb); plane->old_fb = NULL; plane->fb = NULL; plane->crtc = NULL; @@ -2908,13 +2898,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, */ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { - fb = add_framebuffer_internal(dev, &fbreq, file_priv); + fb = internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } - - drm_framebuffer_reference(fb); } else { fb = NULL; } @@ -3267,9 +3255,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return 0; } -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv) +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; @@ -3301,12 +3290,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, return fb; } - 
mutex_lock(&file_priv->fbs_lock); - r->fb_id = fb->base.id; - list_add(&fb->filp_head, &file_priv->fbs); - DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); - mutex_unlock(&file_priv->fbs_lock); - return fb; } @@ -3328,15 +3311,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - fb = add_framebuffer_internal(dev, data, file_priv); + fb = internal_framebuffer_create(dev, r, file_priv); if (IS_ERR(fb)) return PTR_ERR(fb); + /* Transfer ownership to the filp for reaping on close */ + + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + mutex_lock(&file_priv->fbs_lock); + r->fb_id = fb->base.id; + list_add(&fb->filp_head, &file_priv->fbs); + mutex_unlock(&file_priv->fbs_lock); + return 0; } diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 9a5b68717ec8..379ab4555756 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) { bool ret; - mutex_lock(&mgr->qlock); + + /* + * All updates to txmsg->state are protected by mgr->qlock, and the two + * cases we check here are terminal states. For those the barriers + * provided by the wake_up/wait_event pair are enough. + */ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); - mutex_unlock(&mgr->qlock); return ret; } @@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, return 0; } -/* must be called holding qlock */ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; int ret; + WARN_ON(!mutex_is_locked(&mgr->qlock)); + /* construct a chunk from the first msg in the tx_msg queue */ if (list_empty(&mgr->tx_msg_downq)) { mgr->tx_down_in_progress = false; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 7fc6f8bd4821..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) unsigned rem; rem = do_div(tmp, alignment); - if (tmp) + if (rem) start += alignment - rem; } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index a5e74612100e..0a6780367d28 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI config DRM_EXYNOS_DP bool "EXYNOS DRM DP driver support" - depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) + depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) default DRM_EXYNOS select DRM_PANEL help diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 63f02e2380ae..970046199608 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev) of_node_put(i80_if_timings); ctx->regs = of_iomap(dev->of_node, 0); - if (IS_ERR(ctx->regs)) { - ret = PTR_ERR(ctx->regs); + if (!ctx->regs) { + ret = -ENOMEM; goto err_del_component; } 
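The exynos7 DECON fix just above is worth a note: of_iomap() reports failure by returning NULL, not an ERR_PTR() value, so the old IS_ERR()/PTR_ERR() test could never fire. A minimal sketch of the corrected pattern is below; the helper and its error-reporting convention are hypothetical, only the NULL check and the -ENOMEM choice come from the hunk above. By contrast, devm_ioremap_resource() does return encoded errors and is the interface to pair with IS_ERR().

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>

/* Hypothetical helper: map the first "reg" region of a DT node. */
static void __iomem *example_map_regs(struct device_node *np, int *err)
{
	void __iomem *regs = of_iomap(np, 0);

	/* of_iomap() signals failure with NULL, so check for NULL,
	 * not IS_ERR(). */
	if (!regs) {
		*err = -ENOMEM;
		return NULL;
	}

	*err = 0;
	return regs;
}
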
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c deleted file mode 100644 index ba9b3d5ed672..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> - -#include <drm/exynos_drm.h> -#include "exynos_drm_drv.h" -#include "exynos_drm_encoder.h" -#include "exynos_drm_connector.h" - -#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ - drm_connector) - -struct exynos_drm_connector { - struct drm_connector drm_connector; - uint32_t encoder_id; - struct exynos_drm_display *display; -}; - -static int exynos_drm_connector_get_modes(struct drm_connector *connector) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - struct edid *edid = NULL; - unsigned int count = 0; - int ret; - - /* - * if get_edid() exists then get_edid() callback of hdmi side - * is called to get edid data through i2c interface else - * get timing from the FIMD driver(display controller). - * - * P.S. in case of lcd panel, count is always 1 if success - * because lcd panel has only one mode. - */ - if (display->ops->get_edid) { - edid = display->ops->get_edid(display, connector); - if (IS_ERR_OR_NULL(edid)) { - ret = PTR_ERR(edid); - edid = NULL; - DRM_ERROR("Panel operation get_edid failed %d\n", ret); - goto out; - } - - count = drm_add_edid_modes(connector, edid); - if (!count) { - DRM_ERROR("Add edid modes failed %d\n", count); - goto out; - } - - drm_mode_connector_update_edid_property(connector, edid); - } else { - struct exynos_drm_panel_info *panel; - struct drm_display_mode *mode = drm_mode_create(connector->dev); - if (!mode) { - DRM_ERROR("failed to create a new display mode.\n"); - return 0; - } - - if (display->ops->get_panel) - panel = display->ops->get_panel(display); - else { - drm_mode_destroy(connector->dev, mode); - return 0; - } - - drm_display_mode_from_videomode(&panel->vm, mode); - mode->width_mm = panel->width_mm; - mode->height_mm = panel->height_mm; - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); - - count = 1; - } - -out: - kfree(edid); - return count; -} - -static int exynos_drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - int ret = MODE_BAD; - - DRM_DEBUG_KMS("%s\n", __FILE__); - - if (display->ops->check_mode) - if (!display->ops->check_mode(display, mode)) - ret = MODE_OK; - - return ret; -} - -static struct drm_encoder *exynos_drm_best_encoder( - struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct exynos_drm_connector *exynos_connector = - 
to_exynos_connector(connector); - return drm_encoder_find(dev, exynos_connector->encoder_id); -} - -static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { - .get_modes = exynos_drm_connector_get_modes, - .mode_valid = exynos_drm_connector_mode_valid, - .best_encoder = exynos_drm_best_encoder, -}; - -static int exynos_drm_connector_fill_modes(struct drm_connector *connector, - unsigned int max_width, unsigned int max_height) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - unsigned int width, height; - - width = max_width; - height = max_height; - - /* - * if specific driver want to find desired_mode using maxmum - * resolution then get max width and height from that driver. - */ - if (display->ops->get_max_resol) - display->ops->get_max_resol(display, &width, &height); - - return drm_helper_probe_single_connector_modes(connector, width, - height); -} - -/* get detection status of display device. */ -static enum drm_connector_status -exynos_drm_connector_detect(struct drm_connector *connector, bool force) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - enum drm_connector_status status = connector_status_disconnected; - - if (display->ops->is_connected) { - if (display->ops->is_connected(display)) - status = connector_status_connected; - else - status = connector_status_disconnected; - } - - return status; -} - -static void exynos_drm_connector_destroy(struct drm_connector *connector) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(exynos_connector); -} - -static struct drm_connector_funcs exynos_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .fill_modes = exynos_drm_connector_fill_modes, - .detect = exynos_drm_connector_detect, - .destroy = exynos_drm_connector_destroy, -}; - -struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, - struct drm_encoder *encoder) -{ - struct exynos_drm_connector *exynos_connector; - struct exynos_drm_display *display = exynos_drm_get_display(encoder); - struct drm_connector *connector; - int type; - int err; - - exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); - if (!exynos_connector) - return NULL; - - connector = &exynos_connector->drm_connector; - - switch (display->type) { - case EXYNOS_DISPLAY_TYPE_HDMI: - type = DRM_MODE_CONNECTOR_HDMIA; - connector->interlace_allowed = true; - connector->polled = DRM_CONNECTOR_POLL_HPD; - break; - case EXYNOS_DISPLAY_TYPE_VIDI: - type = DRM_MODE_CONNECTOR_VIRTUAL; - connector->polled = DRM_CONNECTOR_POLL_HPD; - break; - default: - type = DRM_MODE_CONNECTOR_Unknown; - break; - } - - drm_connector_init(dev, connector, &exynos_connector_funcs, type); - drm_connector_helper_add(connector, &exynos_connector_helper_funcs); - - err = drm_connector_register(connector); - if (err) - goto err_connector; - - exynos_connector->encoder_id = encoder->base.id; - exynos_connector->display = display; - connector->dpms = DRM_MODE_DPMS_OFF; - connector->encoder = encoder; - - err = drm_mode_connector_attach_encoder(connector, encoder); - if (err) { - DRM_ERROR("failed to attach a connector to a encoder\n"); - goto err_sysfs; - } - - DRM_DEBUG_KMS("connector has been created\n"); - - return connector; - -err_sysfs: - 
drm_connector_unregister(connector); -err_connector: - drm_connector_cleanup(connector); - kfree(exynos_connector); - return NULL; -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h deleted file mode 100644 index 4eb20d78379a..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef _EXYNOS_DRM_CONNECTOR_H_ -#define _EXYNOS_DRM_CONNECTOR_H_ - -struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, - struct drm_encoder *encoder); - -#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 925fc69af1a0..c300e22da8ac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx) } } -static int fimd_ctx_initialize(struct fimd_context *ctx, +static int fimd_iommu_attach_devices(struct fimd_context *ctx, struct drm_device *drm_dev) { - struct exynos_drm_private *priv; - priv = drm_dev->dev_private; - - ctx->drm_dev = drm_dev; - ctx->pipe = priv->pipe++; /* attach this sub driver to iommu mapping if supported. */ if (is_drm_iommu_supported(ctx->drm_dev)) { @@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx, return 0; } -static void fimd_ctx_remove(struct fimd_context *ctx) +static void fimd_iommu_detach_devices(struct fimd_context *ctx) { /* detach this sub driver from iommu mapping if supported. 
*/ if (is_drm_iommu_supported(ctx->drm_dev)) @@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) { struct fimd_context *ctx = dev_get_drvdata(dev); struct drm_device *drm_dev = data; + struct exynos_drm_private *priv = drm_dev->dev_private; int ret; - ret = fimd_ctx_initialize(ctx, drm_dev); - if (ret) { - DRM_ERROR("fimd_ctx_initialize failed.\n"); - return ret; - } + ctx->drm_dev = drm_dev; + ctx->pipe = priv->pipe++; ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, &fimd_crtc_ops, ctx); - if (IS_ERR(ctx->crtc)) { - fimd_ctx_remove(ctx); - return PTR_ERR(ctx->crtc); - } if (ctx->display) exynos_drm_create_enc_conn(drm_dev, ctx->display); + ret = fimd_iommu_attach_devices(ctx, drm_dev); + if (ret) + return ret; + return 0; } @@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master, fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); + fimd_iommu_detach_devices(ctx); + if (ctx->display) exynos_dpi_remove(ctx->display); - - fimd_ctx_remove(ctx); } static const struct component_ops fimd_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index a5616872eee7..8ad5b7294eb4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane) struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); - if (exynos_crtc->ops->win_disable) + if (exynos_crtc && exynos_crtc->ops->win_disable) exynos_crtc->ops->win_disable(exynos_crtc, exynos_plane->zpos); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e5daad5f75fb..27ea6bdebce7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) WARN_ON(i915_verify_lists(ring->dev)); - /* Move any buffers on the active list that are no longer referenced - * by the ringbuffer to the flushing/inactive lists as appropriate, - * before we free the context associated with the requests. + /* Retire requests first as we use it above for the early return. + * If we retire requests last, we may use a later seqno and so clear + * the requests lists without clearing the active list, leading to + * confusion. */ - while (!list_empty(&ring->active_list)) { - struct drm_i915_gem_object *obj; - - obj = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); - - if (!i915_gem_request_completed(obj->last_read_req, true)) - break; - - i915_gem_object_move_to_inactive(obj); - } - - while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; struct intel_ringbuffer *ringbuf; @@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) i915_gem_free_request(request); } + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate, + * before we free the context associated with the requests. 
+ */ + while (!list_empty(&ring->active_list)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); + + if (!i915_gem_request_completed(obj->last_read_req, true)) + break; + + i915_gem_object_move_to_inactive(obj); + } + if (unlikely(ring->trace_irq_req && i915_gem_request_completed(ring->trace_irq_req, true))) { ring->irq_put(ring); @@ -2936,9 +2940,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) req = obj->last_read_req; /* Do this after OLR check to make sure we make forward progress polling - * on this IOCTL with a timeout <=0 (like busy ioctl) + * on this IOCTL with a timeout == 0 (like busy ioctl) */ - if (args->timeout_ns <= 0) { + if (args->timeout_ns == 0) { ret = -ETIME; goto out; } @@ -2948,7 +2952,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) i915_gem_request_reference(req); mutex_unlock(&dev->struct_mutex); - ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, + ret = __i915_wait_request(req, reset_counter, true, + args->timeout_ns > 0 ? &args->timeout_ns : NULL, file->driver_priv); mutex_lock(&dev->struct_mutex); i915_gem_request_unreference(req); @@ -4792,6 +4797,9 @@ i915_gem_init_hw(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + if (dev_priv->ellc_size) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); @@ -4824,7 +4832,7 @@ i915_gem_init_hw(struct drm_device *dev) for_each_ring(ring, dev_priv, i) { ret = ring->init_hw(ring); if (ret) - return ret; + goto out; } for (i = 0; i < NUM_L3_SLICES(dev); i++) @@ -4841,9 +4849,11 @@ i915_gem_init_hw(struct drm_device *dev) DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); - return ret; + goto out; } +out: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } @@ -4877,6 +4887,14 @@ int i915_gem_init(struct drm_device *dev) dev_priv->gt.stop_ring = intel_logical_ring_stop; } + /* This is just a security blanket to placate dragons. + * On some systems, we very sporadically observe that the first TLBs + * used by the CS may be stale, despite us poking the TLB reset. If + * we hold the forcewake during initialisation these problems + * just magically go away. 
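The i915_gem_init_hw()/i915_gem_init() hunks here bracket the whole initialisation in a forcewake get/put pair and turn early returns into goto out so the reference is dropped on every path. A compact, self-contained model of that single-exit shape follows; the example_* names and step functions are placeholders, not i915 API.

struct example_dev { int dummy; };

static void example_resource_get(struct example_dev *dev) { (void)dev; }
static void example_resource_put(struct example_dev *dev) { (void)dev; }
static int do_step_a(struct example_dev *dev) { (void)dev; return 0; }
static int do_step_b(struct example_dev *dev) { (void)dev; return 0; }

/*
 * Single-exit pattern: the reference taken at the top (forcewake, a clock,
 * a PM reference, ...) is released on success and on every error path via
 * the shared "out" label.
 */
static int example_init_with_bracket(struct example_dev *dev)
{
	int ret;

	example_resource_get(dev);

	ret = do_step_a(dev);
	if (ret)
		goto out;	/* never "return ret" here and leak the reference */

	ret = do_step_b(dev);
	if (ret)
		goto out;

out:
	example_resource_put(dev);
	return ret;
}

int main(void)
{
	struct example_dev dev;

	return example_init_with_bracket(&dev);
}
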
+ */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + ret = i915_gem_init_userptr(dev); if (ret) goto out_unlock; @@ -4903,6 +4921,7 @@ int i915_gem_init(struct drm_device *dev) } out_unlock: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e730789b53b7..f75173c20f47 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -37,6 +37,7 @@ #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_helper.h> #include <drm/drm_crtc_helper.h> @@ -2416,6 +2417,14 @@ out_unref_obj: return false; } +/* Update plane->state->fb to match plane->fb after driver-internal updates */ +static void +update_state_fb(struct drm_plane *plane) +{ + if (plane->fb != plane->state->fb) + drm_atomic_set_fb_for_plane(plane->state, plane->fb); +} + static void intel_find_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) @@ -2429,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, if (!intel_crtc->base.primary->fb) return; - if (intel_alloc_plane_obj(intel_crtc, plane_config)) + if (intel_alloc_plane_obj(intel_crtc, plane_config)) { + struct drm_plane *primary = intel_crtc->base.primary; + + primary->state->crtc = &intel_crtc->base; + primary->crtc = &intel_crtc->base; + update_state_fb(primary); + return; + } kfree(intel_crtc->base.primary->fb); intel_crtc->base.primary->fb = NULL; @@ -2453,15 +2469,21 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, continue; if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { + struct drm_plane *primary = intel_crtc->base.primary; + if (obj->tiling_mode != I915_TILING_NONE) dev_priv->preserve_bios_swizzle = true; drm_framebuffer_reference(c->primary->fb); - intel_crtc->base.primary->fb = c->primary->fb; + primary->fb = c->primary->fb; + primary->state->crtc = &intel_crtc->base; + primary->crtc = &intel_crtc->base; obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); break; } } + + update_state_fb(intel_crtc->base.primary); } static void i9xx_update_primary_plane(struct drm_crtc *crtc, @@ -6602,6 +6624,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; + val = I915_READ(DSPCNTR(plane)); + if (!(val & DISPLAY_PLANE_ENABLE)) + return; + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { DRM_DEBUG_KMS("failed to alloc fb\n"); @@ -6610,8 +6636,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - val = I915_READ(DSPCNTR(plane)); - if (INTEL_INFO(dev)->gen >= 4) if (val & DISPPLANE_TILED) plane_config->tiling = I915_TILING_X; @@ -7643,6 +7667,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; val = I915_READ(PLANE_CTL(pipe, 0)); + if (!(val & PLANE_CTL_ENABLE)) + goto error; + if (val & PLANE_CTL_TILED_MASK) plane_config->tiling = I915_TILING_X; @@ -7730,6 +7757,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; + val = I915_READ(DSPCNTR(pipe)); + if (!(val & DISPLAY_PLANE_ENABLE)) + return; + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { DRM_DEBUG_KMS("failed to alloc fb\n"); @@ -7738,8 +7769,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb = 
&intel_fb->base; - val = I915_READ(DSPCNTR(pipe)); - if (INTEL_INFO(dev)->gen >= 4) if (val & DISPPLANE_TILED) plane_config->tiling = I915_TILING_X; @@ -9716,7 +9745,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - WARN_ON(!in_irq()); + WARN_ON(!in_interrupt()); if (crtc == NULL) return; @@ -9816,6 +9845,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, drm_gem_object_reference(&obj->base); crtc->primary->fb = fb; + update_state_fb(crtc->primary); work->pending_flip_obj = obj; @@ -9884,6 +9914,7 @@ cleanup_unpin: cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); crtc->primary->fb = old_fb; + update_state_fb(crtc->primary); drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); @@ -13718,6 +13749,7 @@ void intel_modeset_gem_init(struct drm_device *dev) to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); c->primary->fb = NULL; + update_state_fb(c->primary); } } mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c47a3baa53d5..4e8fb891d4ea 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) /* We need to init first for ECOBUS access and then * determine later if we want to reinit, in case of MT access is - * not working + * not working. In this stage we don't know which flavour this + * ivb is, so it is better to reset also the gen6 fw registers + * before the ecobus check. */ + + __raw_i915_write32(dev_priv, FORCEWAKE, 0); + __raw_posting_read(dev_priv, ECOBUS); + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 29bd539af183..6efa8f38ff54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, /* switch mmio to cpu's native endianness */ #ifndef __BIG_ENDIAN - if (ioread32_native(map + 0x000004) != 0x00000000) + if (ioread32_native(map + 0x000004) != 0x00000000) { #else - if (ioread32_native(map + 0x000004) == 0x00000000) + if (ioread32_native(map + 0x000004) == 0x00000000) { #endif iowrite32_native(0x01000001, map + 0x000004); + ioread32_native(map); + } /* read boot0 and strapping information */ boot0 = ioread32_native(map + 0x000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c index 539561ed3281..108d048da764 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c @@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device) device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; #endif break; + case 0x126: + device->cname = "GM206"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass; + device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass; +#if 0 + /* looks to be some non-trivial changes */ + device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass; + /* priv ring says no to 0x10eb14 
writes */ + device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass; +#endif + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass; + device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass; +#if 0 + device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; +#endif + device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass; + device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass; +#endif + device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass; + device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass; + device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass; + device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass; + device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass; + device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; +#endif + break; default: nv_fatal(device, "unknown Maxwell chipset\n"); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index b038b6eb51db..043e4296084c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c @@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) { struct nvkm_device *device = nv_device(subdev); struct nv04_fifo_priv *priv = (void *)subdev; - uint32_t status, reassign; - int cnt = 0; + u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); + u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; + u32 reassign, chid, get, sem; reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; - while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { - uint32_t chid, get; - - nv_wr32(priv, NV03_PFIFO_CACHES, 0); - - chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; - get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); + nv_wr32(priv, NV03_PFIFO_CACHES, 0); - if (status & NV_PFIFO_INTR_CACHE_ERROR) { - nv04_fifo_cache_error(device, priv, chid, get); - status &= ~NV_PFIFO_INTR_CACHE_ERROR; - } + chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; + get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); - if (status & NV_PFIFO_INTR_DMA_PUSHER) { - nv04_fifo_dma_pusher(device, priv, chid); - status &= ~NV_PFIFO_INTR_DMA_PUSHER; - } + if (stat & NV_PFIFO_INTR_CACHE_ERROR) { + nv04_fifo_cache_error(device, priv, chid, get); + stat &= ~NV_PFIFO_INTR_CACHE_ERROR; + } - if (status & NV_PFIFO_INTR_SEMAPHORE) { - uint32_t sem; + if (stat & NV_PFIFO_INTR_DMA_PUSHER) { + nv04_fifo_dma_pusher(device, priv, chid); + stat &= ~NV_PFIFO_INTR_DMA_PUSHER; + } - status &= ~NV_PFIFO_INTR_SEMAPHORE; - nv_wr32(priv, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_SEMAPHORE); + if (stat & NV_PFIFO_INTR_SEMAPHORE) { + stat &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); - sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); - nv_wr32(priv, 
NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); - nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); - } + nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + } - if (device->card_type == NV_50) { - if (status & 0x00000010) { - status &= ~0x00000010; - nv_wr32(priv, 0x002100, 0x00000010); - } - - if (status & 0x40000000) { - nv_wr32(priv, 0x002100, 0x40000000); - nvkm_fifo_uevent(&priv->base); - status &= ~0x40000000; - } + if (device->card_type == NV_50) { + if (stat & 0x00000010) { + stat &= ~0x00000010; + nv_wr32(priv, 0x002100, 0x00000010); } - if (status) { - nv_warn(priv, "unknown intr 0x%08x, ch %d\n", - status, chid); - nv_wr32(priv, NV03_PFIFO_INTR_0, status); - status = 0; + if (stat & 0x40000000) { + nv_wr32(priv, 0x002100, 0x40000000); + nvkm_fifo_uevent(&priv->base); + stat &= ~0x40000000; } - - nv_wr32(priv, NV03_PFIFO_CACHES, reassign); } - if (status) { - nv_error(priv, "still angry after %d spins, halt\n", cnt); - nv_wr32(priv, 0x002140, 0); - nv_wr32(priv, 0x000140, 0); + if (stat) { + nv_warn(priv, "unknown intr 0x%08x\n", stat); + nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); + nv_wr32(priv, NV03_PFIFO_INTR_0, stat); } - nv_wr32(priv, 0x000100, 0x00000100); + nv_wr32(priv, NV03_PFIFO_CACHES, reassign); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 2e7ec389eea7..57e2c5b13123 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c @@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418808, 0x00000000, s, b); - mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index b52300d8861a..5e9454ba158f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418808, 0x00000000, s, b); - mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 956f4dce960c..b2fae6e389e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c @@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 
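Stepping back to the nv04 PFIFO interrupt rework above: instead of spinning on the status register and giving up after 100 iterations, the handler now makes a single pass, clears the sources it understands, and masks whatever is left so an unknown source cannot wedge the machine in an interrupt storm. The self-contained model below shows that pattern; the reg_read/reg_write accessors, the register indices and the INTR_* names are placeholders for the sake of a runnable example.

#include <stdint.h>
#include <stdio.h>

#define INTR_CACHE_ERROR  0x00000001u
#define INTR_DMA_PUSHER   0x00000010u

static uint32_t regs[2];                 /* [0] = status, [1] = enable mask */
static uint32_t reg_read(int r)          { return regs[r]; }
static void reg_write(int r, uint32_t v) { regs[r] = v; }

static void example_fifo_intr(void)
{
	uint32_t mask = reg_read(1);
	uint32_t stat = reg_read(0) & mask;   /* only consider enabled sources */

	if (stat & INTR_CACHE_ERROR) {
		/* ... handle the cache error ... */
		stat &= ~INTR_CACHE_ERROR;
	}

	if (stat & INTR_DMA_PUSHER) {
		/* ... handle the DMA pusher fault ... */
		stat &= ~INTR_DMA_PUSHER;
	}

	if (stat) {
		/* Unknown source: report once, then disable and ack it so it
		 * cannot fire forever. */
		printf("unknown intr 0x%08x\n", stat);
		reg_write(1, mask & ~stat);   /* mask the unhandled bits */
		reg_write(0, stat);           /* ack them */
	}
}

int main(void)
{
	regs[1] = INTR_CACHE_ERROR | INTR_DMA_PUSHER | 0x100;  /* three enabled */
	regs[0] = INTR_DMA_PUSHER | 0x100;                     /* one known, one unknown */
	example_fifo_intr();
	return 0;
}
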
mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418e24, 0x00000000, s, b); - mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s)); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c index d1a89b2bd5c1..c4e1f085ee10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c @@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info) u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); if (ent) { if (ver >= 0x41) { - if (!(nv_ro32(bios, ent) & 0x80000000)) + u32 ent_value = nv_ro32(bios, ent); + u8 i2c_port = (ent_value >> 27) & 0x1f; + u8 dpaux_port = (ent_value >> 22) & 0x1f; + /* value 0x1f means unused according to DCB 4.x spec */ + if (i2c_port == 0x1f && dpaux_port == 0x1f) info->type = DCB_I2C_UNUSED; else info->type = DCB_I2C_PMGR; diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } +struct radeon_wait_cb { + struct fence_cb base; + struct task_struct *task; +}; + +static void +radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) +{ + struct radeon_wait_cb *wait = + container_of(cb, struct radeon_wait_cb, base); + + wake_up_process(wait->task); +} + static signed long radeon_fence_default_wait(struct fence *f, bool intr, signed long t) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; - bool signaled; + struct radeon_wait_cb cb; - fence_enable_sw_signaling(&fence->base); + cb.task = current; - /* - * This function has to return -EDEADLK, but cannot hold - * exclusive_lock during the wait because some callers - * may already hold it. This means checking needs_reset without - * lock, and not fiddling with any gpu internals. - * - * The callback installed with fence_enable_sw_signaling will - * run before our wait_event_*timeout call, so we will see - * both the signaled fence and the changes to needs_reset. 
- */ + if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) + return t; + + while (t > 0) { + if (intr) + set_current_state(TASK_INTERRUPTIBLE); + else + set_current_state(TASK_UNINTERRUPTIBLE); + + /* + * radeon_test_signaled must be called after + * set_current_state to prevent a race with wake_up_process + */ + if (radeon_test_signaled(fence)) + break; + + if (rdev->needs_reset) { + t = -EDEADLK; + break; + } + + t = schedule_timeout(t); + + if (t > 0 && intr && signal_pending(current)) + t = -ERESTARTSYS; + } + + __set_current_state(TASK_RUNNING); + fence_remove_callback(f, &cb.base); - if (intr) - t = wait_event_interruptible_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - else - t = wait_event_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - - if (t > 0 && !signaled) - return -EDEADLK; return t; } diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 061eaa9c19c7..122eb5693ba1 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev) .compute_vmid_bitmap = 0xFF00, .first_compute_pipe = 1, - .compute_pipe_count = 8 - 1, + .compute_pipe_count = 4 - 1, }; radeon_doorbell_get_kfd_info(rdev, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 43e09942823e..318165d4855c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) else rbo->placements[i].lpfn = 0; } - - /* - * Use two-ended allocation depending on the buffer size to - * improve fragmentation quality. - * 512kb was measured as the most optimal number. 
- */ - if (rbo->tbo.mem.size > 512 * 1024) { - for (i = 0; i < c; i++) { - rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; - } - } } int radeon_bo_create(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e088e5558da0..a7fb2735d4a9 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -7130,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); if (!vclk || !dclk) { - /* keep the Bypass mode, put PLL to sleep */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* keep the Bypass mode */ return 0; } @@ -7147,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) /* set VCO_MODE to 1 */ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); - /* toggle UPLL_SLEEP to 1 then back to 0 */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* disable sleep mode */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); /* deassert UPLL_RESET */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6c6b655defcf..e13b9cbc304e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_err1; } - ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, - (dev_priv->vram_size >> PAGE_SHIFT)); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed initializing memory manager for VRAM.\n"); - goto out_err2; - } - - dev_priv->has_gmr = true; - if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || - refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, - VMW_PL_GMR) != 0) { - DRM_INFO("No GMR memory available. " - "Graphics memory resources are very limited.\n"); - dev_priv->has_gmr = false; - } - - if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { - dev_priv->has_mob = true; - if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, - VMW_PL_MOB) != 0) { - DRM_INFO("No MOB memory available. " - "3D will be disabled.\n"); - dev_priv->has_mob = false; - } - } - dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, dev_priv->mmio_size); @@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_fman; } + + ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, + (dev_priv->vram_size >> PAGE_SHIFT)); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed initializing memory manager for VRAM.\n"); + goto out_no_vram; + } + + dev_priv->has_gmr = true; + if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || + refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, + VMW_PL_GMR) != 0) { + DRM_INFO("No GMR memory available. " + "Graphics memory resources are very limited.\n"); + dev_priv->has_gmr = false; + } + + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { + dev_priv->has_mob = true; + if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, + VMW_PL_MOB) != 0) { + DRM_INFO("No MOB memory available. " + "3D will be disabled.\n"); + dev_priv->has_mob = false; + } + } + vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. 
*/ @@ -838,6 +839,12 @@ out_no_fifo: vmw_kms_close(dev_priv); out_no_kms: vmw_kms_restore_vga(dev_priv); + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); +out_no_vram: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) @@ -853,12 +860,6 @@ out_err4: iounmap(dev_priv->mmio_virt); out_err3: arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); -out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); @@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev) } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); + + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); + vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); @@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev) ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); @@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + pci_disable_device(pdev); drm_put_dev(dev); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 33176d05db35..654c8daeb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB buffer.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_no_reloc; } bo = &vmw_bo->base; @@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); - vmw_bo_p = NULL; + *vmw_bo_p = NULL; return ret; } @@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_no_reloc; } bo = &vmw_bo->base; @@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); - vmw_bo_p = NULL; + *vmw_bo_p = NULL; return ret; } @@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, NULL, arg->command_size, arg->throttle_us, (void __user *)(unsigned long)arg->fence_rep, NULL); - + ttm_read_unlock(&dev_priv->reservation_sem); if (unlikely(ret != 0)) - goto out_unlock; + return ret; vmw_kms_cursor_post_execbuf(dev_priv); -out_unlock: - 
ttm_read_unlock(&dev_priv->reservation_sem); - return ret; + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8725b79e7847..07cda8cbbddb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, int i; struct drm_mode_config *mode_config = &dev->mode_config; - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) - return ret; - if (!arg->num_outputs) { struct drm_vmw_rect def_rect = {0, 0, 800, 600}; vmw_du_update_layout(dev_priv, 1, &def_rect); - goto out_unlock; + return 0; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), GFP_KERNEL); - if (unlikely(!rects)) { - ret = -ENOMEM; - goto out_unlock; - } + if (unlikely(!rects)) + return -ENOMEM; user_rects = (void __user *)(unsigned long)arg->rects; ret = copy_from_user(rects, user_rects, rects_size); @@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, out_free: kfree(rects); -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); return ret; } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 7c669c328c4c..56ce8c2b5530 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1959,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 204312bfab2c..9c4786759f16 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -586,6 +586,7 @@ #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e #define USB_DEVICE_ID_LOGITECH_T651 0xb00c +#define USB_DEVICE_ID_LOGITECH_C077 0xc007 #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f @@ -898,6 +899,7 @@ #define USB_VENDOR_ID_TIVO 0x150a #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 +#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203 #define USB_VENDOR_ID_TOPSEED 0x0766 #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c index d790d8d71f7f..d98696927453 100644 --- a/drivers/hid/hid-tivo.c +++ b/drivers/hid/hid-tivo.c @@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = { /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, { } }; MODULE_DEVICE_TABLE(hid, tivo_devices); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 9be99a67bfe2..a82127753461 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -78,6 +78,7 @@ static const struct hid_blacklist { { 
USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 046351cf17f3..bbe32d66e500 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) (features->type == CINTIQ && !(data[1] & 0x40))) return 1; - if (features->quirks & WACOM_QUIRK_MULTI_INPUT) + if (wacom->shared) { wacom->shared->stylus_in_proximity = true; + if (wacom->shared->touch_down) + return 1; + } + /* in Range while exiting */ if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { input_report_key(input, BTN_TOUCH, 0); @@ -1043,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) struct input_dev *input = wacom->input; unsigned char *data = wacom->data; int i; - int current_num_contacts = 0; + int current_num_contacts = data[61]; int contacts_to_send = 0; int num_contacts_left = 4; /* maximum contacts per packet */ int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; int y_offset = 2; + static int contact_with_no_pen_down_count = 0; if (wacom->features.type == WACOM_27QHDT) { current_num_contacts = data[63]; num_contacts_left = 10; byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; y_offset = 0; - } else { - current_num_contacts = data[61]; } /* * First packet resets the counter since only the first * packet in series will have non-zero current_num_contacts. */ - if (current_num_contacts) + if (current_num_contacts) { wacom->num_contacts_left = current_num_contacts; + contact_with_no_pen_down_count = 0; + } contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); @@ -1096,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); input_report_abs(input, ABS_MT_ORIENTATION, w > h); } + contact_with_no_pen_down_count++; } } input_mt_report_pointer_emulation(input, true); wacom->num_contacts_left -= contacts_to_send; - if (wacom->num_contacts_left <= 0) + if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; - - wacom->shared->touch_down = (wacom->num_contacts_left > 0); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); + } return 1; } @@ -1116,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom) int current_num_contacts = data[2]; int contacts_to_send = 0; int x_offset = 0; + static int contact_with_no_pen_down_count = 0; /* MTTPC does not support Height and Width */ if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) @@ -1125,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom) * First packet resets the counter since only the first * packet in series will have non-zero current_num_contacts. 
*/ - if (current_num_contacts) + if (current_num_contacts) { wacom->num_contacts_left = current_num_contacts; + contact_with_no_pen_down_count = 0; + } /* There are at most 5 contacts per packet */ contacts_to_send = min(5, wacom->num_contacts_left); @@ -1147,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom) int y = get_unaligned_le16(&data[offset + x_offset + 9]); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); + contact_with_no_pen_down_count++; } } input_mt_report_pointer_emulation(input, true); wacom->num_contacts_left -= contacts_to_send; - if (wacom->num_contacts_left < 0) + if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; - - wacom->shared->touch_down = (wacom->num_contacts_left > 0); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); + } return 1; } @@ -1193,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; struct input_dev *input = wacom->input; - bool prox; + bool prox = !wacom->shared->stylus_in_proximity; int x = 0, y = 0; if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) return 0; - if (!wacom->shared->stylus_in_proximity) { - if (len == WACOM_PKGLEN_TPC1FG) { - prox = data[0] & 0x01; - x = get_unaligned_le16(&data[1]); - y = get_unaligned_le16(&data[3]); - } else if (len == WACOM_PKGLEN_TPC1FG_B) { - prox = data[2] & 0x01; - x = get_unaligned_le16(&data[3]); - y = get_unaligned_le16(&data[5]); - } else { - prox = data[1] & 0x01; - x = le16_to_cpup((__le16 *)&data[2]); - y = le16_to_cpup((__le16 *)&data[4]); - } - } else - /* force touch out when pen is in prox */ - prox = 0; + if (len == WACOM_PKGLEN_TPC1FG) { + prox = prox && (data[0] & 0x01); + x = get_unaligned_le16(&data[1]); + y = get_unaligned_le16(&data[3]); + } else if (len == WACOM_PKGLEN_TPC1FG_B) { + prox = prox && (data[2] & 0x01); + x = get_unaligned_le16(&data[3]); + y = get_unaligned_le16(&data[5]); + } else { + prox = prox && (data[1] & 0x01); + x = le16_to_cpup((__le16 *)&data[2]); + y = le16_to_cpup((__le16 *)&data[4]); + } if (prox) { input_report_abs(input, ABS_X, x); @@ -1613,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; int i; + int contact_with_no_pen_down_count = 0; if (data[0] != 0x02) return 0; @@ -1640,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) } input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); + contact_with_no_pen_down_count++; } } @@ -1649,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); return 1; } -static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) +static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count) { struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->input; @@ -1661,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) int slot = input_mt_get_slot_by_key(input, data[0]); if (slot < 0) - return; + return 0; touch = touch && !wacom->shared->stylus_in_proximity; @@ -1693,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac 
*wacom, unsigned char *data) input_report_abs(input, ABS_MT_POSITION_Y, y); input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); input_report_abs(input, ABS_MT_TOUCH_MINOR, height); + last_touch_count++; } + return last_touch_count; } static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) @@ -1718,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) unsigned char *data = wacom->data; int count = data[1] & 0x07; int i; + int contact_with_no_pen_down_count = 0; if (data[0] != 0x02) return 0; @@ -1728,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) int msg_id = data[offset]; if (msg_id >= 2 && msg_id <= 17) - wacom_bpt3_touch_msg(wacom, data + offset); + contact_with_no_pen_down_count = + wacom_bpt3_touch_msg(wacom, data + offset, + contact_with_no_pen_down_count); else if (msg_id == 128) wacom_bpt3_button_msg(wacom, data + offset); } input_mt_report_pointer_emulation(input, true); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); return 1; } @@ -1759,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom) return 0; } + if (wacom->shared->touch_down) + return 0; + prox = (data[1] & 0x20) == 0x20; /* diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 210cf4874cb7..edf274cabe81 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev) status = driver->remove(client); } - if (dev->of_node) - irq_dispose_mapping(client->irq); - dev_pm_domain_detach(&client->dev, true); return status; } diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 1793aea4a7d2..6eb738ca6d2f 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX); printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " - "%lums tDSC%s\n", + "%ums tDSC%s\n", drive->name, tape->name, *(u16 *)&tape->caps[14], (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, tape->buffer_size / 1024, - tape->best_dsc_rw_freq * 1000 / HZ, + jiffies_to_msecs(tape->best_dsc_rw_freq), (drive->dev_flags & IDE_DFLAG_USING_DMA) ? 
", DMA" : ""); ide_proc_register_driver(drive, tape->driver); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c7619716c31d..59040265e361 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -64,6 +64,14 @@ enum { #define GUID_TBL_BLK_NUM_ENTRIES 8 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) +/* Counters should be saturate once they reach their maximum value */ +#define ASSIGN_32BIT_COUNTER(counter, value) do {\ + if ((value) > U32_MAX) \ + counter = cpu_to_be32(U32_MAX); \ + else \ + counter = cpu_to_be32(value); \ +} while (0) + struct mlx4_mad_rcv_buf { struct ib_grh grh; u8 payload[256]; @@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, static void edit_counter(struct mlx4_counter *cnt, struct ib_pma_portcounters *pma_cnt) { - pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); - pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); - pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); - pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, + (be64_to_cpu(cnt->tx_bytes) >> 2)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, + (be64_to_cpu(cnt->rx_bytes) >> 2)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, + be64_to_cpu(cnt->tx_frames)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, + be64_to_cpu(cnt->rx_frames)); } static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ac6e2b710ea6..b972c0b41799 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work) spin_lock_bh(&ibdev->iboe.lock); for (i = 0; i < MLX4_MAX_PORTS; ++i) { struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; + enum ib_port_state curr_port_state; - enum ib_port_state curr_port_state = + if (!curr_netdev) + continue; + + curr_port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? 
IB_PORT_ACTIVE : IB_PORT_DOWN; diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index 8ff612d160b0..563932500ff1 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c @@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev) input_set_drvdata(input, keypad); - error = request_threaded_irq(irq, NULL, - tc3589x_keypad_irq, plat->irqtype, - "tc3589x-keypad", keypad); + error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq, + plat->irqtype | IRQF_ONESHOT, + "tc3589x-keypad", keypad); if (error < 0) { dev_err(&pdev->dev, "Could not allocate irq %d,error %d\n", diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 59d4dcddf6de..98228773a111 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c @@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c, idev->private = m; idev->input->name = MMA8450_DRV_NAME; idev->input->id.bustype = BUS_I2C; + idev->input->dev.parent = &c->dev; idev->poll = mma8450_poll; idev->poll_interval = POLL_INTERVAL; idev->poll_interval_max = POLL_INTERVAL_MAX; diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d28726a0ef85..1bd15ebc01f2 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) return -ENOMEM; error = alps_identify(psmouse, priv); - if (error) + if (error) { + kfree(priv); return error; + } if (set_properties) { psmouse->vendor = "ALPS"; diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 77e9d70a986b..1e2291c378fe 100644 --- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c @@ -20,7 +20,7 @@ #include <linux/input/mt.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include "cyapa.h" diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index ddf5393a1180..5b611dd71e79 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c @@ -17,7 +17,7 @@ #include <linux/mutex.h> #include <linux/completion.h> #include <linux/slab.h> -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include <linux/crc-itu-t.h> #include "cyapa.h" @@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa, electrodes_tx = cyapa->electrodes_x; max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & ~7u) * electrodes_tx; - } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { + } else { offset = 2; max_element_cnt = cyapa->electrodes_x + cyapa->electrodes_y; diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 757f78a94aec..23d259416f2f 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c @@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse) #define FOC_MAX_FINGERS 5 -#define FOC_MAX_X 2431 -#define FOC_MAX_Y 1663 - /* * Current state of a single finger on the touchpad. */ @@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse) input_mt_slot(dev, i); input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); if (active) { - input_report_abs(dev, ABS_MT_POSITION_X, finger->x); + unsigned int clamped_x, clamped_y; + /* + * The touchpad might report invalid data, so we clamp + * the resulting values so that we do not confuse + * userspace. 
+ */ + clamped_x = clamp(finger->x, 0U, priv->x_max); + clamped_y = clamp(finger->y, 0U, priv->y_max); + input_report_abs(dev, ABS_MT_POSITION_X, clamped_x); input_report_abs(dev, ABS_MT_POSITION_Y, - FOC_MAX_Y - finger->y); + priv->y_max - clamped_y); } } input_mt_report_pointer_emulation(dev, true); @@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse, state->pressed = (packet[0] >> 4) & 1; - /* - * packet[5] contains some kind of tool size in the most - * significant nibble. 0xff is a special value (latching) that - * signals a large contact area. - */ - if (packet[5] == 0xff) { - state->fingers[finger].valid = false; - return; - } - state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; state->fingers[finger].y = (packet[3] << 8) | packet[4]; state->fingers[finger].valid = true; @@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse) return 0; } + +void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution) +{ + /* not supported yet */ +} + +static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate) +{ + /* not supported yet */ +} + +static void focaltech_set_scale(struct psmouse *psmouse, + enum psmouse_scale scale) +{ + /* not supported yet */ +} + int focaltech_init(struct psmouse *psmouse) { struct focaltech_data *priv; @@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse) psmouse->cleanup = focaltech_reset; /* resync is not supported yet */ psmouse->resync_time = 0; + /* + * rate/resolution/scale changes are not supported yet, and + * the generic implementations of these functions seem to + * confuse some touchpads + */ + psmouse->set_resolution = focaltech_set_resolution; + psmouse->set_rate = focaltech_set_rate; + psmouse->set_scale = focaltech_set_scale; return 0; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 4ccd01d7a48d..8bc61237bc1b 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) } /* + * Here we set the mouse scaling. + */ + +static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) +{ + ps2_command(&psmouse->ps2dev, NULL, + scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : + PSMOUSE_CMD_SETSCALE11); +} + +/* * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. 
*/ @@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) psmouse->set_rate = psmouse_set_rate; psmouse->set_resolution = psmouse_set_resolution; + psmouse->set_scale = psmouse_set_scale; psmouse->poll = psmouse_poll; psmouse->protocol_handler = psmouse_process_byte; psmouse->pktsize = 3; @@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse) if (psmouse_max_proto != PSMOUSE_PS2) { psmouse->set_rate(psmouse, psmouse->rate); psmouse->set_resolution(psmouse, psmouse->resolution); - ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); + psmouse->set_scale(psmouse, PSMOUSE_SCALE11); } } diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index c2ff137ecbdb..d02e1bdc9ae4 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h @@ -36,6 +36,11 @@ typedef enum { PSMOUSE_FULL_PACKET } psmouse_ret_t; +enum psmouse_scale { + PSMOUSE_SCALE11, + PSMOUSE_SCALE21 +}; + struct psmouse { void *private; struct input_dev *dev; @@ -67,6 +72,7 @@ struct psmouse { psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); void (*set_rate)(struct psmouse *psmouse, unsigned int rate); void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); + void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale); int (*reconnect)(struct psmouse *psmouse); void (*disconnect)(struct psmouse *psmouse); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f2cceb6493a0..dda605836546 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -67,9 +67,6 @@ #define X_MAX_POSITIVE 8176 #define Y_MAX_POSITIVE 8176 -/* maximum ABS_MT_POSITION displacement (in mm) */ -#define DMAX 10 - /***************************************************************************** * Stuff we need even when we do not want native Synaptics support ****************************************************************************/ @@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse) static bool cr48_profile_sensor; +#define ANY_BOARD_ID 0 struct min_max_quirk { const char * const *pnp_ids; + struct { + unsigned long int min, max; + } board_id; int x_min, x_max, y_min, y_max; }; static const struct min_max_quirk min_max_pnpid_table[] = { { (const char * const []){"LEN0033", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1024, 5052, 2258, 4832 }, { - (const char * const []){"LEN0035", "LEN0042", NULL}, + (const char * const []){"LEN0042", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1232, 5710, 1156, 4696 }, { (const char * const []){"LEN0034", "LEN0036", "LEN0037", "LEN0039", "LEN2002", "LEN2004", NULL}, + {ANY_BOARD_ID, 2961}, 1024, 5112, 2024, 4832 }, { (const char * const []){"LEN2001", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1024, 5022, 2508, 4832 }, { (const char * const []){"LEN2006", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1264, 5675, 1171, 4688 }, { } @@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0041", "LEN0042", /* Yoga */ "LEN0045", - "LEN0046", "LEN0047", - "LEN0048", "LEN0049", "LEN2000", "LEN2001", /* Edge E431 */ @@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse) return 0; } +static int synaptics_more_extended_queries(struct psmouse *psmouse) +{ + struct synaptics_data *priv = psmouse->private; + unsigned char buf[3]; + + if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf)) + return -1; + + priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2]; + + return 0; +} + /* - * Read the board id from the 
touchpad + * Read the board id and the "More Extended Queries" from the touchpad * The board id is encoded in the "QUERY MODES" response */ -static int synaptics_board_id(struct psmouse *psmouse) +static int synaptics_query_modes(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char bid[3]; + /* firmwares prior 7.5 have no board_id encoded */ + if (SYN_ID_FULL(priv->identity) < 0x705) + return 0; + if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) return -1; priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; + + if (SYN_MEXT_CAP_BIT(bid[0])) + return synaptics_more_extended_queries(psmouse); + return 0; } @@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char resp[3]; - int i; if (SYN_ID_MAJOR(priv->identity) < 4) return 0; @@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse) } } - for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { - if (psmouse_matches_pnp_id(psmouse, - min_max_pnpid_table[i].pnp_ids)) { - priv->x_min = min_max_pnpid_table[i].x_min; - priv->x_max = min_max_pnpid_table[i].x_max; - priv->y_min = min_max_pnpid_table[i].y_min; - priv->y_max = min_max_pnpid_table[i].y_max; - return 0; - } - } - if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { @@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse) } else { priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); + psmouse_info(psmouse, + "queried max coordinates: x [..%d], y [..%d]\n", + priv->x_max, priv->y_max); } } - if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && - SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { + if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) && + (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 || + /* + * Firmware v8.1 does not report proper number of extended + * capabilities, but has been proven to report correct min + * coordinates. 
+ */ + SYN_ID_FULL(priv->identity) == 0x801)) { if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { psmouse_warn(psmouse, "device claims to have min coordinates query, but I'm not able to read it.\n"); } else { priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); + psmouse_info(psmouse, + "queried min coordinates: x [%d..], y [%d..]\n", + priv->x_min, priv->y_min); } } return 0; } +/* + * Apply quirk(s) if the hardware matches + */ + +static void synaptics_apply_quirks(struct psmouse *psmouse) +{ + struct synaptics_data *priv = psmouse->private; + int i; + + for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { + if (!psmouse_matches_pnp_id(psmouse, + min_max_pnpid_table[i].pnp_ids)) + continue; + + if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID && + priv->board_id < min_max_pnpid_table[i].board_id.min) + continue; + + if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID && + priv->board_id > min_max_pnpid_table[i].board_id.max) + continue; + + priv->x_min = min_max_pnpid_table[i].x_min; + priv->x_max = min_max_pnpid_table[i].x_max; + priv->y_min = min_max_pnpid_table[i].y_min; + priv->y_max = min_max_pnpid_table[i].y_max; + psmouse_info(psmouse, + "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n", + priv->x_min, priv->x_max, + priv->y_min, priv->y_max); + break; + } +} + static int synaptics_query_hardware(struct psmouse *psmouse) { if (synaptics_identify(psmouse)) @@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse) return -1; if (synaptics_firmware_id(psmouse)) return -1; - if (synaptics_board_id(psmouse)) + if (synaptics_query_modes(psmouse)) return -1; if (synaptics_capability(psmouse)) return -1; if (synaptics_resolution(psmouse)) return -1; + synaptics_apply_quirks(psmouse); + return 0; } @@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf) return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; } -static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) +static void synaptics_pass_pt_packet(struct psmouse *psmouse, + struct serio *ptport, + unsigned char *packet) { + struct synaptics_data *priv = psmouse->private; struct psmouse *child = serio_get_drvdata(ptport); if (child && child->state == PSMOUSE_ACTIVATED) { - serio_interrupt(ptport, packet[1], 0); + serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0); serio_interrupt(ptport, packet[4], 0); serio_interrupt(ptport, packet[5], 0); if (child->pktsize == 4) serio_interrupt(ptport, packet[2], 0); - } else + } else { serio_interrupt(ptport, packet[1], 0); + } } static void synaptics_pt_activate(struct psmouse *psmouse) @@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[], } } +static void synaptics_parse_ext_buttons(const unsigned char buf[], + struct synaptics_data *priv, + struct synaptics_hw_state *hw) +{ + unsigned int ext_bits = + (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; + unsigned int ext_mask = GENMASK(ext_bits - 1, 0); + + hw->ext_buttons = buf[4] & ext_mask; + hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits; +} + static bool is_forcepad; static int synaptics_parse_hw_state(const unsigned char buf[], @@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[], hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 
1 : 0; } - if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && + if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 && ((buf[0] ^ buf[3]) & 0x02)) { - switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { - default: - /* - * if nExtBtn is greater than 8 it should be - * considered invalid and treated as 0 - */ - break; - case 8: - hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0; - hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0; - case 6: - hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0; - hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0; - case 4: - hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0; - hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0; - case 2: - hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0; - hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0; - } + synaptics_parse_ext_buttons(buf, priv, hw); } } else { hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); @@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev, } } +static void synaptics_report_ext_buttons(struct psmouse *psmouse, + const struct synaptics_hw_state *hw) +{ + struct input_dev *dev = psmouse->dev; + struct synaptics_data *priv = psmouse->private; + int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; + char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + int i; + + if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap)) + return; + + /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */ + if (SYN_ID_FULL(priv->identity) == 0x801 && + !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02)) + return; + + if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) { + for (i = 0; i < ext_bits; i++) { + input_report_key(dev, BTN_0 + 2 * i, + hw->ext_buttons & (1 << i)); + input_report_key(dev, BTN_1 + 2 * i, + hw->ext_buttons & (1 << (i + ext_bits))); + } + return; + } + + /* + * This generation of touchpads has the trackstick buttons + * physically wired to the touchpad. Re-route them through + * the pass-through interface. 
+ */ + if (!priv->pt_port) + return; + + /* The trackstick expects at most 3 buttons */ + priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) | + SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 | + SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2; + + synaptics_pass_pt_packet(psmouse, priv->pt_port, buf); +} + static void synaptics_report_buttons(struct psmouse *psmouse, const struct synaptics_hw_state *hw) { struct input_dev *dev = psmouse->dev; struct synaptics_data *priv = psmouse->private; - int i; input_report_key(dev, BTN_LEFT, hw->left); input_report_key(dev, BTN_RIGHT, hw->right); @@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse, input_report_key(dev, BTN_BACK, hw->down); } - for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) - input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i)); + synaptics_report_ext_buttons(psmouse, hw); } static void synaptics_report_mt_data(struct psmouse *psmouse, @@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse, pos[i].y = synaptics_invert_y(hw[i]->y); } - input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); + input_mt_assign_slots(dev, slot, pos, nsemi, 0); for (i = 0; i < nsemi; i++) { input_mt_slot(dev, slot[i]); @@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse) if (SYN_CAP_PASS_THROUGH(priv->capabilities) && synaptics_is_pt_packet(psmouse->packet)) { if (priv->pt_port) - synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); + synaptics_pass_pt_packet(psmouse, priv->pt_port, + psmouse->packet); } else synaptics_process_packet(psmouse); @@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse, __set_bit(BTN_BACK, dev->keybit); } - for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) - __set_bit(BTN_0 + i, dev->keybit); + if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) + for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) + __set_bit(BTN_0 + i, dev->keybit); __clear_bit(EV_REL, dev->evbit); __clear_bit(REL_X, dev->relbit); @@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse, if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); - if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) + if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && + !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); /* Clickpads report only left button */ __clear_bit(BTN_RIGHT, dev->keybit); diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index aedc3299b14e..ee4bd0d12b26 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -22,6 +22,7 @@ #define SYN_QUE_EXT_CAPAB_0C 0x0c #define SYN_QUE_EXT_MAX_COORDS 0x0d #define SYN_QUE_EXT_MIN_COORDS 0x0f +#define SYN_QUE_MEXT_CAPAB_10 0x10 /* synatics modes */ #define SYN_BIT_ABSOLUTE_MODE (1 << 7) @@ -53,6 +54,7 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) +#define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1)) /* * The following describes response for the 0x0c query. @@ -89,6 +91,30 @@ #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) +/* + * The following descibes response for the 0x10 query. 
+ * + * byte mask name meaning + * ---- ---- ------- ------------ + * 1 0x01 ext buttons are stick buttons exported in the extended + * capability are actually meant to be used + * by the tracktick (pass-through). + * 1 0x02 SecurePad the touchpad is a SecurePad, so it + * contains a built-in fingerprint reader. + * 1 0xe0 more ext count how many more extented queries are + * available after this one. + * 2 0xff SecurePad width the width of the SecurePad fingerprint + * reader. + * 3 0xff SecurePad height the height of the SecurePad fingerprint + * reader. + */ +#define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000) +#define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000) + +#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01)) +#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02)) +#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04)) + /* synaptics modes query bits */ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) #define SYN_MODE_RATE(m) ((m) & (1 << 6)) @@ -143,6 +169,7 @@ struct synaptics_data { unsigned long int capabilities; /* Capabilities */ unsigned long int ext_cap; /* Extended Capabilities */ unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ + unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */ unsigned long int identity; /* Identification */ unsigned int x_res, y_res; /* X/Y resolution in units/mm */ unsigned int x_max, y_max; /* Max coordinates (from FW) */ @@ -156,6 +183,7 @@ struct synaptics_data { bool disable_gesture; /* disable gestures */ struct serio *pt_port; /* Pass-through serio port */ + unsigned char pt_buttons; /* Pass-through buttons */ /* * Last received Advanced Gesture Mode (AGM) packet. An AGM packet diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 58917525126e..6261fd6d7c3c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I tristate "Allwinner sun4i resistive touchscreen controller support" depends on ARCH_SUNXI || COMPILE_TEST depends on HWMON + depends on THERMAL || !THERMAL_OF help This selects support for the resistive touchscreen controller found on Allwinner sunxi SoCs. diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE + depends on ARM || ARM64 || COMPILE_TEST help Enable support for the ARM long descriptor pagetable format. This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page @@ -63,6 +64,7 @@ config MSM_IOMMU bool "MSM IOMMU Support" depends on ARM depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST + depends on BROKEN select IOMMU_API help Support for the IOMMUs found on certain Qualcomm SOCs. 
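The Kconfig hunks above rely on two standard dependency idioms rather than code changes: "depends on ARM || ARM64 || COMPILE_TEST" keeps an architecture-specific option visible on other architectures for build coverage only, and "depends on THERMAL || !THERMAL_OF" (in the TOUCHSCREEN_SUN4I hunk further up) stops a built-in driver from being paired with a modular thermal core. A minimal sketch of both idioms follows; FOO_TOUCH and ARCH_FOO are invented symbols used purely to illustrate the pattern, not part of these patches:

config FOO_TOUCH
	tristate "Hypothetical touchscreen with an optional thermal zone"
	# Real hardware exists only on ARCH_FOO, but COMPILE_TEST keeps the
	# driver in allmodconfig/allyesconfig builds on every architecture.
	depends on ARCH_FOO || COMPILE_TEST
	# Kconfig limits FOO_TOUCH to the value of this expression: with
	# THERMAL=m the driver can be at most =m, and with THERMAL_OF=n the
	# of-thermal glue is out of the picture entirely, so a built-in
	# driver is never left referencing symbols from a thermal module.
	depends on THERMAL || !THERMAL_OF
	help
	  Illustrative sketch only, showing the dependency idioms used in
	  the iommu and touchscreen Kconfig hunks above.
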
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { static int __init exynos_iommu_init(void) { + struct device_node *np; int ret; + np = of_find_matching_node(NULL, sysmmu_of_match); + if (!np) + return 0; + + of_node_put(np); + lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); if (!lv2table_kmem_cache) { diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -56,7 +56,8 @@ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ * (d)->bits_per_level) + (d)->pg_shift) -#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) +#define ARM_LPAE_PAGES_PER_PGD(d) \ + DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) /* * Calculate the index at level l used to map virtual address a using the @@ -66,7 +67,7 @@ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) #define ARM_LPAE_LVL_IDX(a,l,d) \ - (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ + (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) /* Calculate the block/page mapping size at level l for pagetable in d. */ diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) struct kmem_cache *p; const unsigned long flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignement */ + struct device_node *np; + + np = of_find_matching_node(NULL, omap_iommu_of_match); + if (!np) + return 0; + + of_node_put(np); p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, iopte_cachep_ctor); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { static int __init rk_iommu_init(void) { + struct device_node *np; int ret; + np = of_find_matching_node(NULL, rk_iommu_dt_ids); + if (!np) + return 0; + + of_node_put(np); + ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); if (ret) return ret; diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 463c235acbdc..4387dae14e45 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base; static void __iomem *main_int_base; static struct irq_domain *armada_370_xp_mpic_domain; static u32 doorbell_mask_reg; +static int parent_irq; #ifdef CONFIG_PCI_MSI static struct irq_domain *armada_370_xp_msi_domain; static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); @@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, { if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) armada_xp_mpic_smp_cpu_init(); + return NOTIFY_OK; } @@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { .priority = 100, }; +static int mpic_cascaded_secondary_init(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + if (action == CPU_STARTING || action == 
CPU_STARTING_FROZEN) + enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + + return NOTIFY_OK; +} + +static struct notifier_block mpic_cascaded_cpu_notifier = { + .notifier_call = mpic_cascaded_secondary_init, + .priority = 100, +}; + #endif /* CONFIG_SMP */ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { @@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, struct device_node *parent) { struct resource main_int_res, per_cpu_int_res; - int parent_irq, nr_irqs, i; + int nr_irqs, i; u32 control; BUG_ON(of_address_to_resource(node, 0, &main_int_res)); @@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); #endif } else { +#ifdef CONFIG_SMP + register_cpu_notifier(&mpic_cascaded_cpu_notifier); +#endif irq_set_chained_handler(parent_irq, armada_370_xp_mpic_handle_cascade_irq); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8996bdf0f61..596b0a9eee99 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its, { struct its_cmd_block *cmd, *sync_cmd, *next_cmd; struct its_collection *sync_col; + unsigned long flags; - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); cmd = its_allocate_entry(its); if (!cmd) { /* We're soooooo screewed... */ pr_err_ratelimited("ITS can't allocate, dropping command\n"); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); return; } sync_col = builder(cmd, desc); @@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its, post: next_cmd = its_post_commands(its); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); its_wait_for_range_completion(its, cmd, next_cmd); } @@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its) { int err; int i; - int psz = PAGE_SIZE; + int psz = SZ_64K; u64 shr = GITS_BASER_InnerShareable; for (i = 0; i < GITS_BASER_NR_REGS; i++) { u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); u64 type = GITS_BASER_TYPE(val); u64 entry_size = GITS_BASER_ENTRY_SIZE(val); + int order = get_order(psz); + int alloc_size; u64 tmp; void *base; if (type == GITS_BASER_TYPE_NONE) continue; - /* We're lazy and only allocate a single page for now */ - base = (void *)get_zeroed_page(GFP_KERNEL); + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory. + * + * For other tables, only allocate a single page. 
+ */ + if (type == GITS_BASER_TYPE_DEVICE) { + u64 typer = readq_relaxed(its->base + GITS_TYPER); + u32 ids = GITS_TYPER_DEVBITS(typer); + + order = get_order((1UL << ids) * entry_size); + if (order >= MAX_ORDER) { + order = MAX_ORDER - 1; + pr_warn("%s: Device Table too large, reduce its page order to %u\n", + its->msi_chip.of_node->full_name, order); + } + } + + alloc_size = (1 << order) * PAGE_SIZE; + base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!base) { err = -ENOMEM; goto out_free; @@ -841,7 +864,7 @@ retry_baser: break; } - val |= (PAGE_SIZE / psz) - 1; + val |= (alloc_size / psz) - 1; writeq_relaxed(val, its->base + GITS_BASER + i * 8); tmp = readq_relaxed(its->base + GITS_BASER + i * 8); @@ -882,7 +905,7 @@ retry_baser: } pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", - (int)(PAGE_SIZE / entry_size), + (int)(alloc_size / entry_size), its_base_type_string[type], (unsigned long)virt_to_phys(base), psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); @@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void) static struct its_device *its_find_device(struct its_node *its, u32 dev_id) { struct its_device *its_dev = NULL, *tmp; + unsigned long flags; - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); list_for_each_entry(tmp, &its->its_device_list, entry) { if (tmp->device_id == dev_id) { @@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) } } - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); return its_dev; } @@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, { struct its_device *dev; unsigned long *lpi_map; + unsigned long flags; void *itt; int lpi_base; int nr_lpis; @@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, nr_ites = max(2UL, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; - itt = kmalloc(sz, GFP_KERNEL); + itt = kzalloc(sz, GFP_KERNEL); lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); if (!dev || !itt || !lpi_map) { @@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); list_add(&dev->entry, &its->its_device_list); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); /* Bind the device to the first possible CPU */ cpu = cpumask_first(cpu_online_mask); @@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, static void its_free_device(struct its_device *its_dev) { - raw_spin_lock(&its_dev->its->lock); + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); list_del(&its_dev->entry); - raw_spin_unlock(&its_dev->its->lock); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); kfree(its_dev->itt); kfree(its_dev); } @@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) return 0; } +struct its_pci_alias { + struct pci_dev *pdev; + u32 dev_id; + u32 count; +}; + +static int its_pci_msi_vec_count(struct pci_dev *pdev) +{ + int msi, msix; + + msi = max(pci_msi_vec_count(pdev), 0); + msix = max(pci_msix_vec_count(pdev), 0); + + return max(msi, msix); +} + +static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) +{ + struct its_pci_alias *dev_alias = 
data; + + dev_alias->dev_id = alias; + if (pdev != dev_alias->pdev) + dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + + return 0; +} + static int its_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *info) { struct pci_dev *pdev; struct its_node *its; - u32 dev_id; struct its_device *its_dev; + struct its_pci_alias dev_alias; if (!dev_is_pci(dev)) return -EINVAL; pdev = to_pci_dev(dev); - dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); + dev_alias.pdev = pdev; + dev_alias.count = nvec; + + pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); its = domain->parent->host_data; - its_dev = its_find_device(its, dev_id); - if (WARN_ON(its_dev)) - return -EINVAL; + its_dev = its_find_device(its, dev_alias.dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. + */ + dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); + goto out; + } - its_dev = its_create_device(its, dev_id, nvec); + its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); if (!its_dev) return -ENOMEM; - dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); - + dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", + dev_alias.count, ilog2(dev_alias.count)); +out: info->scratchpad[0].ptr = its_dev; info->scratchpad[1].ptr = dev; return 0; @@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = { .deactivate = its_irq_domain_deactivate, }; +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~GITS_CTLR_ENABLE; + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + static int its_probe(struct device_node *node, struct irq_domain *parent) { struct resource res; @@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) goto out_unmap; } + err = its_force_quiescent(its_base); + if (err) { + pr_warn("%s: failed to quiesce, giving up\n", + node->full_name); + goto out_unmap; + } + pr_info("ITS: %s\n", node->full_name); its = kzalloc(sizeof(*its), GFP_KERNEL); @@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) writeq_relaxed(baser, its->base + GITS_CBASER); tmp = readq_relaxed(its->base + GITS_CBASER); writeq_relaxed(0, its->base + GITS_CWRITER); - writel_relaxed(1, its->base + GITS_CTLR); + writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { pr_info("ITS: using cache flushing for cmd queue\n"); @@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void) int its_cpu_init(void) { - if (!gic_rdists_supports_plpis()) { - pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); - return -ENXIO; - } - if (!list_empty(&its_nodes)) { + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } its_cpu_init_lpis(); its_cpu_init_collection(); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..fd8850def1b8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ 
b/drivers/irqchip/irq-gic-v3.c @@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, tlist |= 1 << (mpidr & 0xf); cpu = cpumask_next(cpu, mask); - if (cpu == nr_cpu_ids) + if (cpu >= nr_cpu_ids) goto out; mpidr = cpu_logical_map(cpu); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4634cf7d0ec3..471e1cdc1933 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d) static void gic_mask_irq(struct irq_data *d) { u32 mask = 1 << (gic_irq(d) % 32); + unsigned long flags; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); if (gic_arch_extn.irq_mask) gic_arch_extn.irq_mask(d); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_unmask_irq(struct irq_data *d) { u32 mask = 1 << (gic_irq(d) % 32); + unsigned long flags; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); if (gic_arch_extn.irq_unmask) gic_arch_extn.irq_unmask(d); writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_eoi_irq(struct irq_data *d) @@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) { void __iomem *base = gic_dist_base(d); unsigned int gicirq = gic_irq(d); + unsigned long flags; int ret; /* Interrupt configuration for SGIs can't be changed */ @@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); if (gic_arch_extn.irq_set_type) gic_arch_extn.irq_set_type(d, type); ret = gic_configure_irq(gicirq, type, base, NULL); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return ret; } @@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); unsigned int cpu, shift = (gic_irq(d) % 4) * 8; u32 val, mask, bit; + unsigned long flags; if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); @@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); mask = 0xff << shift; bit = gic_cpu_map[cpu] << shift; val = readl_relaxed(reg) & ~mask; writel_relaxed(val | bit, reg); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return IRQ_SET_MASK_OK; } diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 6a7447c304ac..358a574d9e8b 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -1609,7 +1609,7 @@ icn_setup(char *line) if (ints[0] > 1) membase = (unsigned long)ints[2]; if (str && *str) { - strcpy(sid, str); + strlcpy(sid, str, sizeof(sid)); icn_id = sid; if ((p = strchr(sid, ','))) { *p++ = 0; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 37de0173b6d2..74adcd2c967e 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -289,9 +289,16 @@ static 
void do_region(int rw, unsigned region, struct dm_io_region *where, struct request_queue *q = bdev_get_queue(where->bdev); unsigned short logical_block_size = queue_logical_block_size(q); sector_t num_sectors; + unsigned int uninitialized_var(special_cmd_max_sectors); - /* Reject unsupported discard requests */ - if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { + /* + * Reject unsupported discard and write same requests. + */ + if (rw & REQ_DISCARD) + special_cmd_max_sectors = q->limits.max_discard_sectors; + else if (rw & REQ_WRITE_SAME) + special_cmd_max_sectors = q->limits.max_write_same_sectors; + if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { dec_count(io, region, -EOPNOTSUPP); return; } @@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, store_io_and_region_in_bio(bio, io, region); if (rw & REQ_DISCARD) { - num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); + num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; } else if (rw & REQ_WRITE_SAME) { @@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, */ dp->get_page(dp, &page, &len, &offset); bio_add_page(bio, page, logical_block_size, offset); - num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); + num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; offset = 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 8b204ae216ab..f83a0f3fc365 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -20,6 +20,8 @@ #include <linux/log2.h> #include <linux/dm-kcopyd.h> +#include "dm.h" + #include "dm-exception-store.h" #define DM_MSG_PREFIX "snapshots" @@ -291,12 +293,23 @@ struct origin { }; /* + * This structure is allocated for each origin target + */ +struct dm_origin { + struct dm_dev *dev; + struct dm_target *ti; + unsigned split_boundary; + struct list_head hash_list; +}; + +/* * Size of the hash table for origin volumes. 
If we make this * the size of the minors list then it should be nearly perfect */ #define ORIGIN_HASH_SIZE 256 #define ORIGIN_MASK 0xFF static struct list_head *_origins; +static struct list_head *_dm_origins; static struct rw_semaphore _origins_lock; static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); @@ -310,12 +323,22 @@ static int init_origin_hash(void) _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), GFP_KERNEL); if (!_origins) { - DMERR("unable to allocate memory"); + DMERR("unable to allocate memory for _origins"); return -ENOMEM; } - for (i = 0; i < ORIGIN_HASH_SIZE; i++) INIT_LIST_HEAD(_origins + i); + + _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), + GFP_KERNEL); + if (!_dm_origins) { + DMERR("unable to allocate memory for _dm_origins"); + kfree(_origins); + return -ENOMEM; + } + for (i = 0; i < ORIGIN_HASH_SIZE; i++) + INIT_LIST_HEAD(_dm_origins + i); + init_rwsem(&_origins_lock); return 0; @@ -324,6 +347,7 @@ static int init_origin_hash(void) static void exit_origin_hash(void) { kfree(_origins); + kfree(_dm_origins); } static unsigned origin_hash(struct block_device *bdev) @@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o) list_add_tail(&o->hash_list, sl); } +static struct dm_origin *__lookup_dm_origin(struct block_device *origin) +{ + struct list_head *ol; + struct dm_origin *o; + + ol = &_dm_origins[origin_hash(origin)]; + list_for_each_entry (o, ol, hash_list) + if (bdev_equal(o->dev->bdev, origin)) + return o; + + return NULL; +} + +static void __insert_dm_origin(struct dm_origin *o) +{ + struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; + list_add_tail(&o->hash_list, sl); +} + +static void __remove_dm_origin(struct dm_origin *o) +{ + list_del(&o->hash_list); +} + /* * _origins_lock must be held when calling this function. 
* Returns number of snapshots registered using the supplied cow device, plus: @@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti) static void snapshot_resume(struct dm_target *ti) { struct dm_snapshot *s = ti->private; - struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL; + struct dm_origin *o; + struct mapped_device *origin_md = NULL; + bool must_restart_merging = false; down_read(&_origins_lock); + + o = __lookup_dm_origin(s->origin->bdev); + if (o) + origin_md = dm_table_get_md(o->ti->table); + if (!origin_md) { + (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); + if (snap_merging) + origin_md = dm_table_get_md(snap_merging->ti->table); + } + if (origin_md == dm_table_get_md(ti->table)) + origin_md = NULL; + if (origin_md) { + if (dm_hold(origin_md)) + origin_md = NULL; + } + + up_read(&_origins_lock); + + if (origin_md) { + dm_internal_suspend_fast(origin_md); + if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) { + must_restart_merging = true; + stop_merge(snap_merging); + } + } + + down_read(&_origins_lock); + (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); if (snap_src && snap_dest) { down_write(&snap_src->lock); @@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti) up_write(&snap_dest->lock); up_write(&snap_src->lock); } + up_read(&_origins_lock); + if (origin_md) { + if (must_restart_merging) + start_merge(snap_merging); + dm_internal_resume_fast(origin_md); + dm_put(origin_md); + } + /* Now we have correct chunk size, reregister */ reregister_snapshot(s); @@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap, * Origin: maps a linear range of a device, with hooks for snapshotting. 
*/ -struct dm_origin { - struct dm_dev *dev; - unsigned split_boundary; -}; - /* * Construct an origin mapping: <dev_path> * The context for an origin is merely a 'struct dm_dev *' @@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_open; } + o->ti = ti; ti->private = o; ti->num_flush_bios = 1; @@ -2180,6 +2263,7 @@ bad_alloc: static void origin_dtr(struct dm_target *ti) { struct dm_origin *o = ti->private; + dm_put_device(ti, o->dev); kfree(o); } @@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti) struct dm_origin *o = ti->private; o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); + + down_write(&_origins_lock); + __insert_dm_origin(o); + up_write(&_origins_lock); +} + +static void origin_postsuspend(struct dm_target *ti) +{ + struct dm_origin *o = ti->private; + + down_write(&_origins_lock); + __remove_dm_origin(o); + up_write(&_origins_lock); } static void origin_status(struct dm_target *ti, status_type_t type, @@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti, static struct target_type origin_target = { .name = "snapshot-origin", - .version = {1, 8, 1}, + .version = {1, 9, 0}, .module = THIS_MODULE, .ctr = origin_ctr, .dtr = origin_dtr, .map = origin_map, .resume = origin_resume, + .postsuspend = origin_postsuspend, .status = origin_status, .merge = origin_merge, .iterate_devices = origin_iterate_devices, @@ -2271,7 +2369,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 12, 0}, + .version = {1, 13, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, @@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = { static struct target_type merge_target = { .name = dm_snapshot_merge_target_name, - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 654773cb1eee..921aafd12aee 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; case -ENODATA: - if (get_pool_mode(tc->pool) == PM_READ_ONLY) { - /* - * This block isn't provisioned, and we have no way - * of doing so. 
- */ - handle_unserviceable_bio(tc->pool, bio); - cell_defer_no_holder(tc, virt_cell); - return DM_MAPIO_SUBMITTED; - } - /* fall through */ - case -EWOULDBLOCK: thin_defer_cell(tc, virt_cell); return DM_MAPIO_SUBMITTED; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 73f28802dc7a..8001fe9e3434 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) dm_get(md); atomic_inc(&md->open_count); - out: spin_unlock(&_minor_lock); @@ -442,16 +441,20 @@ out: static void dm_blk_close(struct gendisk *disk, fmode_t mode) { - struct mapped_device *md = disk->private_data; + struct mapped_device *md; spin_lock(&_minor_lock); + md = disk->private_data; + if (WARN_ON(!md)) + goto out; + if (atomic_dec_and_test(&md->open_count) && (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) queue_work(deferred_remove_workqueue, &deferred_remove_work); dm_put(md); - +out: spin_unlock(&_minor_lock); } @@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md) int minor = MINOR(disk_devt(md->disk)); unlock_fs(md); - bdput(md->bdev); destroy_workqueue(md->wq); if (md->kworker_task) @@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md) mempool_destroy(md->rq_pool); if (md->bs) bioset_free(md->bs); - blk_integrity_unregister(md->disk); - del_gendisk(md->disk); + cleanup_srcu_struct(&md->io_barrier); free_table_devices(&md->table_devices); - free_minor(minor); + dm_stats_cleanup(&md->stats); spin_lock(&_minor_lock); md->disk->private_data = NULL; spin_unlock(&_minor_lock); - + if (blk_get_integrity(md->disk)) + blk_integrity_unregister(md->disk); + del_gendisk(md->disk); put_disk(md->disk); blk_cleanup_queue(md->queue); - dm_stats_cleanup(&md->stats); + bdput(md->bdev); + free_minor(minor); + module_put(THIS_MODULE); kfree(md); } @@ -2616,6 +2621,19 @@ void dm_get(struct mapped_device *md) BUG_ON(test_bit(DMF_FREEING, &md->flags)); } +int dm_hold(struct mapped_device *md) +{ + spin_lock(&_minor_lock); + if (test_bit(DMF_FREEING, &md->flags)) { + spin_unlock(&_minor_lock); + return -EBUSY; + } + dm_get(md); + spin_unlock(&_minor_lock); + return 0; +} +EXPORT_SYMBOL_GPL(dm_hold); + const char *dm_device_name(struct mapped_device *md) { return md->name; @@ -2629,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait) might_sleep(); - spin_lock(&_minor_lock); map = dm_get_live_table(md, &srcu_idx); + + spin_lock(&_minor_lock); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); @@ -2638,10 +2657,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait) if (dm_request_based(md)) flush_kthread_worker(&md->kworker); + /* + * Take suspend_lock so that presuspend and postsuspend methods + * do not race with internal suspend. 
+ */ + mutex_lock(&md->suspend_lock); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } + mutex_unlock(&md->suspend_lock); /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); @@ -3115,6 +3140,7 @@ void dm_internal_suspend_fast(struct mapped_device *md) flush_workqueue(md->wq); dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); } +EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); void dm_internal_resume_fast(struct mapped_device *md) { @@ -3126,6 +3152,7 @@ void dm_internal_resume_fast(struct mapped_device *md) done: mutex_unlock(&md->suspend_lock); } +EXPORT_SYMBOL_GPL(dm_internal_resume_fast); /*----------------------------------------------------------------- * Event notification. diff --git a/drivers/md/md.c b/drivers/md/md.c index cadf9cc02b25..717daad71fb1 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5080,7 +5080,8 @@ int md_run(struct mddev *mddev) } if (err) { mddev_detach(mddev); - pers->free(mddev, mddev->private); + if (mddev->private) + pers->free(mddev, mddev->private); module_put(pers->owner); bitmap_destroy(mddev); return err; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a13f738a7b39..3ed9f42ddca6 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev) dump_zones(mddev); ret = md_integrity_register(mddev); - if (ret) - raid0_free(mddev, conf); return ret; } diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index f38ec424872e..5615522f8d62 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -739,7 +739,7 @@ static int __init kempld_init(void) for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++) if (strstr(id->ident, force_device_id)) - if (id->callback && id->callback(id)) + if (id->callback && !id->callback(id)) break; if (id->matches[0].slot == DMI_NONE) return -ENODEV; diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index ede50244f265..dbd907d7170e 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c @@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register); int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) { u16 value; + u8 *buf; + int ret; if (!data) return -EINVAL; - *data = 0; + + buf = kzalloc(sizeof(u8), GFP_KERNEL); + if (!buf) + return -ENOMEM; addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; value = swab16(addr); - return usb_control_msg(ucr->pusb_dev, + ret = usb_control_msg(ucr->pusb_dev, usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, 0, data, 1, 100); + value, 0, buf, 1, 100); + *data = *buf; + + kfree(buf); + return ret; } EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); @@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status) int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) { int ret; + u16 *buf; if (!status) return -EINVAL; - if (polling_pipe == 0) + if (polling_pipe == 0) { + buf = kzalloc(sizeof(u16), GFP_KERNEL); + if (!buf) + return -ENOMEM; + ret = usb_control_msg(ucr->pusb_dev, usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_POLL, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, 0, status, 2, 100); - else + 0, 0, buf, 2, 100); + *status = *buf; + + kfree(buf); + } else { ret = rtsx_usb_get_status_with_bulk(ucr, status); + } /* usb_control_msg may return positive when success */ if (ret < 0) diff --git a/drivers/mmc/core/pwrseq_simple.c 
b/drivers/mmc/core/pwrseq_simple.c index e9f1d8d84613..c53f14a7ce54 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev) PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { ret = PTR_ERR(pwrseq->reset_gpios[i]); - while (--i) + while (i--) gpiod_put(pwrseq->reset_gpios[i]); goto clk_put; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -526,6 +526,7 @@ config MTD_NAND_SUNXI config MTD_NAND_HISI504 tristate "Support for NAND controller on Hisilicon SoC Hip04" + depends on HAS_DMA help Enables support for NAND controller on Hisilicon SoC Hip04. diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) nand_writel(info, NDCR, ndcr | int_mask); } +static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) +{ + if (info->ecc_bch) { + int timeout; + + /* + * According to the datasheet, when reading from NDDB + * with BCH enabled, after each 32 bytes reads, we + * have to make sure that the NDSR.RDDREQ bit is set. + * + * Drain the FIFO 8 32 bits reads at a time, and skip + * the polling on the last read. + */ + while (len > 8) { + __raw_readsl(info->mmio_base + NDDB, data, 8); + + for (timeout = 0; + !(nand_readl(info, NDSR) & NDSR_RDDREQ); + timeout++) { + if (timeout >= 5) { + dev_err(&info->pdev->dev, + "Timeout on RDDREQ while draining the FIFO\n"); + return; + } + + mdelay(1); + } + + data += 32; + len -= 8; + } + } + + __raw_readsl(info->mmio_base + NDDB, data, len); +} + static void handle_data_pio(struct pxa3xx_nand_info *info) { unsigned int do_bytes = min(info->data_size, info->chunk_size); @@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) DIV_ROUND_UP(info->oob_size, 4)); break; case STATE_PIO_READING: - __raw_readsl(info->mmio_base + NDDB, - info->data_buff + info->data_buff_pos, - DIV_ROUND_UP(do_bytes, 4)); + drain_fifo(info, + info->data_buff + info->data_buff_pos, + DIV_ROUND_UP(do_bytes, 4)); if (info->oob_size > 0) - __raw_readsl(info->mmio_base + NDDB, - info->oob_buff + info->oob_buff_pos, - DIV_ROUND_UP(info->oob_size, 4)); + drain_fifo(info, + info->oob_buff + info->oob_buff_pos, + DIV_ROUND_UP(info->oob_size, 4)); break; default: dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, @@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) int ret, irq, cs; pdata = dev_get_platdata(&pdev->dev); + if (pdata->num_cs <= 0) + return -ENODEV; info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + sizeof(*host)) * pdata->num_cs, GFP_KERNEL); if (!info) diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index da4c79259f67..16e34b37d134 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -425,9 +425,10 @@ retry: ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", pnum, vol_id, lnum); err = -EBADMSG; - } else + } else { err = -EINVAL; ubi_ro_mode(ubi); + } } goto out_free; } else if (err == UBI_IO_BITFLIPS) diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 98d73aab52fe..58808f651452 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -131,7 +131,7 @@ config CAN_RCAR config 
CAN_XILINXCAN tristate "Xilinx CAN" - depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST + depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST depends on COMMON_CLK && HAS_IOMEM ---help--- Xilinx CAN driver. This driver supports both soft AXI CAN IP and diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; @@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..e97a08ce0b90 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -14,6 +14,8 @@ * Copyright (C) 2015 Valeo S.A. */ +#include <linux/spinlock.h> +#include <linux/kernel.h> #include <linux/completion.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -466,10 +468,11 @@ struct kvaser_usb { struct kvaser_usb_net_priv { struct can_priv can; - atomic_t active_tx_urbs; - struct usb_anchor tx_submitted; + spinlock_t tx_contexts_lock; + int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; + struct usb_anchor tx_submitted; struct completion start_comp, stop_comp; struct kvaser_usb *dev; @@ -584,8 +587,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, while (pos <= actual_len - MSG_HEADER_LEN) { tmp = buf + pos; - if (!tmp->len) - break; + /* Handle messages crossing the USB endpoint max packet + * size boundary. Check kvaser_usb_read_bulk_callback() + * for further details. 
+ */ + if (tmp->len == 0) { + pos = round_up(pos, + dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + tmp->len > actual_len) { dev_err(dev->udev->dev.parent, @@ -686,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; struct sk_buff *skb; struct can_frame *cf; + unsigned long flags; u8 channel, tid; channel = msg->u.tx_acknowledge_header.channel; @@ -729,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, stats->tx_packets++; stats->tx_bytes += context->dlc; - can_get_echo_skb(priv->netdev, context->echo_index); - context->echo_index = MAX_TX_URBS; - atomic_dec(&priv->active_tx_urbs); + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); } static void kvaser_usb_simple_msg_callback(struct urb *urb) @@ -787,7 +801,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); usb_free_urb(urb); - kfree(buf); return err; } @@ -796,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, return 0; } -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) -{ - int i; - - usb_kill_anchored_urbs(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < MAX_TX_URBS; i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; -} - static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_error_summary *es, struct can_frame *cf) @@ -1317,8 +1319,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) while (pos <= urb->actual_length - MSG_HEADER_LEN) { msg = urb->transfer_buffer + pos; - if (!msg->len) - break; + /* The Kvaser firmware can only read and write messages that + * does not cross the USB's endpoint wMaxPacketSize boundary. + * If a follow-up command crosses such boundary, firmware puts + * a placeholder zero-length command in its place then aligns + * the real command to the next max packet size. + * + * Handle such cases or we're going to miss a significant + * number of events in case of a heavy rx load on the bus. + */ + if (msg->len == 0) { + pos = round_up(pos, dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + msg->len > urb->actual_length) { dev_err(dev->udev->dev.parent, "Format error\n"); @@ -1326,7 +1339,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) } kvaser_usb_handle_message(dev, msg); - pos += msg->len; } @@ -1498,6 +1510,24 @@ error: return err; } +static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) +{ + int i; + + priv->active_tx_contexts = 0; + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; +} + +/* This method might sleep. Do not call it in the atomic context + * of URB completions. 
+ */ +static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +{ + usb_kill_anchored_urbs(&priv->tx_submitted); + kvaser_usb_reset_tx_urb_contexts(priv); +} + static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) { int i; @@ -1615,9 +1645,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, struct urb *urb; void *buf; struct kvaser_msg *msg; - int i, err; - int ret = NETDEV_TX_OK; + int i, err, ret = NETDEV_TX_OK; u8 *msg_tx_can_flags = NULL; /* GCC */ + unsigned long flags; if (can_dropped_invalid_skb(netdev, skb)) return NETDEV_TX_OK; @@ -1634,7 +1664,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); - goto nobufmem; + goto freeurb; } msg = buf; @@ -1671,22 +1701,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; + spin_lock_irqsave(&priv->tx_contexts_lock, flags); for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &priv->tx_contexts[i]; + + context->echo_index = i; + can_put_echo_skb(skb, netdev, context->echo_index); + ++priv->active_tx_contexts; + if (priv->active_tx_contexts >= MAX_TX_URBS) + netif_stop_queue(netdev); + break; } } + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); /* This should never happen; it implies a flow control bug */ if (!context) { netdev_warn(netdev, "cannot find free context\n"); + + kfree(buf); ret = NETDEV_TX_BUSY; - goto releasebuf; + goto freeurb; } context->priv = priv; - context->echo_index = i; context->dlc = cf->can_dlc; msg->u.tx_can.tid = context->echo_index; @@ -1698,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, kvaser_usb_write_bulk_callback, context); usb_anchor_urb(urb, &priv->tx_submitted); - can_put_echo_skb(skb, netdev, context->echo_index); - - atomic_inc(&priv->active_tx_urbs); - - if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) - netif_stop_queue(netdev); - err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; + netif_wake_queue(netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - atomic_dec(&priv->active_tx_urbs); usb_unanchor_urb(urb); stats->tx_dropped++; @@ -1719,16 +1758,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, else netdev_warn(netdev, "Failed tx_urb %d\n", err); - goto releasebuf; + goto freeurb; } - usb_free_urb(urb); - - return NETDEV_TX_OK; + ret = NETDEV_TX_OK; -releasebuf: - kfree(buf); -nobufmem: +freeurb: usb_free_urb(urb); return ret; } @@ -1840,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf, struct kvaser_usb *dev = usb_get_intfdata(intf); struct net_device *netdev; struct kvaser_usb_net_priv *priv; - int i, err; + int err; err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); if (err) @@ -1854,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf, priv = netdev_priv(netdev); + init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); - init_usb_anchor(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; - priv->dev = dev; priv->netdev = netdev; priv->channel = channel; + 
spin_lock_init(&priv->tx_contexts_lock); + kvaser_usb_reset_tx_urb_contexts(priv); + priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = CAN_USB_CLOCK; priv->can.bittiming_const = &kvaser_usb_bittiming_const; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..0bac0f14edc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) pdev->usb_if = ppdev->usb_if; pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; + + /* do a copy of the ctrlmode[_supported] too */ + dev->can.ctrlmode = ppdev->dev.can.ctrlmode; + dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; } pdev->usb_if->dev[dev->ctrl_idx] = dev; diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e6561df1..15a8190a6f75 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) { struct pcnet32_private *lp; int i, media; - int fdx, mii, fset, dxsuflo; + int fdx, mii, fset, dxsuflo, sram; int chip_version; char *chipname; struct net_device *dev; @@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } /* initialize variables */ - fdx = mii = fset = dxsuflo = 0; + fdx = mii = fset = dxsuflo = sram = 0; chip_version = (chip_version >> 12) & 0xffff; switch (chip_version) { @@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) chipname = "PCnet/FAST III 79C973"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2626: chipname = "PCnet/Home 79C978"; /* PCI */ @@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) chipname = "PCnet/FAST III 79C975"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2628: chipname = "PCnet/PRO 79C976"; @@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dxsuflo = 1; } + /* + * The Am79C973/Am79C975 controllers come with 12K of SRAM + * which we can use for the Tx/Rx buffers but most importantly, + * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid + * Tx fifo underflows. + */ + if (sram) { + /* + * The SRAM is being configured in two steps. First we + * set the SRAM size in the BCR25:SRAM_SIZE bits. According + * to the datasheet, each bit corresponds to a 512-byte + * page so we can have at most 24 pages. The SRAM_SIZE + * holds the value of the upper 8 bits of the 16-bit SRAM size. + * The low 8-bits start at 0x00 and end at 0xff. So the + * address range is from 0x0000 up to 0x17ff. Therefore, + * the SRAM_SIZE is set to 0x17. The next step is to set + * the BCR26:SRAM_BND midway through so the Tx and Rx + * buffers can share the SRAM equally. 
+ */ + a->write_bcr(ioaddr, 25, 0x17); + a->write_bcr(ioaddr, 26, 0xc); + /* And finally enable the NOUFLO bit */ + a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); + } + dev = alloc_etherdev(sizeof(*lp)); if (!dev) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) if (!xgene_ring_mgr_init(pdata)) return -ENODEV; - if (!efi_enabled(EFI_BOOT)) { + if (pdata->clk) { clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_enet_acpi_match[] = { { "APMC0D05", }, + { "APMC0D30", }, + { "APMC0D31", }, { } }; MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); @@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); #ifdef CONFIG_OF static struct of_device_id xgene_enet_of_match[] = { {.compatible = "apm,xgene-enet",}, + {.compatible = "apm,xgene1-sgenet",}, + {.compatible = "apm,xgene1-xgenet",}, {}, }; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) { struct bcm_enet_priv *priv; struct net_device *dev; - int tx_work_done, rx_work_done; + int rx_work_done; priv = container_of(napi, struct bcm_enet_priv, napi); dev = priv->net_dev; @@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ - tx_work_done = bcm_enet_tx_reclaim(dev, 0); + bcm_enet_tx_reclaim(dev, 0); spin_lock(&priv->rx_lock); rx_work_done = bcm_enet_receive_queue(dev, budget); spin_unlock(&priv->rx_lock); - if (rx_work_done >= budget || tx_work_done > 0) { - /* rx/tx queue is not yet empty/clean */ + if (rx_work_done >= budget) { + /* rx queue is not yet empty/clean */ return rx_work_done; } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, slot->skb = skb; slot->dma_addr = dma_addr; - if (slot->dma_addr & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); - return 0; } @@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) ring->mmio_base); goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX); @@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) err = -ENOMEM; goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - 
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..996e215fc324 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); + /* Set PCIe reset type to fundamental for EEH recovery */ + pdev->needs_freset = 1; + /* AER (Advanced Error reporting) configuration */ rc = pci_enable_pcie_error_reporting(pdev); if (!rc) @@ -12766,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; - if (!CHIP_IS_E1x(bp)) { + if (!chip_is_e1x) { dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; dev->hw_enc_features = diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) return -EINVAL; + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (wol->wolopts & WAKE_MAGICSECURE) { bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), UMAC_MPD_PW_MS); bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), UMAC_MPD_PW_LS); - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); reg |= MPD_PW_EN; - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } else { + reg &= ~MPD_PW_EN; } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { }; #if defined(CONFIG_OF) -static struct macb_config pc302gem_config = { +static const struct macb_config pc302gem_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, }; -static struct macb_config sama5d3_config = { +static const struct macb_config sama5d3_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, }; -static struct macb_config sama5d4_config = { +static const struct macb_config sama5d4_config = { .caps = 0, .dma_burst_length = 4, }; @@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) if (bp->pdev->dev.of_node) { match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); if (match && match->data) { - config = (const struct macb_config *)match->data; + config = match->data; bp->caps = config->caps; /* diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -351,7 +351,7 @@ /* Bitfields in MID */ #define MACB_IDNUM_OFFSET 16 
-#define MACB_IDNUM_SIZE 16 +#define MACB_IDNUM_SIZE 12 #define MACB_REV_OFFSET 0 #define MACB_REV_SIZE 16 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 853c38997c82..1abdfa123c6c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1120,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, } /* Installed successfully, update the cached header too. */ - memcpy(card_fw, fs_fw, sizeof(*card_fw)); + *card_fw = *fs_fw; card_fw_usable = 1; *reset = 0; /* already reset as part of load_fw */ } diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 3b42556f7f8d..ed41559bae77 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev) (unsigned int)tp->rx_ring[i].buffer1, (unsigned int)tp->rx_ring[i].buffer2, buf[0], buf[1], buf[2]); - for (j = 0; buf[j] != 0xee && j < 1600; j++) + for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) if (j < 100) pr_cont(" %02x", buf[j]); pr_cont(" j=%d\n", j); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..27b9fe99a9bd 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -354,6 +354,7 @@ struct be_vf_cfg { u16 vlan_tag; u32 tx_rate; u32 plink_tracking; + u32 privileges; }; enum vf_state { @@ -423,6 +424,7 @@ struct be_adapter { u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ u8 __iomem *db; /* Door Bell */ + u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ struct be_dma_mem mbox_mem; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..7f05f309e935 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, { int num_eqs, i = 0; - if (lancer_chip(adapter) && num > 8) { - while (num) { - num_eqs = min(num, 8); - __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); - i += num_eqs; - num -= num_eqs; - } - } else { - __be_cmd_modify_eqd(adapter, set_eqd, num); + while (num) { + num_eqs = min(num, 8); + __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); + i += num_eqs; + num -= num_eqs; } return 0; @@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num) + u32 num, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); + req->hdr.domain = domain; req->interface_id = if_id; req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 
1 : 0; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db761e8e42a3..a7634a3f052a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, int be_cmd_get_fw_ver(struct be_adapter *adapter); int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num); + u32 num, u32 domain); int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..e6b790f0d9dc 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter) for_each_set_bit(i, adapter->vids, VLAN_N_VID) vids[num++] = cpu_to_le16(i); - status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); + status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); if (status) { dev_err(dev, "Setting HW VLAN filtering failed\n"); /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf, return 0; } +static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + u16 vids[BE_NUM_VLANS_SUPPORTED]; + int vf_if_id = vf_cfg->if_handle; + int status; + + /* Enable Transparent VLAN Tagging */ + status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); + if (status) + return status; + + /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ + vids[0] = 0; + status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); + if (!status) + dev_info(&adapter->pdev->dev, + "Cleared guest VLANs on VF%d", vf); + + /* After TVT is enabled, disallow VFs to program VLAN filters */ + if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & + ~BE_PRIV_FILTMGMT, vf + 1); + if (!status) + vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; + } + return 0; +} + +static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + struct device *dev = &adapter->pdev->dev; + int status; + + /* Reset Transparent VLAN Tagging. 
*/ + status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, + vf_cfg->if_handle, 0); + if (status) + return status; + + /* Allow VFs to program VLAN filtering */ + if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | + BE_PRIV_FILTMGMT, vf + 1); + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; + dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); + } + } + + dev_info(dev, + "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); + return 0; +} + static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; - int status = 0; + int status; if (!sriov_enabled(adapter)) return -EPERM; @@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if (vlan || qos) { vlan |= qos << VLAN_PRIO_SHIFT; - if (vf_cfg->vlan_tag != vlan) - status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, - vf_cfg->if_handle, 0); + status = be_set_vf_tvt(adapter, vf, vlan); } else { - /* Reset Transparent Vlan Tagging. */ - status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, - vf + 1, vf_cfg->if_handle, 0); + status = be_clear_vf_tvt(adapter, vf); } if (status) { dev_err(&adapter->pdev->dev, - "VLAN %d config on VF %d failed : %#x\n", vlan, - vf, status); + "VLAN %d config on VF %d failed : %#x\n", vlan, vf, + status); return be_cmd_status(status); } vf_cfg->vlan_tag = vlan; - return 0; } @@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter) } } } else { - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW, &ue_lo); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HIGH, &ue_hi); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); + ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); + ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); + ue_lo_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_LOW_MASK); + ue_hi_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_HI_MASK); ue_lo = (ue_lo & ~ue_lo_mask); ue_hi = (ue_hi & ~ue_hi_mask); @@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, u32 cap_flags, u32 vf) { u32 en_flags; - int status; en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | @@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, en_flags &= cap_flags; - status = be_cmd_if_create(adapter, cap_flags, en_flags, - if_handle, vf); - - return status; + return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); } static int be_vfs_if_create(struct be_adapter *adapter) @@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter) if (!BE3_chip(adapter)) { status = be_cmd_get_profile_config(adapter, &res, vf + 1); - if (!status) + if (!status) { cap_flags = res.if_cap_flags; + /* Prevent VFs from enabling VLAN promiscuous + * mode + */ + cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } } status = be_if_create(adapter, &vf_cfg->if_handle, @@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter) struct device *dev = &adapter->pdev->dev; struct be_vf_cfg *vf_cfg; int status, old_vfs, vf; - u32 privileges; old_vfs = pci_num_vf(adapter->pdev); @@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter) 
for_all_vfs(adapter, vf_cfg, vf) { /* Allow VFs to programs MAC/VLAN filters */ - status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); - if (!status && !(privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, + vf + 1); + if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { status = be_cmd_set_fn_privileges(adapter, - privileges | + vf_cfg->privileges | BE_PRIV_FILTMGMT, vf + 1); - if (!status) + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; dev_info(dev, "VF%d has FILTMGMT privilege\n", vf); + } } /* Allow full available bandwidth */ @@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) static int be_map_pci_bars(struct be_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; u8 __iomem *addr; if (BEx_chip(adapter) && be_physfn(adapter)) { - adapter->csr = pci_iomap(adapter->pdev, 2, 0); + adapter->csr = pci_iomap(pdev, 2, 0); if (!adapter->csr) return -ENOMEM; } - addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); + addr = pci_iomap(pdev, db_bar(adapter), 0); if (!addr) goto pci_map_err; adapter->db = addr; + if (skyhawk_chip(adapter) || BEx_chip(adapter)) { + if (be_physfn(adapter)) { + /* PCICFG is the 2nd BAR in BE2 */ + addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0); + if (!addr) + goto pci_map_err; + adapter->pcicfg = addr; + } else { + adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + } + } + be_roce_map_pci_bars(adapter); return 0; pci_map_err: - dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); + dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); be_unmap_pci_bars(adapter); return -ENOMEM; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..78e1ce09b1ab 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1189,13 +1189,12 @@ static void fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) { struct fec_enet_private *fep; - struct bufdesc *bdp, *bdp_t; + struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; struct fec_enet_priv_tx_q *txq; struct netdev_queue *nq; int index = 0; - int i, bdnum; int entries_free; fep = netdev_priv(ndev); @@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) if (bdp == txq->cur_tx) break; - bdp_t = bdp; - bdnum = 1; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - while (!skb) { - bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id); - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - bdnum++; - } - if (skb_shinfo(skb)->nr_frags && - (status = bdp_t->cbd_sc) & BD_ENET_TX_READY) - break; + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - for (i = 0; i < bdnum; i++) { - if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - if (i < bdnum - 1) - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); - } + skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; + if (!skb) { + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + continue; + } /* Check for errors. 
*/ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) vlan_packet_rcvd = true; - skb_copy_to_linear_data_offset(skb, VLAN_HLEN, - data, (2 * ETH_ALEN)); + memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); skb_pull(skb, VLAN_HLEN); } @@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id) writel(int_events, fep->hwp + FEC_IEVENT); fec_enet_collect_events(fep, int_events); - if (fep->work_tx || fep->work_rx) { + if ((fep->work_tx || fep->work_rx) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { @@ -3383,7 +3370,6 @@ fec_drv_remove(struct platform_device *pdev) regulator_disable(fep->reg_phy); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); - fec_enet_clk_enable(ndev, false); of_node_put(fep->phy_node); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 178e54028d10..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, return 0; } +static int gfar_of_group_count(struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + if (!of_node_cmp(child->name, "queue-group")) + num++; + + return num; +} + static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; @@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) num_rx_qs = 1; } else { /* MQ_MG_MODE */ /* get the actual number of supported groups */ - unsigned int num_grps = of_get_available_child_count(np); + unsigned int num_grps = gfar_of_group_count(np); if (num_grps == 0 || num_grps > MAXGROUPS) { dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", @@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) /* Parse and initialize group specific information */ if (priv->mode == MQ_MG_MODE) { - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { + if (of_node_cmp(child->name, "queue-group")) + continue; + err = gfar_parse_group(child, priv, model); if (err) goto err_grp_init; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 072426a72745..cd7675ac5bf9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1136,6 +1136,8 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { + napi_complete(napi); + /* We think we are done - reenable interrupts, * then check once more to make sure we are done. 
*/ @@ -1144,8 +1146,6 @@ restart_poll: BUG_ON(lpar_rc != H_SUCCESS); - napi_complete(napi); - if (ibmveth_rxq_pending_buffer(adapter) && napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4efb89..ebce5bb24df9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev) /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task); - mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); - #ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); @@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); + mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); + return 0; out: diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2a8268e6be15..ebbe244e80dd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -453,7 +453,7 @@ struct mlx4_en_port_stats { unsigned long rx_chksum_none; unsigned long rx_chksum_complete; unsigned long tx_chksum_offload; -#define NUM_PORT_STATS 9 +#define NUM_PORT_STATS 10 }; struct mlx4_en_perf_stats { diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 209ee1b27f8d..8678e39aba08 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -92,6 +92,7 @@ static const char version[] = #include "smc91x.h" #if defined(CONFIG_ASSABET_NEPONSET) +#include <mach/assabet.h> #include <mach/neponset.h> #endif @@ -2247,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res; + struct resource *res, *ires; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; - unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2342,19 +2342,16 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { ret = -ENODEV; goto out_release_io; } - /* - * If this platform does not specify any special irqflags, or if - * the resource supplies a trigger, override the irqflags with - * the trigger flags from the resource. 
- */ - irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); - if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) - irq_flags = irq_resflags & IRQF_TRIGGER_MASK; + + ndev->irq = ires->start; + + if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) + irq_flags = ires->flags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; const char *mac = NULL; + int irq, wol_irq, lpi_irq; + + /* Get IRQ information early to have an ability to ask for deferred + * probe if needed before we went too far with resource allocation. + */ + irq = platform_get_irq_byname(pdev, "macirq"); + if (irq < 0) { + if (irq != -EPROBE_DEFER) { + dev_err(dev, + "MAC IRQ configuration information not found\n"); + } + return irq; + } + + /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq" + * + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ + wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (wol_irq < 0) { + if (wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + wol_irq = irq; + } + + lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = devm_ioremap_resource(dev, res); @@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) return PTR_ERR(priv); } + /* Copy IRQ values to priv structure which is now available */ + priv->dev->irq = irq; + priv->wol_irq = wol_irq; + priv->lpi_irq = lpi_irq; + /* Get MAC address if available (DT) */ if (mac) memcpy(priv->dev->dev_addr, mac, ETH_ALEN); - /* Get the MAC information */ - priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); - if (priv->dev->irq < 0) { - if (priv->dev->irq != -EPROBE_DEFER) { - netdev_err(priv->dev, - "MAC IRQ configuration information not found\n"); - } - return priv->dev->irq; - } - - /* - * On some platforms e.g.
SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (priv->wol_irq < 0) { - if (priv->wol_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - priv->wol_irq = priv->dev->irq; - } - - priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (priv->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - platform_set_drvdata(pdev, priv->dev); pr_debug("STMMAC platform driver registration completed"); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a495931a66a1..0e0fbb5842b3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { + napi_complete(napi); w5100_write(priv, W5100_IMR, IR_S0); mmiowb(); - napi_complete(napi); } return rx_count; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 09322d9db578..4b310002258d 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { + napi_complete(napi); w5300_write(priv, W5300_IMR, IR_S0); mmiowb(); - napi_complete(napi); } return rx_count; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index f1ee71e22241..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1730,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) if (team->ops.port_change_dev_addr) team->ops.port_change_dev_addr(team, port); - rcu_read_unlock(); + mutex_unlock(&team->lock); return 0; } diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 3eed708a6182..1762ad3910b2 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -46,8 +46,7 @@ enum cx82310_status { }; #define CMD_PACKET_SIZE 64 -/* first command after power on can take around 8 seconds */ -#define CMD_TIMEOUT 15000 +#define CMD_TIMEOUT 100 #define CMD_REPLY_RETRY 5 #define CX82310_MTU 1514 @@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); if (ret < 0) { - dev_err(&dev->udev->dev, "send command %#x: error %d\n", - cmd, ret); + if (cmd != CMD_GET_LINK_STATUS) + dev_err(&dev->udev->dev, "send command %#x: error %d\n", + cmd, ret); goto end; } @@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, buf, CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); if (ret < 0) { - dev_err(&dev->udev->dev, - "reply receive error %d\n", ret); + if (cmd != CMD_GET_LINK_STATUS) + dev_err(&dev->udev->dev, + "reply receive error %d\n", + ret); goto end; } if (actual_len > 0) @@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) int ret; char 
buf[15]; struct usb_device *udev = dev->udev; + u8 link[3]; + int timeout = 50; /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 @@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) if (!dev->partial_data) return -ENOMEM; + /* wait for firmware to become ready (indicated by the link being up) */ + while (--timeout) { + ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0, + link, sizeof(link)); + /* the command can time out during boot - it's not an error */ + if (!ret && link[0] == 1 && link[2] == 1) + break; + msleep(500); + }; + if (!timeout) { + dev_err(&udev->dev, "firmware not ready in time\n"); + return -ETIMEDOUT; + } + /* enable ethernet mode (?) */ ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); if (ret) { @@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = { .tx_fixup = cx82310_tx_fixup, }; +#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_DEV_INFO, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bDeviceClass = (cl), \ + .bDeviceSubClass = (sc), \ + .bDeviceProtocol = (pr) + static const struct usb_device_id products[] = { { - USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), + USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), .driver_info = (unsigned long) &cx82310_info }, { }, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f1ff3666f090..59b0e9754ae3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi) { int i; - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { + napi_hash_del(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); + } kfree(vi->rq); kfree(vi->sq); @@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev) cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { + for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); - napi_hash_del(&vi->rq[i].napi); - netif_napi_del(&vi->rq[i].napi); - } } remove_vq_common(vi); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1e0a775ea882..f8528a4cf54f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; flags &= ~VXLAN_HF_RCO; - vni &= VXLAN_VID_MASK; + vni &= VXLAN_VNI_MASK; } /* For backwards compatibility, only allow reserved fields to be @@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) flags &= ~VXLAN_GBP_USED_BITS; } - if (flags || (vni & ~VXLAN_VID_MASK)) { + if (flags || vni & ~VXLAN_VNI_MASK) { /* If there are any unprocessed flags remaining treat * this as a malformed packet. 
This behavior diverges from * VXLAN RFC (RFC7348) which stipulates that bits in reserved diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ccbdb05b28cd..75345c1e8c34 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy, case 0x432a: /* BCM4321 */ case 0x432d: /* BCM4322 */ case 0x4352: /* BCM43222 */ + case 0x435a: /* BCM43228 */ case 0x4333: /* BCM4331 */ case 0x43a2: /* BCM4360 */ case 0x43b3: /* BCM4352 */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c index 50cdf7090198..8eff2753abad 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c @@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, void *dcmd_buf = NULL, *wr_pointer; u16 msglen, maxmsglen = PAGE_SIZE - 0x100; - brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, - cmdhdr->len); + if (len < sizeof(*cmdhdr)) { + brcmf_err("vendor command too short: %d\n", len); + return -EINVAL; + } vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); ifp = vif->ifp; - len -= sizeof(struct brcmf_vndr_dcmd_hdr); + brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd); + + if (cmdhdr->offset > len) { + brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len); + return -EINVAL; + } + + len -= cmdhdr->offset; ret_len = cmdhdr->len; if (ret_len > 0 || len > 0) { if (len > BRCMF_DCMD_MAXLEN) { diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index c3817fae16c0..06f6cc08f451 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl1000_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", @@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = { .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl100_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 21e5d0843a62..890b95f497d6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + const struct iwl_cfg iwl2000_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", @@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = 
IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl105_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", @@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl135_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 332bbede39e5..724194e23414 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl5300_agn_cfg = { .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", @@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = { .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl5150_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 8f2c3c8c6b84..21b2630763dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6005_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", @@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", @@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = { .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 
.base_params = &iwl6000_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6000i_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", @@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { .base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6050_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", @@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = { .base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6150_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 1ec4d55155f7..7810c41cf9a7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, + if (mvmvif->phy_ctxt && + IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, mvmvif->phy_ctxt->id)) smps_mode = IEEE80211_SMPS_AUTOMATIC; diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d530ef3da107..542ee74f290a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) + if (mvmvif->phy_ctxt && + data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1ff7ec08532d..09654e73a533 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) + if ((mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_BEAMFORMER) && + (mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } @@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - iwl_mvm_cancel_scan(mvm); + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a hw_scan when it's already stopped. This can + * happen, for instance, if we stopped the scan ourselves, + * called ieee80211_scan_completed() and the userspace called + * cancel scan before ieee80211_scan_work() could run. + * To handle that, simply return if the scan is not running.
+ */ + /* FIXME: for now, we ignore this race for UMAC scans, since + * they don't set the scan_status. + */ + if ((mvm->scan_status == IWL_MVM_SCAN_OS) || + (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_cancel_scan(mvm); mutex_unlock(&mvm->mutex); } @@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, int ret; mutex_lock(&mvm->mutex); + + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a sched_scan when it's already stopped. This + * can happen, for instance, if we stopped the scan ourselves, + * called ieee80211_sched_scan_stopped() and the userspace called + * stop sched scan before ieee80211_sched_scan_stopped_work() + * could run. To handle this, simply return if the scan is + * not running. + */ + /* FIXME: for now, we ignore this race for UMAC scans, since + * they don't set the scan_status. + */ + if (mvm->scan_status != IWL_MVM_SCAN_SCHED && + !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + mutex_unlock(&mvm->mutex); + return 0; + } + ret = iwl_mvm_scan_offload_stop(mvm, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); return ret; - } static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 194bd1f939ca..efa9688a4cf1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -134,9 +134,12 @@ enum rs_column_mode { #define MAX_NEXT_COLUMNS 7 #define MAX_COLUMN_CHECKS 3 +struct rs_tx_column; + typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl); + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col); struct rs_tx_column { enum rs_column_mode mode; @@ -147,13 +150,15 @@ struct rs_tx_column { }; static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { - return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); + return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); } static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) return false; @@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) return false; @@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { struct rs_rate *rate = &tbl->rate; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; @@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, for (j = 0; j < MAX_COLUMN_CHECKS; j++) { allow_func = next_col->checks[j]; - if (allow_func && !allow_func(mvm, sta, tbl)) + if (allow_func && !allow_func(mvm, sta, tbl, next_col)) break; } diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 7e9aa3cb3254..c47c8051da77 ---
a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_NONE) return 0; - if (iwl_mvm_is_radio_killed(mvm)) + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; goto out; + } if (mvm->scan_status != IWL_MVM_SCAN_SCHED && (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || @@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", sched ? "offloaded " : "", ret); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); - return ret; + goto out; } IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", sched ? "offloaded " : ""); ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); - if (ret) - return ret; - +out: /* * Clear the scan status so the next scan requests will succeed. This * also ensures the Rx handler doesn't do anything, as the scan was @@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_OS) iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); -out: mvm->scan_status = IWL_MVM_SCAN_NONE; if (notify) { @@ -1177,7 +1176,7 @@ out: ieee80211_scan_completed(mvm->hw, true); } - return 0; + return ret; } static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 54fafbf9a711..f8d6f306dd76 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) * request */ list_for_each_entry(te_data, &mvm->time_event_list, list) { - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && - te_data->running) { + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); is_p2p = true; goto remove_te; @@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) * request */ list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { - if (te_data->running) { - mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - goto remove_te; - } + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + goto remove_te; } remove_te: diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 1d4677460711..074f716020aa 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) } return true; - } else if (0x86DD == ether_type) { - return true; + } else if (ETH_P_IPV6 == ether_type) { + /* TODO: Handle any IPv6 cases that need special handling. 
+ * For now, always return false + */ + goto end; } end: diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, unsigned int num_queues = vif->num_queues; int i; unsigned int queue_index; - struct xenvif_stats *vif_stats; for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { unsigned long accum = 0; for (queue_index = 0; queue_index < num_queues; ++queue_index) { - vif_stats = &vif->queues[queue_index].stats; + void *vif_stats = &vif->queues[queue_index].stats; accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); } data[i] = accum; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c4d68d768408..997cf0901ac2 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, s8 st); +static void push_tx_responses(struct xenvif_queue *queue); static inline int tx_work_todo(struct xenvif_queue *queue); @@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue, unsigned long flags; do { - int notify; - spin_lock_irqsave(&queue->response_lock, flags); make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + push_tx_responses(queue); spin_unlock_irqrestore(&queue->response_lock, flags); - if (notify) - notify_remote_via_irq(queue->tx_irq); - if (cons == end) break; txp = RING_GET_REQUEST(&queue->tx, cons++); @@ -1349,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s { unsigned int offset = skb_headlen(skb); skb_frag_t frags[MAX_SKB_FRAGS]; - int i; + int i, f; struct ubuf_info *uarg; struct sk_buff *nskb = skb_shinfo(skb)->frag_list; @@ -1389,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s frags[i].page_offset = 0; skb_frag_size_set(&frags[i], len); } - /* swap out with old one */ - memcpy(skb_shinfo(skb)->frags, - frags, - i * sizeof(skb_frag_t)); - skb_shinfo(skb)->nr_frags = i; - skb->truesize += i * PAGE_SIZE; - /* remove traces of mapped pages and frag_list */ + /* Copied all the bits from the frag list -- free it. */ skb_frag_list_init(skb); + xenvif_skb_zerocopy_prepare(queue, nskb); + kfree_skb(nskb); + + /* Release all the original (foreign) frags. */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + skb_frag_unref(skb, f); uarg = skb_shinfo(skb)->destructor_arg; /* increase inflight counter to offset decrement in callback */ atomic_inc(&queue->inflight_packets); uarg->callback(uarg, true); skb_shinfo(skb)->destructor_arg = NULL; - xenvif_skb_zerocopy_prepare(queue, nskb); - kfree_skb(nskb); + /* Fill the skb with the new (local) frags. 
*/ + memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); + skb_shinfo(skb)->nr_frags = i; + skb->truesize += i * PAGE_SIZE; return 0; } @@ -1655,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, { struct pending_tx_info *pending_tx_info; pending_ring_idx_t index; - int notify; unsigned long flags; pending_tx_info = &queue->pending_tx_info[pending_idx]; @@ -1671,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, index = pending_index(queue->pending_prod++); queue->pending_ring[index] = pending_idx; - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + push_tx_responses(queue); spin_unlock_irqrestore(&queue->response_lock, flags); - - if (notify) - notify_remote_via_irq(queue->tx_irq); } @@ -1697,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue, queue->tx.rsp_prod_pvt = ++i; } +static void push_tx_responses(struct xenvif_queue *queue) +{ + int notify; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + if (notify) + notify_remote_via_irq(queue->tx_irq); +} + static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, s8 st, diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 38d1c51f58b1..7bcaeec876c0 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -84,8 +84,7 @@ config OF_RESOLVE bool config OF_OVERLAY - bool - depends on OF + bool "Device Tree overlays" select OF_DYNAMIC select OF_RESOLVE diff --git a/drivers/of/base.c b/drivers/of/base.c index 0a8aeb8523fe..8f165b112e03 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -714,16 +714,12 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, const char *path) { struct device_node *child; - int len = strchrnul(path, '/') - path; - int term; + int len; + len = strcspn(path, "/:"); if (!len) return NULL; - term = strchrnul(path, ':') - path; - if (term < len) - len = term; - __for_each_child_of_node(parent, child) { const char *name = strrchr(child->full_name, '/'); if (WARN(!name, "malformed device_node %s\n", child->full_name)) @@ -768,8 +764,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt /* The path could begin with an alias */ if (*path != '/') { - char *p = strchrnul(path, '/'); - int len = separator ? 
separator - path : p - path; + int len; + const char *p = separator; + + if (!p) + p = strchrnul(path, '/'); + len = p - path; /* of_aliases must not be NULL */ if (!of_aliases) @@ -794,6 +794,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt path++; /* Increment past '/' delimiter */ np = __of_find_node_by_path(np, path); path = strchrnul(path, '/'); + if (separator && separator < path) + break; } raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 0d7765807f49..1a7980692f25 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar struct device_node *p; const __be32 *intspec, *tmp, *addr; u32 intsize, intlen; - int i, res = -EINVAL; + int i, res; pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); @@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar /* Get size of interrupt specifier */ tmp = of_get_property(p, "#interrupt-cells", NULL); - if (tmp == NULL) + if (tmp == NULL) { + res = -EINVAL; goto out; + } intsize = be32_to_cpu(*tmp); pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); /* Check index */ - if ((index + 1) * intsize > intlen) + if ((index + 1) * intsize > intlen) { + res = -EINVAL; goto out; + } /* Copy intspec into irq structure */ intspec += index * intsize; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 352b4f28f82c..dee9270ba547 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -19,6 +19,7 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/idr.h> #include "of_private.h" @@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, struct device_node *target, struct device_node *child) { const char *cname; - struct device_node *tchild, *grandchild; + struct device_node *tchild; int ret = 0; cname = kbasename(child->full_name); diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0cf9a236d438..52c45c7df07f 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -92,6 +92,16 @@ static void __init of_selftest_find_node_by_name(void) "option path test failed\n"); of_node_put(np); + np = of_find_node_opts_by_path("/testcase-data:test/option", &options); + selftest(np && !strcmp("test/option", options), + "option path test, subcase #1 failed\n"); + of_node_put(np); + + np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options); + selftest(np && !strcmp("test/option", options), + "option path test, subcase #2 failed\n"); + of_node_put(np); + np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); selftest(np, "NULL option path test failed\n"); of_node_put(np); @@ -102,6 +112,12 @@ static void __init of_selftest_find_node_by_name(void) "option alias path test failed\n"); of_node_put(np); + np = of_find_node_opts_by_path("testcase-alias:test/alias/option", + &options); + selftest(np && !strcmp("test/alias/option", options), + "option alias path test, subcase #1 failed\n"); + of_node_put(np); + np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); selftest(np, "NULL option alias path test failed\n"); of_node_put(np); @@ -378,9 +394,9 @@ static void __init of_selftest_property_string(void) rc = of_property_match_string(np, "phandle-list-names", "first"); selftest(rc == 0, "first expected:0 got:%i\n", rc); rc = 
of_property_match_string(np, "phandle-list-names", "second"); - selftest(rc == 1, "second expected:0 got:%i\n", rc); + selftest(rc == 1, "second expected:1 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "third"); - selftest(rc == 2, "third expected:0 got:%i\n", rc); + selftest(rc == 2, "third expected:2 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "fourth"); selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); rc = of_property_match_string(np, "missing-property", "blah"); @@ -478,7 +494,6 @@ static void __init of_selftest_changeset(void) struct device_node *n1, *n2, *n21, *nremove, *parent, *np; struct of_changeset chgset; - of_changeset_init(&chgset); n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); selftest(n1, "testcase setup failure\n"); n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); @@ -979,7 +994,7 @@ static int of_path_platform_device_exists(const char *path) return pdev != NULL; } -#if IS_ENABLED(CONFIG_I2C) +#if IS_BUILTIN(CONFIG_I2C) /* get the i2c client device instantiated at the path */ static struct i2c_client *of_path_to_i2c_client(const char *path) @@ -1445,7 +1460,7 @@ static void of_selftest_overlay_11(void) return; } -#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) +#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) struct selftest_i2c_bus_data { struct platform_device *pdev; @@ -1584,7 +1599,7 @@ static struct i2c_driver selftest_i2c_dev_driver = { .id_table = selftest_i2c_dev_id, }; -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) struct selftest_i2c_mux_data { int nchans; @@ -1695,7 +1710,7 @@ static int of_selftest_overlay_i2c_init(void) "could not register selftest i2c bus driver\n")) return ret; -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) ret = i2c_add_driver(&selftest_i2c_mux_driver); if (selftest(ret == 0, "could not register selftest i2c mux driver\n")) @@ -1707,7 +1722,7 @@ static int of_selftest_overlay_i2c_init(void) static void of_selftest_overlay_i2c_cleanup(void) { -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) i2c_del_driver(&selftest_i2c_mux_driver); #endif platform_driver_unregister(&selftest_i2c_bus_driver); @@ -1814,7 +1829,7 @@ static void __init of_selftest_overlay(void) of_selftest_overlay_10(); of_selftest_overlay_11(); -#if IS_ENABLED(CONFIG_I2C) +#if IS_BUILTIN(CONFIG_I2C) if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) goto out; diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) return false; } -static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, +static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { struct xgene_pcie_port *port = bus->sysdata; @@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, return NULL; xgene_pcie_set_rtdid_reg(bus, devfn); - return xgene_pcie_get_cfg_base(bus); + return xgene_pcie_get_cfg_base(bus) + offset; } static struct pci_ops xgene_pcie_ops = { diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, struct pci_dev *pdev = to_pci_dev(dev); char 
*driver_override, *old = pdev->driver_override, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); @@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%s\n", pdev->driver_override); + return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 3bb49252a098..45f67c63d385 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -69,8 +69,7 @@ config YENTA tristate "CardBus yenta-compatible bridge support" depends on PCI select CARDBUS if !EXPERT - select PCCARD_NONSTATIC if PCMCIA != n && ISA - select PCCARD_PCI if PCMCIA !=n && !ISA + select PCCARD_NONSTATIC if PCMCIA != n ---help--- This option enables support for CardBus host bridges. Virtually all modern PCMCIA bridges are CardBus compatible. A "bridge" is @@ -110,8 +109,7 @@ config YENTA_TOSHIBA config PD6729 tristate "Cirrus PD6729 compatible bridge support" depends on PCMCIA && PCI - select PCCARD_NONSTATIC if PCMCIA != n && ISA - select PCCARD_PCI if PCMCIA !=n && !ISA + select PCCARD_NONSTATIC help This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge device, found in some older laptops and PCMCIA card readers. @@ -119,8 +117,7 @@ config PD6729 config I82092 tristate "i82092 compatible bridge support" depends on PCMCIA && PCI - select PCCARD_NONSTATIC if PCMCIA != n && ISA - select PCCARD_PCI if PCMCIA !=n && !ISA + select PCCARD_NONSTATIC help This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, found in some older laptops and more commonly in evaluation boards for the @@ -291,9 +288,6 @@ config ELECTRA_CF Say Y here to support the CompactFlash controller on the PA Semi Electra eval board. 
-config PCCARD_PCI - bool - config PCCARD_NONSTATIC bool diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index f1a7ca04d89e..27e94b30cf96 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile @@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o pcmcia_rsrc-y += rsrc_mgr.o pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o -pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c deleted file mode 100644 index 1f67b3ba70fb..000000000000 --- a/drivers/pcmcia/rsrc_pci.c +++ /dev/null @@ -1,173 +0,0 @@ -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/pci.h> - -#include <pcmcia/ss.h> -#include <pcmcia/cistpl.h> -#include "cs_internal.h" - - -struct pcmcia_align_data { - unsigned long mask; - unsigned long offset; -}; - -static resource_size_t pcmcia_align(void *align_data, - const struct resource *res, - resource_size_t size, resource_size_t align) -{ - struct pcmcia_align_data *data = align_data; - resource_size_t start; - - start = (res->start & ~data->mask) + data->offset; - if (start < res->start) - start += data->mask + 1; - return start; -} - -static struct resource *find_io_region(struct pcmcia_socket *s, - unsigned long base, int num, - unsigned long align) -{ - struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, - dev_name(&s->dev)); - struct pcmcia_align_data data; - int ret; - - data.mask = align - 1; - data.offset = base & data.mask; - - ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, - base, 0, pcmcia_align, &data); - if (ret != 0) { - kfree(res); - res = NULL; - } - return res; -} - -static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr, - unsigned int *base, unsigned int num, - unsigned int align, struct resource **parent) -{ - int i, ret = 0; - - /* Check for an already-allocated window that must conflict with - * what was asked for. It is a hack because it does not catch all - * potential conflicts, just the most obvious ones. 
- */ - for (i = 0; i < MAX_IO_WIN; i++) { - if (!s->io[i].res) - continue; - - if (!*base) - continue; - - if ((s->io[i].res->start & (align-1)) == *base) - return -EBUSY; - } - - for (i = 0; i < MAX_IO_WIN; i++) { - struct resource *res = s->io[i].res; - unsigned int try; - - if (res && (res->flags & IORESOURCE_BITS) != - (attr & IORESOURCE_BITS)) - continue; - - if (!res) { - if (align == 0) - align = 0x10000; - - res = s->io[i].res = find_io_region(s, *base, num, - align); - if (!res) - return -EINVAL; - - *base = res->start; - s->io[i].res->flags = - ((res->flags & ~IORESOURCE_BITS) | - (attr & IORESOURCE_BITS)); - s->io[i].InUse = num; - *parent = res; - return 0; - } - - /* Try to extend top of window */ - try = res->end + 1; - if ((*base == 0) || (*base == try)) { - ret = adjust_resource(s->io[i].res, res->start, - resource_size(res) + num); - if (ret) - continue; - *base = try; - s->io[i].InUse += num; - *parent = res; - return 0; - } - - /* Try to extend bottom of window */ - try = res->start - num; - if ((*base == 0) || (*base == try)) { - ret = adjust_resource(s->io[i].res, - res->start - num, - resource_size(res) + num); - if (ret) - continue; - *base = try; - s->io[i].InUse += num; - *parent = res; - return 0; - } - } - return -EINVAL; -} - -static struct resource *res_pci_find_mem(u_long base, u_long num, - u_long align, int low, struct pcmcia_socket *s) -{ - struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM, - dev_name(&s->dev)); - struct pcmcia_align_data data; - unsigned long min; - int ret; - - if (align < 0x20000) - align = 0x20000; - data.mask = align - 1; - data.offset = base & data.mask; - - min = 0; - if (!low) - min = 0x100000UL; - - ret = pci_bus_alloc_resource(s->cb_dev->bus, - res, num, 1, min, 0, - pcmcia_align, &data); - - if (ret != 0) { - kfree(res); - res = NULL; - } - return res; -} - - -static int res_pci_init(struct pcmcia_socket *s) -{ - if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) { - dev_err(&s->dev, "not supported by res_pci\n"); - return -EOPNOTSUPP; - } - return 0; -} - -struct pccard_resource_ops pccard_nonstatic_ops = { - .validate_mem = NULL, - .find_io = res_pci_find_io, - .find_mem = res_pci_find_mem, - .init = res_pci_init, - .exit = NULL, -}; -EXPORT_SYMBOL(pccard_nonstatic_ops); diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c index 7c99ca256f05..8ccc3952c13d 100644 --- a/drivers/phy/phy-armada375-usb2.c +++ b/drivers/phy/phy-armada375-usb2.c @@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy) struct armada375_cluster_phy *cluster_phy; u32 reg; - cluster_phy = dev_get_drvdata(phy->dev.parent); + cluster_phy = phy_get_drvdata(phy); if (!cluster_phy) return -ENODEV; @@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev) cluster_phy->reg = usb_cluster_base; dev_set_drvdata(dev, cluster_phy); + phy_set_drvdata(phy, cluster_phy); phy_provider = devm_of_phy_provider_register(&pdev->dev, armada375_usb_phy_xlate); diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index a12d35338313..3791838f4bd4 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res) static int devm_phy_match(struct device *dev, void *res, void *match_data) { - return res == match_data; + struct phy **phy = res; + + return *phy == match_data; } /** @@ -223,6 +225,7 @@ int phy_init(struct phy *phy) ret = phy_pm_runtime_get_sync(phy); if (ret < 0 && ret != -ENOTSUPP) return ret; + ret = 
0; /* Override possible ret == -ENOTSUPP */ mutex_lock(&phy->mutex); if (phy->init_count == 0 && phy->ops->init) { @@ -231,8 +234,6 @@ int phy_init(struct phy *phy) dev_err(&phy->dev, "phy init failed --> %d\n", ret); goto out; } - } else { - ret = 0; /* Override possible ret == -ENOTSUPP */ } ++phy->init_count; @@ -253,6 +254,7 @@ int phy_exit(struct phy *phy) ret = phy_pm_runtime_get_sync(phy); if (ret < 0 && ret != -ENOTSUPP) return ret; + ret = 0; /* Override possible ret == -ENOTSUPP */ mutex_lock(&phy->mutex); if (phy->init_count == 1 && phy->ops->exit) { @@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy) ret = phy_pm_runtime_get_sync(phy); if (ret < 0 && ret != -ENOTSUPP) return ret; + ret = 0; /* Override possible ret == -ENOTSUPP */ mutex_lock(&phy->mutex); if (phy->power_count == 0 && phy->ops->power_on) { @@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy) dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); goto out; } - } else { - ret = 0; /* Override possible ret == -ENOTSUPP */ } ++phy->power_count; mutex_unlock(&phy->mutex); diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c index f86cbe68ddaf..179cbf9451aa 100644 --- a/drivers/phy/phy-exynos-dp-video.c +++ b/drivers/phy/phy-exynos-dp-video.c @@ -30,28 +30,13 @@ struct exynos_dp_video_phy { const struct exynos_dp_video_phy_drvdata *drvdata; }; -static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state, - unsigned int on) -{ - unsigned int val; - - if (IS_ERR(state->regs)) - return; - - val = on ? 0 : EXYNOS5_PHY_ENABLE; - - regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, - EXYNOS5_PHY_ENABLE, val); -} - static int exynos_dp_video_phy_power_on(struct phy *phy) { struct exynos_dp_video_phy *state = phy_get_drvdata(phy); /* Disable power isolation on DP-PHY */ - exynos_dp_video_phy_pwr_isol(state, 0); - - return 0; + return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, + EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE); } static int exynos_dp_video_phy_power_off(struct phy *phy) @@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy) struct exynos_dp_video_phy *state = phy_get_drvdata(phy); /* Enable power isolation on DP-PHY */ - exynos_dp_video_phy_pwr_isol(state, 1); - - return 0; + return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, + EXYNOS5_PHY_ENABLE, 0); } static struct phy_ops exynos_dp_video_phy_ops = { diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c index f017b2f2a54e..df7519a39ba0 100644 --- a/drivers/phy/phy-exynos-mipi-video.c +++ b/drivers/phy/phy-exynos-mipi-video.c @@ -43,7 +43,6 @@ struct exynos_mipi_video_phy { } phys[EXYNOS_MIPI_PHYS_NUM]; spinlock_t slock; void __iomem *regs; - struct mutex mutex; struct regmap *regmap; }; @@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, else reset = EXYNOS4_MIPI_PHY_SRESETN; - if (state->regmap) { - mutex_lock(&state->mutex); + spin_lock(&state->slock); + + if (!IS_ERR(state->regmap)) { regmap_read(state->regmap, offset, &val); if (on) val |= reset; @@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) val &= ~EXYNOS4_MIPI_PHY_ENABLE; regmap_write(state->regmap, offset, val); - mutex_unlock(&state->mutex); } else { addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); - spin_lock(&state->slock); val = readl(addr); if (on) val |= reset; @@ -90,9 +88,9 @@ static int __set_phy_state(struct 
exynos_mipi_video_phy *state, val &= ~EXYNOS4_MIPI_PHY_ENABLE; writel(val, addr); - spin_unlock(&state->slock); } + spin_unlock(&state->slock); return 0; } @@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev) dev_set_drvdata(dev, state); spin_lock_init(&state->slock); - mutex_init(&state->mutex); for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { struct phy *phy = devm_phy_create(dev, NULL, diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c index 236a52ad94eb..f30bbb0fb3b2 100644 --- a/drivers/phy/phy-exynos4210-usb2.c +++ b/drivers/phy/phy-exynos4210-usb2.c @@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = { .power_on = exynos4210_power_on, .power_off = exynos4210_power_off, }, - {}, }; const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c index 0b9de88579b1..765da90a536f 100644 --- a/drivers/phy/phy-exynos4x12-usb2.c +++ b/drivers/phy/phy-exynos4x12-usb2.c @@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = { .power_on = exynos4x12_power_on, .power_off = exynos4x12_power_off, }, - {}, }; const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c index 04374018425f..e2a0be750ad9 100644 --- a/drivers/phy/phy-exynos5-usbdrd.c +++ b/drivers/phy/phy-exynos5-usbdrd.c @@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev, { struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); - if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) + if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM)) return ERR_PTR(-ENODEV); return phy_drd->phys[args->args[0]].phy; diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c index 1c139aa0d074..2ed1735a076a 100644 --- a/drivers/phy/phy-exynos5250-usb2.c +++ b/drivers/phy/phy-exynos5250-usb2.c @@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = { .power_on = exynos5250_power_on, .power_off = exynos5250_power_off, }, - {}, }; const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c index 34915b4202f1..d6b22659cac1 100644 --- a/drivers/phy/phy-hix5hd2-sata.c +++ b/drivers/phy/phy-hix5hd2-sata.c @@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + priv->base = devm_ioremap(dev, res->start, resource_size(res)); if (!priv->base) return -ENOMEM; diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index 9b2848e6115d..933435214acc 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c @@ -228,6 +228,7 @@ struct miphy28lp_dev { struct regmap *regmap; struct mutex miphy_mutex; struct miphy28lp_phy **phys; + int nphys; }; struct miphy_initval { @@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, return ERR_PTR(-EINVAL); } - for (index = 0; index < of_get_child_count(dev->of_node); index++) + for (index = 0; index < miphy_dev->nphys; index++) if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { miphy_phy = miphy_dev->phys[index]; break; @@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, static struct phy_ops miphy28lp_ops = { .init = miphy28lp_init, + .owner = 
THIS_MODULE, }; static int miphy28lp_probe_resets(struct device_node *node, @@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev) struct miphy28lp_dev *miphy_dev; struct phy_provider *provider; struct phy *phy; - int chancount, port = 0; - int ret; + int ret, port = 0; miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); if (!miphy_dev) return -ENOMEM; - chancount = of_get_child_count(np); - miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, - GFP_KERNEL); + miphy_dev->nphys = of_get_child_count(np); + miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, + sizeof(*miphy_dev->phys), GFP_KERNEL); if (!miphy_dev->phys) return -ENOMEM; diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c index 6c80154e8bff..51b459db9137 100644 --- a/drivers/phy/phy-miphy365x.c +++ b/drivers/phy/phy-miphy365x.c @@ -150,6 +150,7 @@ struct miphy365x_dev { struct regmap *regmap; struct mutex miphy_mutex; struct miphy365x_phy **phys; + int nphys; }; /* @@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev, return ERR_PTR(-EINVAL); } - for (index = 0; index < of_get_child_count(dev->of_node); index++) + for (index = 0; index < miphy_dev->nphys; index++) if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { miphy_phy = miphy_dev->phys[index]; break; @@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev) struct miphy365x_dev *miphy_dev; struct phy_provider *provider; struct phy *phy; - int chancount, port = 0; - int ret; + int ret, port = 0; miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); if (!miphy_dev) return -ENOMEM; - chancount = of_get_child_count(np); - miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, - GFP_KERNEL); + miphy_dev->nphys = of_get_child_count(np); + miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, + sizeof(*miphy_dev->phys), GFP_KERNEL); if (!miphy_dev->phys) return -ENOMEM; diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c index efe724f97e02..93252e053a31 100644 --- a/drivers/phy/phy-omap-control.c +++ b/drivers/phy/phy-omap-control.c @@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void) } module_exit(omap_control_phy_exit); -MODULE_ALIAS("platform: omap_control_phy"); +MODULE_ALIAS("platform:omap_control_phy"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 6f4aef3db248..4757e765696a 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c @@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "found usb_otg_ss_refclk960m, please fix DTS\n"); } - } else { - clk_prepare(phy->optclk); } + if (!IS_ERR(phy->optclk)) + clk_prepare(phy->optclk); + usb_add_phy_dev(&phy->phy); return 0; @@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = { module_platform_driver(omap_usb2_driver); -MODULE_ALIAS("platform: omap_usb2"); +MODULE_ALIAS("platform:omap_usb2"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP USB2 phy driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 22011c3b6a4b..7d4c33643768 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c @@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy) return ret; 
clk_disable_unprepare(phy->clk); - if (ret) - return ret; return 0; } @@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy) /* Power up usb phy analog blocks by set siddq 0 */ ret = rockchip_usb_phy_power(phy, 0); - if (ret) + if (ret) { + clk_disable_unprepare(phy->clk); return ret; + } return 0; } diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 95c88f929f27..2ba610b72ca2 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy) cpu_relax(); val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); if (val & PLL_LOCK) - break; + return 0; } while (!time_after(jiffies, timeout)); - if (!(val & PLL_LOCK)) { - dev_err(phy->dev, "DPLL failed to lock\n"); - return -EBUSY; - } - - return 0; + dev_err(phy->dev, "DPLL failed to lock\n"); + return -EBUSY; } static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) @@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = { module_platform_driver(ti_pipe3_driver); -MODULE_ALIAS("platform: ti_pipe3"); +MODULE_ALIAS("platform:ti_pipe3"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("TI PIPE3 phy driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 8e87f54671f3..bc42d6a8939f 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c @@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev) twl->dev = &pdev->dev; twl->irq = platform_get_irq(pdev, 0); twl->vbus_supplied = false; - twl->linkstat = -EINVAL; twl->linkstat = OMAP_MUSB_UNKNOWN; twl->phy.dev = twl->dev; diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c index 29214a36ea28..2263cd010032 100644 --- a/drivers/phy/phy-xgene.c +++ b/drivers/phy/phy-xgene.c @@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev) for (i = 0; i < MAX_LANE; i++) ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ - ctx->dev = &pdev->dev; platform_set_drvdata(pdev, ctx); ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 5afe03e28b91..2062c224e32f 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -66,6 +66,10 @@ #define BYT_DIR_MASK (BIT(1) | BIT(2)) #define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) +#define BYT_CONF0_RESTORE_MASK (BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \ + BYT_PIN_MUX) +#define BYT_VAL_RESTORE_MASK (BYT_DIR_MASK | BYT_LEVEL) + #define BYT_NGPIO_SCORE 102 #define BYT_NGPIO_NCORE 28 #define BYT_NGPIO_SUS 44 @@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = { }, }; +struct byt_gpio_pin_context { + u32 conf0; + u32 val; +}; + struct byt_gpio { struct gpio_chip chip; struct platform_device *pdev; spinlock_t lock; void __iomem *reg_base; struct pinctrl_gpio_range *range; + struct byt_gpio_pin_context *saved_context; }; #define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) @@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset, return vg->reg_base + reg_offset + reg; } -static bool is_special_pin(struct byt_gpio *vg, unsigned offset) +static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset) +{ + void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); + unsigned long flags; + u32 value; + + spin_lock_irqsave(&vg->lock, flags); + value = readl(reg); + value 
&= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); + writel(value, reg); + spin_unlock_irqrestore(&vg->lock, flags); +} + +static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset) { /* SCORE pin 92-93 */ if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && offset >= 92 && offset <= 93) - return true; + return 1; /* SUS pin 11-21 */ if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && offset >= 11 && offset <= 21) - return true; + return 1; - return false; + return 0; } static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) { struct byt_gpio *vg = to_byt_gpio(chip); void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); - u32 value; - bool special; + u32 value, gpio_mux; /* * In most cases, func pin mux 000 means GPIO function. * But, some pins may have func pin mux 001 represents - * GPIO function. Only allow user to export pin with - * func pin mux preset as GPIO function by BIOS/FW. + * GPIO function. + * + * Because there are devices out there where some pins were not + * configured correctly we allow changing the mux value from + * request (but print out warning about that). */ value = readl(reg) & BYT_PIN_MUX; - special = is_special_pin(vg, offset); - if ((special && value != 1) || (!special && value)) { - dev_err(&vg->pdev->dev, - "pin %u cannot be used as GPIO.\n", offset); - return -EINVAL; + gpio_mux = byt_get_gpio_mux(vg, offset); + if (WARN_ON(gpio_mux != value)) { + unsigned long flags; + + spin_lock_irqsave(&vg->lock, flags); + value = readl(reg) & ~BYT_PIN_MUX; + value |= gpio_mux; + writel(value, reg); + spin_unlock_irqrestore(&vg->lock, flags); + + dev_warn(&vg->pdev->dev, + "pin %u forcibly re-configured as GPIO\n", offset); } pm_runtime_get(&vg->pdev->dev); @@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) { struct byt_gpio *vg = to_byt_gpio(chip); - void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); - u32 value; - - /* clear interrupt triggering */ - value = readl(reg); - value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); - writel(value, reg); + byt_gpio_clear_triggering(vg, offset); pm_runtime_put(&vg->pdev->dev); } @@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type) value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); - switch (type) { - case IRQ_TYPE_LEVEL_HIGH: - value |= BYT_TRIG_LVL; - case IRQ_TYPE_EDGE_RISING: - value |= BYT_TRIG_POS; - break; - case IRQ_TYPE_LEVEL_LOW: - value |= BYT_TRIG_LVL; - case IRQ_TYPE_EDGE_FALLING: - value |= BYT_TRIG_NEG; - break; - case IRQ_TYPE_EDGE_BOTH: - value |= (BYT_TRIG_NEG | BYT_TRIG_POS); - break; - } writel(value, reg); + if (type & IRQ_TYPE_EDGE_BOTH) + __irq_set_handler_locked(d->irq, handle_edge_irq); + else if (type & IRQ_TYPE_LEVEL_MASK) + __irq_set_handler_locked(d->irq, handle_level_irq); + spin_unlock_irqrestore(&vg->lock, flags); return 0; @@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) struct irq_data *data = irq_desc_get_irq_data(desc); struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); struct irq_chip *chip = irq_data_get_irq_chip(data); - u32 base, pin, mask; + u32 base, pin; void __iomem *reg; - u32 pending; + unsigned long pending; unsigned virq; - int looplimit = 0; /* check from GPIO controller which pin triggered the interrupt */ for (base = 0; base < vg->chip.ngpio; base += 32) { - reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); - - 
while ((pending = readl(reg))) { - pin = __ffs(pending); - mask = BIT(pin); - /* Clear before handling so we can't lose an edge */ - writel(mask, reg); - + pending = readl(reg); + for_each_set_bit(pin, &pending, 32) { virq = irq_find_mapping(vg->chip.irqdomain, base + pin); generic_handle_irq(virq); - - /* In case bios or user sets triggering incorretly a pin - * might remain in "interrupt triggered" state. - */ - if (looplimit++ > 32) { - dev_err(&vg->pdev->dev, - "Gpio %d interrupt flood, disabling\n", - base + pin); - - reg = byt_gpio_reg(&vg->chip, base + pin, - BYT_CONF0_REG); - mask = readl(reg); - mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS | - BYT_TRIG_LVL); - writel(mask, reg); - mask = readl(reg); /* flush */ - break; - } } } chip->irq_eoi(data); } +static void byt_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct byt_gpio *vg = to_byt_gpio(gc); + unsigned offset = irqd_to_hwirq(d); + void __iomem *reg; + + reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG); + writel(BIT(offset % 32), reg); +} + static void byt_irq_unmask(struct irq_data *d) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct byt_gpio *vg = to_byt_gpio(gc); + unsigned offset = irqd_to_hwirq(d); + unsigned long flags; + void __iomem *reg; + u32 value; + + spin_lock_irqsave(&vg->lock, flags); + + reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); + value = readl(reg); + + switch (irqd_get_trigger_type(d)) { + case IRQ_TYPE_LEVEL_HIGH: + value |= BYT_TRIG_LVL; + case IRQ_TYPE_EDGE_RISING: + value |= BYT_TRIG_POS; + break; + case IRQ_TYPE_LEVEL_LOW: + value |= BYT_TRIG_LVL; + case IRQ_TYPE_EDGE_FALLING: + value |= BYT_TRIG_NEG; + break; + case IRQ_TYPE_EDGE_BOTH: + value |= (BYT_TRIG_NEG | BYT_TRIG_POS); + break; + } + + writel(value, reg); + + spin_unlock_irqrestore(&vg->lock, flags); } static void byt_irq_mask(struct irq_data *d) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct byt_gpio *vg = to_byt_gpio(gc); + + byt_gpio_clear_triggering(vg, irqd_to_hwirq(d)); } static struct irq_chip byt_irqchip = { .name = "BYT-GPIO", + .irq_ack = byt_irq_ack, .irq_mask = byt_irq_mask, .irq_unmask = byt_irq_unmask, .irq_set_type = byt_irq_type, @@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) { void __iomem *reg; u32 base, value; + int i; + + /* + * Clear interrupt triggers for all pins that are GPIOs and + * do not use direct IRQ mode. This will prevent spurious + * interrupts from misconfigured pins. 
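Aside on the byt_gpio_irq_handler() hunk above: the open-coded while/__ffs() loop, including its interrupt-flood workaround, is replaced by a single read of the per-bank status register plus for_each_set_bit(), with the status-bit clearing moved into the new byt_irq_ack() callback. A minimal sketch of that demux shape, assuming a 32-bit status register and a caller-supplied per-bit handler (both names are illustrative):

    #include <linux/bitops.h>
    #include <linux/io.h>

    static void demux_bank(void __iomem *stat_reg,
                           void (*handle_one)(unsigned int bit))
    {
            unsigned long pending;
            unsigned int bit;

            /* snapshot the bank status once... */
            pending = readl(stat_reg);

            /* ...then visit only the bits that are actually set */
            for_each_set_bit(bit, &pending, 32)
                    handle_one(bit);
    }

In the driver the per-bit work is irq_find_mapping() followed by generic_handle_irq(), and acknowledging the hardware is left to the irqchip's .irq_ack.
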
+ */ + for (i = 0; i < vg->chip.ngpio; i++) { + value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG)); + if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && + !(value & BYT_DIRECT_IRQ_EN)) { + byt_gpio_clear_triggering(vg, i); + dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); + } + } /* clear interrupt status trigger registers */ for (base = 0; base < vg->chip.ngpio; base += 32) { @@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev) gc->can_sleep = false; gc->dev = dev; +#ifdef CONFIG_PM_SLEEP + vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio, + sizeof(*vg->saved_context), GFP_KERNEL); +#endif + ret = gpiochip_add(gc); if (ret) { dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); @@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int byt_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct byt_gpio *vg = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < vg->chip.ngpio; i++) { + void __iomem *reg; + u32 value; + + reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); + value = readl(reg) & BYT_CONF0_RESTORE_MASK; + vg->saved_context[i].conf0 = value; + + reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); + value = readl(reg) & BYT_VAL_RESTORE_MASK; + vg->saved_context[i].val = value; + } + + return 0; +} + +static int byt_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct byt_gpio *vg = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < vg->chip.ngpio; i++) { + void __iomem *reg; + u32 value; + + reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); + value = readl(reg); + if ((value & BYT_CONF0_RESTORE_MASK) != + vg->saved_context[i].conf0) { + value &= ~BYT_CONF0_RESTORE_MASK; + value |= vg->saved_context[i].conf0; + writel(value, reg); + dev_info(dev, "restored pin %d conf0 %#08x", i, value); + } + + reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); + value = readl(reg); + if ((value & BYT_VAL_RESTORE_MASK) != + vg->saved_context[i].val) { + u32 v; + + v = value & ~BYT_VAL_RESTORE_MASK; + v |= vg->saved_context[i].val; + if (v != value) { + writel(v, reg); + dev_dbg(dev, "restored pin %d val %#08x\n", + i, v); + } + } + } + + return 0; +} +#endif + static int byt_gpio_runtime_suspend(struct device *dev) { return 0; @@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev) } static const struct dev_pm_ops byt_gpio_pm_ops = { - .runtime_suspend = byt_gpio_runtime_suspend, - .runtime_resume = byt_gpio_runtime_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume) + SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume, + NULL) }; static const struct acpi_device_id byt_gpio_acpi_match[] = { diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 3034fd03bced..82f691eeeec4 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset) static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { + chv_gpio_set(chip, offset, value); return pinctrl_gpio_direction_output(chip->base + offset); } diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index f4cd0b9b2438..a4814066ea08 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1477,28 +1477,25 @@ static void 
gpio_irq_ack(struct irq_data *d) /* the interrupt is already cleared before by reading ISR */ } -static unsigned int gpio_irq_startup(struct irq_data *d) +static int gpio_irq_request_res(struct irq_data *d) { struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); unsigned pin = d->hwirq; int ret; ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); - if (ret) { + if (ret) dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n", d->hwirq); - return ret; - } - gpio_irq_unmask(d); - return 0; + + return ret; } -static void gpio_irq_shutdown(struct irq_data *d) +static void gpio_irq_release_res(struct irq_data *d) { struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); unsigned pin = d->hwirq; - gpio_irq_mask(d); gpiochip_unlock_as_irq(&at91_gpio->chip, pin); } @@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void) static struct irq_chip gpio_irqchip = { .name = "GPIO", .irq_ack = gpio_irq_ack, - .irq_startup = gpio_irq_startup, - .irq_shutdown = gpio_irq_shutdown, + .irq_request_resources = gpio_irq_request_res, + .irq_release_resources = gpio_irq_release_res, .irq_disable = gpio_irq_mask, .irq_mask = gpio_irq_mask, .irq_unmask = gpio_irq_unmask, diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index 24c5d88f943f..3c68a8e5e0dd 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c @@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = { .pins = sun4i_a10_pins, .npins = ARRAY_SIZE(sun4i_a10_pins), .irq_banks = 1, + .irq_read_needs_mux = true, }; static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 3d0744337736..f8e171b76693 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include "../core.h" +#include "../../gpio/gpiolib.h" #include "pinctrl-sunxi.h" static struct irq_chip sunxi_pinctrl_edge_irq_chip; @@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip, static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) { struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); - u32 reg = sunxi_data_reg(offset); u8 index = sunxi_data_offset(offset); - u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; + u32 set_mux = pctl->desc->irq_read_needs_mux && + test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); + u32 val; + + if (set_mux) + sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT); + + val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; + + if (set_mux) + sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ); return val; } diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index 5a51523a3459..e248e81a0f9e 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -77,6 +77,9 @@ #define IRQ_LEVEL_LOW 0x03 #define IRQ_EDGE_BOTH 0x04 +#define SUN4I_FUNC_INPUT 0 +#define SUN4I_FUNC_IRQ 6 + struct sunxi_desc_function { const char *name; u8 muxval; @@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc { int npins; unsigned pin_base; unsigned irq_banks; + bool irq_read_needs_mux; }; struct sunxi_pinctrl_function { diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 97b5e4ee1ca4..63d4033eb683 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ 
-73,7 +73,7 @@ #define TIME_WINDOW_MAX_MSEC 40000 #define TIME_WINDOW_MIN_MSEC 250 - +#define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */ enum unit_type { ARBITRARY_UNIT, /* no translation */ POWER_UNIT, @@ -158,6 +158,7 @@ struct rapl_domain { struct rapl_power_limit rpl[NR_POWER_LIMITS]; u64 attr_map; /* track capabilities */ unsigned int state; + unsigned int domain_energy_unit; int package_id; }; #define power_zone_to_rapl_domain(_zone) \ @@ -190,6 +191,7 @@ struct rapl_defaults { void (*set_floor_freq)(struct rapl_domain *rd, bool mode); u64 (*compute_time_window)(struct rapl_package *rp, u64 val, bool to_raw); + unsigned int dram_domain_energy_unit; }; static struct rapl_defaults *rapl_defaults; @@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd, static int rapl_write_data_raw(struct rapl_domain *rd, enum rapl_primitives prim, unsigned long long value); -static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, +static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, + enum unit_type type, u64 value, int to_raw); static void package_power_limit_irq_save(int package_id); @@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw) static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) { - *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); + struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev); + + *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); return 0; } @@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp) rd->msrs[4] = MSR_DRAM_POWER_INFO; rd->rpl[0].prim_id = PL1_ENABLE; rd->rpl[0].name = pl1_name; + rd->domain_energy_unit = + rapl_defaults->dram_domain_energy_unit; + if (rd->domain_energy_unit) + pr_info("DRAM domain energy unit %dpj\n", + rd->domain_energy_unit); break; } if (mask) { @@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp) } } -static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, +static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, + enum unit_type type, u64 value, int to_raw) { u64 units = 1; struct rapl_package *rp; + u64 scale = 1; rp = find_package_by_id(package); if (!rp) @@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, units = rp->power_unit; break; case ENERGY_UNIT: - units = rp->energy_unit; + scale = ENERGY_UNIT_SCALE; + /* per domain unit takes precedence */ + if (rd && rd->domain_energy_unit) + units = rd->domain_energy_unit; + else + units = rp->energy_unit; break; case TIME_UNIT: return rapl_defaults->compute_time_window(rp, value, to_raw); @@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, }; if (to_raw) - return div64_u64(value, units); + return div64_u64(value, units) * scale; value *= units; - return value; + return div64_u64(value, scale); } /* in the order of enum rapl_primitives */ @@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, final = value & rp->mask; final = final >> rp->shift; if (xlate) - *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); + *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0); else *data = final; @@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, "failed to read msr 0x%x on cpu %d\n", msr, cpu); return -EIO; } - value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); + value = rapl_unit_xlate(rd, rd->package_id, 
rp->unit, value, 1); msr_val &= ~rp->mask; msr_val |= value << rp->shift; if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { @@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, * calculate units differ on different CPUs. * We convert the units to below format based on CPUs. * i.e. - * energy unit: microJoules : Represented in microJoules by default + * energy unit: picoJoules : Represented in picoJoules by default * power unit : microWatts : Represented in milliWatts by default * time unit : microseconds: Represented in seconds by default */ @@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) } value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; - rp->energy_unit = 1000000 / (1 << value); + rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; rp->power_unit = 1000000 / (1 << value); @@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; rp->time_unit = 1000000 / (1 << value); - pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n", + pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n", rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); return 0; @@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) return -ENODEV; } value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; - rp->energy_unit = 1 << value; + rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value; value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; rp->power_unit = (1 << value) * 1000; @@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; rp->time_unit = 1000000 / (1 << value); - pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n", + pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n", rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); return 0; @@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = { .compute_time_window = rapl_compute_time_window_core, }; +static const struct rapl_defaults rapl_defaults_hsw_server = { + .check_unit = rapl_check_unit_core, + .set_floor_freq = set_floor_freq_default, + .compute_time_window = rapl_compute_time_window_core, + .dram_domain_energy_unit = 15300, +}; + static const struct rapl_defaults rapl_defaults_atom = { .check_unit = rapl_check_unit_atom, .set_floor_freq = set_floor_freq_atom, @@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = { RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ - RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */ + RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b899947d839d..a4a8a6dc60c4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev) } if (rdev->ena_pin) { - ret = regulator_ena_gpio_ctrl(rdev, true); - if (ret < 0) - return ret; - rdev->ena_gpio_state = 1; + if (!rdev->ena_gpio_state) { + ret = regulator_ena_gpio_ctrl(rdev, true); + if (ret < 0) + return ret; + rdev->ena_gpio_state = 1; + } } 
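A note at this step of the regulator core hunk: _regulator_do_enable() (and, symmetrically, _regulator_do_disable() just below) now consults the cached rdev->ena_gpio_state before driving the enable GPIO, so the pin is only toggled, and the cached state only updated, when the requested state actually differs. A minimal sketch of that guard, with made-up names (reg_state and gpio_ctrl are not the regulator API):

    #include <linux/types.h>

    struct reg_state {
            bool gpio_on;                   /* cached state of the enable GPIO */
    };

    static int reg_set_gpio(struct reg_state *st, bool enable,
                            int (*gpio_ctrl)(bool on))
    {
            int ret;

            /* skip the hardware access when nothing changes */
            if (st->gpio_on == enable)
                    return 0;

            ret = gpio_ctrl(enable);
            if (ret < 0)
                    return ret;

            st->gpio_on = enable;
            return 0;
    }
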
else if (rdev->desc->ops->enable) { ret = rdev->desc->ops->enable(rdev); if (ret < 0) @@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev) trace_regulator_disable(rdev_get_name(rdev)); if (rdev->ena_pin) { - ret = regulator_ena_gpio_ctrl(rdev, false); - if (ret < 0) - return ret; - rdev->ena_gpio_state = 0; + if (rdev->ena_gpio_state) { + ret = regulator_ena_gpio_ctrl(rdev, false); + if (ret < 0) + return ret; + rdev->ena_gpio_state = 0; + } } else if (rdev->desc->ops->disable) { ret = rdev->desc->ops->disable(rdev); @@ -3444,13 +3448,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj, if (attr == &dev_attr_requested_microamps.attr) return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; - /* all the other attributes exist to support constraints; - * don't show them if there are no constraints, or if the - * relevant supporting methods are missing. - */ - if (!rdev->constraints) - return 0; - /* constraints need specific supporting methods */ if (attr == &dev_attr_min_microvolts.attr || attr == &dev_attr_max_microvolts.attr) @@ -3633,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc, config->ena_gpio, ret); goto wash; } - - if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) - rdev->ena_gpio_state = 1; - - if (config->ena_gpio_invert) - rdev->ena_gpio_state = !rdev->ena_gpio_state; } /* set regulator constraints */ @@ -3807,9 +3798,11 @@ int regulator_suspend_finish(void) list_for_each_entry(rdev, ®ulator_list, list) { mutex_lock(&rdev->mutex); if (rdev->use_count > 0 || rdev->constraints->always_on) { - error = _regulator_do_enable(rdev); - if (error) - ret = error; + if (!_regulator_is_enabled(rdev)) { + error = _regulator_do_enable(rdev); + if (error) + ret = error; + } } else { if (!have_full_constraints()) goto unlock; diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index bc6100103f7f..f0489cb9018b 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c, config.regmap = chip->regmap; config.of_node = dev->of_node; + /* Mask all interrupt sources to deassert interrupt line */ + error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0); + if (!error) + error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0); + if (error) { + dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error); + return error; + } + rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); if (IS_ERR(rdev)) { dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 9205f433573c..18198316b6cf 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev) if (!pmic) return -ENOMEM; + if (of_device_is_compatible(node, "ti,tps659038-pmic")) + palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr = + TPS659038_REGEN2_CTRL; + pmic->dev = &pdev->dev; pmic->palmas = palmas; palmas->pmic = pmic; diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 1f93b752a81c..3fd44353cc80 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(0), + .enable_time = 
400, .owner = THIS_MODULE, }, { .name = "LDO_REG2", @@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(1), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG3", @@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_BUCK4_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(2), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG4", @@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(3), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG5", @@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(4), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG6", @@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(5), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG7", @@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(6), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG8", @@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(7), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "SWITCH_REG1", diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index e2cffe01b807..fb991ec76423 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 92f6af6da699..73354ee27877 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev) void *bufs_va; int err = 0, i; size_t total_buf_space; + bool notify; vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); if (!vrp) @@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev) } } + /* + * Prepare to kick but don't notify yet - we can't do this before + * device is ready. + */ + notify = virtqueue_kick_prepare(vrp->rvq); + + /* From this point on, we can notify and get callbacks. */ + virtio_device_ready(vdev); + /* tell the remote processor it can start sending messages */ - virtqueue_kick(vrp->rvq); + /* + * this might be concurrent with callbacks, but we are only + * doing notify, not a full kick here, so that's ok. 
+ */ + if (notify) + virtqueue_notify(vrp->rvq); dev_info(&vdev->dev, "rpmsg host is online\n"); diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index b4f7744f6751..b283a1a573b3 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -324,7 +324,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) ret = IRQ_HANDLED; } - spin_lock(&suspended_lock); + spin_unlock(&suspended_lock); return ret; } diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index e2436d140175..3a6fd3a8a2ec 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c @@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev) mrst->dev = NULL; } -#ifdef CONFIG_PM -static int mrst_suspend(struct device *dev, pm_message_t mesg) +#ifdef CONFIG_PM_SLEEP +static int mrst_suspend(struct device *dev) { struct mrst_rtc *mrst = dev_get_drvdata(dev); unsigned char tmp; @@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg) */ static inline int mrst_poweroff(struct device *dev) { - return mrst_suspend(dev, PMSG_HIBERNATE); + return mrst_suspend(dev); } static int mrst_resume(struct device *dev) @@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev) return 0; } +static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume); +#define MRST_PM_OPS (&mrst_pm_ops) + #else -#define mrst_suspend NULL -#define mrst_resume NULL +#define MRST_PM_OPS NULL static inline int mrst_poweroff(struct device *dev) { @@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = { .remove = vrtc_mrst_platform_remove, .shutdown = vrtc_mrst_platform_shutdown, .driver = { - .name = (char *) driver_name, - .suspend = mrst_suspend, - .resume = mrst_resume, + .name = driver_name, + .pm = MRST_PM_OPS, } }; diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { static struct s3c_rtc_data const s3c6410_rtc_data = { .max_user_freq = 32768, + .needs_src_clk = true, .irq_handler = s3c6410_rtc_irq, .set_freq = s3c6410_rtc_setfreq, .enable_tick = s3c6410_rtc_enable_tick, diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 96128cb009f3..da212813f2d5 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char * parse input */ num_of_segments = 0; - for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { + for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { for (j = i; (buf[j] != ':') && (buf[j] != '\0') && (buf[j] != '\n') && diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 09db45296eed..7497ddde2dd6 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq) add = 0; continue; } - for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { + for (pos = 0; pos < iter->aob->request.msb_count; pos++) { if (clusters_intersect(req, iter->request[pos]) && (rq_data_dir(req) == WRITE || rq_data_dir(iter->request[pos]) == WRITE)) { diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9219953ee949..d9afc51af7d3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = { 
}; static struct ata_port_info sata_port_info = { - .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, + .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | + ATA_FLAG_SAS_HOST, .pio_mask = ATA_PIO4_ONLY, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 932d9cc98d2f..9c706d8c1441 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = { }; static struct ata_port_info sata_port_info = { - .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, + .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | + ATA_FLAG_SAS_HOST, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; struct sas_ha_struct *ha = port->ha; + struct domain_device *ddev = port->port_dev; /* prevent revalidation from finding sata links in recovery */ mutex_lock(&ha->disco_mutex); @@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, task_pid_nr(current)); - if (port->port_dev) - res = sas_ex_revalidate_domain(port->port_dev); + if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || + ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) + res = sas_ex_revalidate_domain(ddev); SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", port->id, task_pid_nr(current), res); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 99f43b7fc9ab..ab4879e12ea7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( /* * Finally register the new FC Nexus with TCM */ - __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); + transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); return 0; } diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9af7841f2e8c..06de34001c66 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, (unsigned long long)xfer->rx_dma); } - /* REVISIT: We're waiting for ENDRX before we start the next + /* REVISIT: We're waiting for RXBUFF before we start the next * transfer because we need to handle some difficult timing - * issues otherwise. If we wait for ENDTX in one transfer and - * then starts waiting for ENDRX in the next, it's difficult - * to tell the difference between the ENDRX interrupt we're - * actually waiting for and the ENDRX interrupt of the + * issues otherwise. If we wait for TXBUFE in one transfer and + * then starts waiting for RXBUFF in the next, it's difficult + * to tell the difference between the RXBUFF interrupt we're + * actually waiting for and the RXBUFF interrupt of the * previous transfer. * * It should be doable, though. Just not now... 
*/ - spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); + spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a0197fd4e95c..4f8c798e0633 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg) { struct dw_spi *dws = arg; - if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) + clear_bit(TX_BUSY, &dws->dma_chan_busy); + if (test_bit(RX_BUSY, &dws->dma_chan_busy)) return; dw_spi_xfer_done(dws); } @@ -139,6 +140,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + return NULL; + txdesc->callback = dw_spi_dma_tx_done; txdesc->callback_param = dws; @@ -153,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg) { struct dw_spi *dws = arg; - if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) + clear_bit(RX_BUSY, &dws->dma_chan_busy); + if (test_bit(TX_BUSY, &dws->dma_chan_busy)) return; dw_spi_xfer_done(dws); } @@ -184,6 +189,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + return NULL; + rxdesc->callback = dw_spi_dma_rx_done; rxdesc->callback_param = dws; diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 5ba331047cbe..6d331e0db331 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c @@ -36,13 +36,13 @@ struct spi_pci_desc { static struct spi_pci_desc spi_pci_mid_desc_1 = { .setup = dw_spi_mid_init, - .num_cs = 32, + .num_cs = 5, .bus_num = 0, }; static struct spi_pci_desc spi_pci_mid_desc_2 = { .setup = dw_spi_mid_init, - .num_cs = 4, + .num_cs = 2, .bus_num = 1, }; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a97a62b298a..4847afba89f4 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) if (!dws->fifo_len) { u32 fifo; - for (fifo = 2; fifo <= 256; fifo++) { + for (fifo = 1; fifo < 256; fifo++) { dw_writew(dws, DW_SPI_TXFLTR, fifo); if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) break; } dw_writew(dws, DW_SPI_TXFLTR, 0); - dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; + dws->fifo_len = (fifo == 1) ? 0 : fifo; dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); } } diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index c01567d53581..e649bc7d4c08 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master, unsigned long flags; int ret; + if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { + dev_err(spfi->dev, + "Transfer length (%d) is greater than the max supported (%d)", + xfer->len, SPFI_TRANSACTION_TSIZE_MASK); + return -EINVAL; + } + /* * Stop all DMA and reset the controller if the previous transaction * timed-out and never completed it's DMA. 
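Aside on the spi-dw-mid completion hunks a little further back: test_and_clear_bit() only returns the previous value of the single bit it clears (0 or 1), so masking that return value with BIT(RX_BUSY) could never tell whether the other DMA channel was still busy. The fix clears the finished channel's flag first and then tests the peer's flag explicitly. A minimal sketch of the corrected pattern (names here are illustrative; in the driver the flags live in dws->dma_chan_busy):

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* returns true once both halves of the transfer have finished */
    static bool half_done(unsigned long *busy, int finished, int peer)
    {
            clear_bit(finished, busy);      /* mark our half idle */
            return !test_bit(peer, busy);   /* is the other half still running? */
    }

Each DMA callback would call this with its own flag as 'finished' and the other channel's flag as 'peer', and only signal overall completion when it returns true.
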
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 89ca162801da..ee513a85296b 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022) pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; - spi_finalize_current_message(pl022->master); /* disable the SPI/SSP operation */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); + spi_finalize_current_message(pl022->master); } /** diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index ff9cdbdb6672..2b2c359f5a50 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev) struct resource *res; struct device *dev; void __iomem *base; - u32 max_freq, iomode; + u32 max_freq, iomode, num_cs; int ret, irq, size; dev = &pdev->dev; @@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev) } /* use num-cs unless not present or out of range */ - if (of_property_read_u16(dev->of_node, "num-cs", - &master->num_chipselect) || - (master->num_chipselect > SPI_NUM_CHIPSELECTS)) + if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) || + num_cs > SPI_NUM_CHIPSELECTS) master->num_chipselect = SPI_NUM_CHIPSELECTS; + else + master->num_chipselect = num_cs; master->bus_num = pdev->id; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 884a716e50cb..5c0616870358 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -101,6 +101,7 @@ struct ti_qspi { #define QSPI_FLEN(n) ((n - 1) << 0) /* STATUS REGISTER */ +#define BUSY 0x01 #define WC 0x02 /* INTERRUPT REGISTER */ @@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); } +static inline u32 qspi_is_busy(struct ti_qspi *qspi) +{ + u32 stat; + unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT; + + stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); + while ((stat & BUSY) && time_after(timeout, jiffies)) { + cpu_relax(); + stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); + } + + WARN(stat & BUSY, "qspi busy\n"); + return stat & BUSY; +} + static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) { int wlen, count; @@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) wlen = t->bits_per_word >> 3; /* in bytes */ while (count) { + if (qspi_is_busy(qspi)) + return -EBUSY; + switch (wlen) { case 1: dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", @@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); + if (qspi_is_busy(qspi)) + return -EBUSY; + ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); if (!wait_for_completion_timeout(&qspi->transfer_complete, QSPI_COMPLETION_TIMEOUT)) { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c64a3e59fce3..57a195041dc7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master) "failed to unprepare message: %d\n", ret); } } + + trace_spi_message_done(mesg); + master->cur_msg_prepared = false; mesg->state = NULL; if (mesg->complete) mesg->complete(mesg->context); - - trace_spi_message_done(mesg); } EXPORT_SYMBOL_GPL(spi_finalize_current_message); diff --git a/drivers/staging/vt6655/device_main.c 
b/drivers/staging/vt6655/device_main.c index 4324282afe49..03b2a90b9ac0 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice) /* zonetype initial */ pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; - /* Get RFType */ - pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE); - - /* force change RevID for VT3253 emu */ - if ((pDevice->byRFType & RF_EMU) != 0) - pDevice->byRevId = 0x80; - - pDevice->byRFType &= RF_MASK; - pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType); - if (!pDevice->bZoneRegExist) pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; @@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; PSTxDesc head_td; - u32 dma_idx = TYPE_AC0DMA; + u32 dma_idx; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); - if (!ieee80211_is_data(hdr->frame_control)) + if (ieee80211_is_data(hdr->frame_control)) + dma_idx = TYPE_AC0DMA; + else dma_idx = TYPE_TXDMA0; if (AVAIL_TD(priv, dma_idx) < 1) { @@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) head_td->pTDInfo->skb = skb; + if (dma_idx == TYPE_AC0DMA) + head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; + priv->iTDUsed[dma_idx]++; /* Take ownership */ @@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); - if (dma_idx == TYPE_AC0DMA) { - head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; - + if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) MACvTransmitAC0(priv->PortOffset); - } else { + else MACvTransmit0(priv->PortOffset); - } spin_unlock_irqrestore(&priv->lock, flags); @@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) MACvInitialize(priv->PortOffset); MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); + /* Get RFType */ + priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE); + priv->byRFType &= RF_MASK; + + dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType); + device_get_options(priv); device_set_options(priv); /* Mask out the options cannot be set to the chip */ diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c index 941b2adca95a..7626f635f160 100644 --- a/drivers/staging/vt6655/rf.c +++ b/drivers/staging/vt6655/rf.c @@ -794,6 +794,7 @@ bool RFbSetPower( break; case RATE_6M: case RATE_9M: + case RATE_12M: case RATE_18M: byPwr = priv->abyOFDMPwrTbl[uCH]; if (priv->byRFType == RF_UW2452) diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index c42cde59f598..c4286ccac320 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c @@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel) break; case RATE_6M: case RATE_9M: + case RATE_12M: case RATE_18M: case RATE_24M: case RATE_36M: diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 50bad55a0c42..2accb6e47beb 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4256,11 +4256,17 @@ int iscsit_close_connection( pr_debug("Closing iSCSI connection CID %hu on SID:" " %u\n", conn->cid, sess->sid); /* - * Always up conn_logout_comp just in case the RX Thread is sleeping - * and the logout response never got sent because the 
connection - * failed. + * Always up conn_logout_comp for the traditional TCP case just in case + * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout + * response never got sent because the connection failed. + * + * However for iser-target, isert_wait4logout() is using conn_logout_comp + * to signal logout response TX interrupt completion. Go ahead and skip + * this for iser since isert_rx_opcode() does not wait on logout failure, + * and to avoid iscsi_conn pointer dereference in iser-target code. */ - complete(&conn->conn_logout_comp); + if (conn->conn_transport->transport_type == ISCSI_TCP) + complete(&conn->conn_logout_comp); iscsi_release_thread_set(conn); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 1c197bad6132..bdd8731a4daa 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -22,7 +22,6 @@ #include <target/target_core_fabric.h> #include <target/iscsi/iscsi_target_core.h> -#include <target/iscsi/iscsi_transport.h> #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_tq.h" #include "iscsi_target_erl0.h" @@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { spin_unlock_bh(&conn->state_lock); - if (conn->conn_transport->transport_type == ISCSI_TCP) - iscsit_close_connection(conn); + iscsit_close_connection(conn); return; } diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 6b3c32954689..c36bd7c29136 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -953,11 +953,8 @@ static int tcm_loop_make_nexus( transport_free_session(tl_nexus->se_sess); goto out; } - /* - * Now, register the SAS I_T Nexus as active with the call to - * transport_register_session() - */ - __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, + /* Now, register the SAS I_T Nexus as active. 
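A note at this point of the tcm_loop hunk (the tcm_qla2xxx hunk further back makes the same change): __transport_register_session() is swapped for transport_register_session(). Going by the usual kernel naming convention, the double-underscore variant is the caller-already-holds-the-lock flavour, so call sites that register a nexus outside that lock are moved to the self-locking wrapper. A tiny sketch of the convention with generic names (foo/__foo are illustrative, not the target-core API):

    #include <linux/spinlock.h>

    struct foo {
            spinlock_t lock;
            /* ... */
    };

    /* __foo_register(): caller must already hold f->lock */
    static void __foo_register(struct foo *f)
    {
            /* manipulate state protected by f->lock */
    }

    /* foo_register(): self-locking wrapper for everybody else */
    static void foo_register(struct foo *f)
    {
            spin_lock(&f->lock);
            __foo_register(f);
            spin_unlock(&f->lock);
    }
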
*/ + transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, tl_nexus->se_sess, tl_nexus); tl_tpg->tl_nexus = tl_nexus; pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 58f49ff69b14..79b4ec3ca2db 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) return aligned_max_sectors; } +bool se_dev_check_wce(struct se_device *dev) +{ + bool wce = false; + + if (dev->transport->get_write_cache) + wce = dev->transport->get_write_cache(dev); + else if (dev->dev_attrib.emulate_write_cache > 0) + wce = true; + + return wce; +} + int se_dev_set_max_unmap_lba_count( struct se_device *dev, u32 max_unmap_lba_count) @@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) pr_err("Illegal value %d\n", flag); return -EINVAL; } + if (flag && + dev->transport->get_write_cache) { + pr_err("emulate_fua_write not supported for this device\n"); + return -EINVAL; + } + if (dev->export_count) { + pr_err("emulate_fua_write cannot be changed with active" + " exports: %d\n", dev->export_count); + return -EINVAL; + } dev->dev_attrib.emulate_fua_write = flag; pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", dev, dev->dev_attrib.emulate_fua_write); @@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("emulate_write_cache not supported for this device\n"); return -EINVAL; } - + if (dev->export_count) { + pr_err("emulate_write_cache cannot be changed with active" + " exports: %d\n", dev->export_count); + return -EINVAL; + } dev->dev_attrib.emulate_write_cache = flag; pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", dev, dev->dev_attrib.emulate_write_cache); @@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev) ret = dev->transport->configure_device(dev); if (ret) goto out; - dev->dev_flags |= DF_CONFIGURED; - /* * XXX: there is not much point to have two different values here.. */ @@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev) list_add_tail(&dev->g_dev_node, &g_device_list); mutex_unlock(&g_device_mutex); + dev->dev_flags |= DF_CONFIGURED; + return 0; out_free_alua: diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 1045dcd7bf65..f6c954c4635f 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev) struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); struct scsi_device *sd = pdv->pdv_sd; - return sd->type; + return (sd) ? 
sd->type : TYPE_NO_LUN; } static sector_t pscsi_get_blocks(struct se_device *dev) diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 9a2f9d3a6e70..3e7297411110 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) } } if (cdb[1] & 0x8) { - if (!dev->dev_attrib.emulate_fua_write || - !dev->dev_attrib.emulate_write_cache) { + if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) { pr_err("Got CDB: 0x%02x with FUA bit set, but device" " does not advertise support for FUA write\n", cdb[0]); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 460e93109473..6c8bd6bc175c 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -454,19 +454,6 @@ check_scsi_name: } EXPORT_SYMBOL(spc_emulate_evpd_83); -static bool -spc_check_dev_wce(struct se_device *dev) -{ - bool wce = false; - - if (dev->transport->get_write_cache) - wce = dev->transport->get_write_cache(dev); - else if (dev->dev_attrib.emulate_write_cache > 0) - wce = true; - - return wce; -} - /* Extended INQUIRY Data VPD Page */ static sense_reason_t spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) @@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) buf[5] = 0x07; /* If WriteCache emulation is enabled, set V_SUP */ - if (spc_check_dev_wce(dev)) + if (se_dev_check_wce(dev)) buf[6] = 0x01; /* If an LBA map is present set R_SUP */ spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); @@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) if (pc == 1) goto out; - if (spc_check_dev_wce(dev)) + if (se_dev_check_wce(dev)) p[2] = 0x04; /* Write Cache Enable */ p[12] = 0x20; /* Disabled Read Ahead */ @@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) spc_modesense_write_protect(&buf[length], type); - if ((spc_check_dev_wce(dev)) && + if ((se_dev_check_wce(dev)) && (dev->dev_attrib.emulate_fua_write > 0)) spc_modesense_dpofua(&buf[length], type); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0adc0f650213..ac3cbabdbdf0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + if (ret && ack_kref) + target_put_sess_cmd(se_sess, se_cmd); + return ret; } EXPORT_SYMBOL(target_get_sess_cmd); diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 97b486c3dda1..583e755d8091 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) ep = fc_seq_exch(seq); if (ep) { lport = ep->lp; - if (lport && (ep->xid <= lport->lro_xid)) + if (lport && (ep->xid <= lport->lro_xid)) { /* * "ddp_done" trigger invalidation of HW * specific DDP context @@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) * identified using ep->xid) */ cmd->was_ddp_setup = 0; + } } } } diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 2ab229ddee38..6ae5b8560e4d 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ 
b/drivers/tty/serial/8250/8250_dw.c @@ -119,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) dw8250_force_idle(p); writeb(value, p->membase + (UART_LCR << p->regshift)); } - dev_err(p->dev, "Couldn't set LCR to %d\n", value); + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ } } @@ -163,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value) __raw_writeq(value & 0xff, p->membase + (UART_LCR << p->regshift)); } - dev_err(p->dev, "Couldn't set LCR to %d\n", value); + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ } } #endif /* CONFIG_64BIT */ @@ -187,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) dw8250_force_idle(p); writel(value, p->membase + (UART_LCR << p->regshift)); } - dev_err(p->dev, "Couldn't set LCR to %d\n", value); + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ } } diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index ff451048c1ac..4bfb7ac0239f 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -929,6 +929,13 @@ __acquires(hwep->lock) return retval; } +static int otg_a_alt_hnp_support(struct ci_hdrc *ci) +{ + dev_warn(&ci->gadget.dev, + "connect the device to an alternate port if you want HNP\n"); + return isr_setup_status_phase(ci); +} + /** * isr_setup_packet_handler: setup packet handler * @ci: UDC descriptor @@ -1061,6 +1068,10 @@ __acquires(ci->lock) ci); } break; + case USB_DEVICE_A_ALT_HNP_SUPPORT: + if (ci_otg_is_fsm_mode(ci)) + err = otg_a_alt_hnp_support(ci); + break; default: goto delegate; } diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c index c6b35b77dab7..61d538aa2346 100644 --- a/drivers/usb/common/usb-otg-fsm.c +++ b/drivers/usb/common/usb-otg-fsm.c @@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) break; case OTG_STATE_B_PERIPHERAL: otg_chrg_vbus(fsm, 0); - otg_loc_conn(fsm, 1); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_GADGET); + otg_loc_conn(fsm, 1); break; case OTG_STATE_B_WAIT_ACON: otg_chrg_vbus(fsm, 0); @@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) break; case OTG_STATE_A_PERIPHERAL: - otg_loc_conn(fsm, 1); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_GADGET); otg_drv_vbus(fsm, 1); + otg_loc_conn(fsm, 1); otg_add_timer(fsm, A_BIDL_ADIS); break; case OTG_STATE_A_WAIT_VFALL: diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 02e3e2d4ea56..6cf047878dba 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg) dwc2_is_host_mode(hsotg) ? 
"Host" : "Device", dwc2_op_state_str(hsotg)); + if (hsotg->op_state == OTG_STATE_A_HOST) + dwc2_hcd_disconnect(hsotg); + /* Change to L3 (OFF) state */ hsotg->lx_state = DWC2_L3; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -144,10 +144,9 @@ struct ffs_io_data { bool read; struct kiocb *kiocb; - const struct iovec *iovec; - unsigned long nr_segs; - char __user *buf; - size_t len; + struct iov_iter data; + const void *to_free; + char *buf; struct mm_struct *mm; struct work_struct work; @@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) io_data->req->actual; if (io_data->read && ret > 0) { - int i; - size_t pos = 0; - - /* - * Since req->length may be bigger than io_data->len (after - * being rounded up to maxpacketsize), we may end up with more - * data then user space has space for. - */ - ret = min_t(int, ret, io_data->len); - use_mm(io_data->mm); - for (i = 0; i < io_data->nr_segs; i++) { - size_t len = min_t(size_t, ret - pos, - io_data->iovec[i].iov_len); - if (!len) - break; - if (unlikely(copy_to_user(io_data->iovec[i].iov_base, - &io_data->buf[pos], len))) { - ret = -EFAULT; - break; - } - pos += len; - } + ret = copy_to_iter(io_data->buf, ret, &io_data->data); + if (iov_iter_count(&io_data->data)) + ret = -EFAULT; unuse_mm(io_data->mm); } @@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) io_data->kiocb->private = NULL; if (io_data->read) - kfree(io_data->iovec); + kfree(io_data->to_free); kfree(io_data->buf); kfree(io_data); } @@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * before the waiting completes, so do not assign to 'gadget' earlier */ struct usb_gadget *gadget = epfile->ffs->gadget; + size_t copied; spin_lock_irq(&epfile->ffs->eps_lock); /* In the meantime, endpoint got disabled or changed. */ @@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) spin_unlock_irq(&epfile->ffs->eps_lock); return -ESHUTDOWN; } + data_len = iov_iter_count(&io_data->data); /* * Controller may require buffer size to be aligned to * maxpacketsize of an out endpoint. */ - data_len = io_data->read ? 
- usb_ep_align_maybe(gadget, ep->ep, io_data->len) : - io_data->len; + if (io_data->read) + data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); spin_unlock_irq(&epfile->ffs->eps_lock); data = kmalloc(data_len, GFP_KERNEL); if (unlikely(!data)) return -ENOMEM; - if (io_data->aio && !io_data->read) { - int i; - size_t pos = 0; - for (i = 0; i < io_data->nr_segs; i++) { - if (unlikely(copy_from_user(&data[pos], - io_data->iovec[i].iov_base, - io_data->iovec[i].iov_len))) { - ret = -EFAULT; - goto error; - } - pos += io_data->iovec[i].iov_len; - } - } else { - if (!io_data->read && - unlikely(__copy_from_user(data, io_data->buf, - io_data->len))) { + if (!io_data->read) { + copied = copy_from_iter(data, data_len, &io_data->data); + if (copied != data_len) { ret = -EFAULT; goto error; } @@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) */ ret = ep->status; if (io_data->read && ret > 0) { - ret = min_t(size_t, ret, io_data->len); - - if (unlikely(copy_to_user(io_data->buf, - data, ret))) + ret = copy_to_iter(data, ret, &io_data->data); + if (unlikely(iov_iter_count(&io_data->data))) ret = -EFAULT; } } @@ -898,37 +864,6 @@ error: return ret; } -static ssize_t -ffs_epfile_write(struct file *file, const char __user *buf, size_t len, - loff_t *ptr) -{ - struct ffs_io_data io_data; - - ENTER(); - - io_data.aio = false; - io_data.read = false; - io_data.buf = (char * __user)buf; - io_data.len = len; - - return ffs_epfile_io(file, &io_data); -} - -static ssize_t -ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) -{ - struct ffs_io_data io_data; - - ENTER(); - - io_data.aio = false; - io_data.read = true; - io_data.buf = buf; - io_data.len = len; - - return ffs_epfile_io(file, &io_data); -} - static int ffs_epfile_open(struct inode *inode, struct file *file) { @@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) return value; } -static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, - const struct iovec *iovec, - unsigned long nr_segs, loff_t loff) +static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) { - struct ffs_io_data *io_data; + struct ffs_io_data io_data, *p = &io_data; + ssize_t res; ENTER(); - io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); - if (unlikely(!io_data)) - return -ENOMEM; + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; + } - io_data->aio = true; - io_data->read = false; - io_data->kiocb = kiocb; - io_data->iovec = iovec; - io_data->nr_segs = nr_segs; - io_data->len = kiocb->ki_nbytes; - io_data->mm = current->mm; + p->read = false; + p->kiocb = kiocb; + p->data = *from; + p->mm = current->mm; - kiocb->private = io_data; + kiocb->private = p; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); - return ffs_epfile_io(kiocb->ki_filp, io_data); + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + if (p->aio) + kfree(p); + else + *from = p->data; + return res; } -static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, - const struct iovec *iovec, - unsigned long nr_segs, loff_t loff) +static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) { - struct ffs_io_data *io_data; - struct iovec *iovec_copy; + struct ffs_io_data io_data, *p = &io_data; + ssize_t res; ENTER(); - iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); - if (unlikely(!iovec_copy)) - return -ENOMEM; - - memcpy(iovec_copy, 
iovec, sizeof(struct iovec)*nr_segs); - - io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); - if (unlikely(!io_data)) { - kfree(iovec_copy); - return -ENOMEM; + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; } - io_data->aio = true; - io_data->read = true; - io_data->kiocb = kiocb; - io_data->iovec = iovec_copy; - io_data->nr_segs = nr_segs; - io_data->len = kiocb->ki_nbytes; - io_data->mm = current->mm; + p->read = true; + p->kiocb = kiocb; + if (p->aio) { + p->to_free = dup_iter(&p->data, to, GFP_KERNEL); + if (!p->to_free) { + kfree(p); + return -ENOMEM; + } + } else { + p->data = *to; + p->to_free = NULL; + } + p->mm = current->mm; - kiocb->private = io_data; + kiocb->private = p; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); - return ffs_epfile_io(kiocb->ki_filp, io_data); + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + + if (p->aio) { + kfree(p->to_free); + kfree(p); + } else { + *to = p->data; + } + return res; } static int @@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { .llseek = no_llseek, .open = ffs_epfile_open, - .write = ffs_epfile_write, - .read = ffs_epfile_read, - .aio_write = ffs_epfile_aio_write, - .aio_read = ffs_epfile_aio_read, + .write = new_sync_write, + .read = new_sync_read, + .write_iter = ffs_epfile_write_iter, + .read_iter = ffs_epfile_read_iter, .release = ffs_epfile_release, .unlocked_ioctl = ffs_epfile_ioctl, }; diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index 298b46112b1a..39f49f1ad22f 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c @@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop) struct usb_composite_dev *cdev; cdev = loop->function.config->cdev; - disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, - NULL); + disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL); VDBG(cdev, "%s disabled\n", loop->function.name); } diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index e3dae47baef3..3a5ae9900b1e 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -23,15 +23,6 @@ #include "gadget_chips.h" #include "u_f.h" -#define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x) - -enum eptype { - EP_CONTROL = 0, - EP_BULK, - EP_ISOC, - EP_INTERRUPT, -}; - /* * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral * controller drivers. 
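The f_sourcesink hunks below drop the interrupt-endpoint alternate setting (alt 2), so source_sink_start_ep() only has to distinguish bulk from isochronous endpoints. The per-endpoint bring-up they keep is the standard composite-gadget pattern: select the speed-specific descriptor, enable the endpoint, then mark it as claimed. A minimal sketch of that pattern, for illustration only; my_enable_ep is an invented helper and is not part of this patch:

/*
 * Illustrative sketch of the per-altsetting enable pattern kept by the
 * hunks below; my_enable_ep() is invented for the example only.
 */
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>

static int my_enable_ep(struct usb_composite_dev *cdev,
			struct usb_function *f, struct usb_ep *ep)
{
	int ret;

	/* pick the fs/hs/ss endpoint descriptor matching the connected speed */
	ret = config_ep_by_speed(cdev->gadget, f, ep);
	if (ret)
		return ret;

	ret = usb_ep_enable(ep);
	if (ret < 0)
		return ret;

	ep->driver_data = f;	/* mark the endpoint as claimed by this function */
	return 0;
}

Teardown is the mirror image, which is what disable_endpoints() does for each endpoint it is handed: usb_ep_disable() followed by clearing driver_data.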
@@ -64,8 +55,6 @@ struct f_sourcesink { struct usb_ep *out_ep; struct usb_ep *iso_in_ep; struct usb_ep *iso_out_ep; - struct usb_ep *int_in_ep; - struct usb_ep *int_out_ep; int cur_alt; }; @@ -79,10 +68,6 @@ static unsigned isoc_interval; static unsigned isoc_maxpacket; static unsigned isoc_mult; static unsigned isoc_maxburst; -static unsigned int_interval; /* In ms */ -static unsigned int_maxpacket; -static unsigned int_mult; -static unsigned int_maxburst; static unsigned buflen; /*-------------------------------------------------------------------------*/ @@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = { /* .iInterface = DYNAMIC */ }; -static struct usb_interface_descriptor source_sink_intf_alt2 = { - .bLength = USB_DT_INTERFACE_SIZE, - .bDescriptorType = USB_DT_INTERFACE, - - .bAlternateSetting = 2, - .bNumEndpoints = 2, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - /* .iInterface = DYNAMIC */ -}; - /* full speed support: */ static struct usb_endpoint_descriptor fs_source_desc = { @@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = { .bInterval = 4, }; -static struct usb_endpoint_descriptor fs_int_source_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - - .bEndpointAddress = USB_DIR_IN, - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = cpu_to_le16(64), - .bInterval = GZERO_INT_INTERVAL, -}; - -static struct usb_endpoint_descriptor fs_int_sink_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = cpu_to_le16(64), - .bInterval = GZERO_INT_INTERVAL, -}; - static struct usb_descriptor_header *fs_source_sink_descs[] = { (struct usb_descriptor_header *) &source_sink_intf_alt0, (struct usb_descriptor_header *) &fs_sink_desc, @@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = { (struct usb_descriptor_header *) &fs_source_desc, (struct usb_descriptor_header *) &fs_iso_sink_desc, (struct usb_descriptor_header *) &fs_iso_source_desc, - (struct usb_descriptor_header *) &source_sink_intf_alt2, -#define FS_ALT_IFC_2_OFFSET 8 - (struct usb_descriptor_header *) &fs_int_sink_desc, - (struct usb_descriptor_header *) &fs_int_source_desc, NULL, }; @@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = { .bInterval = 4, }; -static struct usb_endpoint_descriptor hs_int_source_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = cpu_to_le16(1024), - .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), -}; - -static struct usb_endpoint_descriptor hs_int_sink_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = cpu_to_le16(1024), - .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), -}; - static struct usb_descriptor_header *hs_source_sink_descs[] = { (struct usb_descriptor_header *) &source_sink_intf_alt0, (struct usb_descriptor_header *) &hs_source_desc, @@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = { (struct usb_descriptor_header *) &hs_sink_desc, (struct usb_descriptor_header *) &hs_iso_source_desc, (struct usb_descriptor_header *) &hs_iso_sink_desc, - (struct usb_descriptor_header *) &source_sink_intf_alt2, -#define HS_ALT_IFC_2_OFFSET 8 - (struct usb_descriptor_header *) &hs_int_source_desc, - 
(struct usb_descriptor_header *) &hs_int_sink_desc, NULL, }; @@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = { .wBytesPerInterval = cpu_to_le16(1024), }; -static struct usb_endpoint_descriptor ss_int_source_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = cpu_to_le16(1024), - .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), -}; - -static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = { - .bLength = USB_DT_SS_EP_COMP_SIZE, - .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, - - .bMaxBurst = 0, - .bmAttributes = 0, - .wBytesPerInterval = cpu_to_le16(1024), -}; - -static struct usb_endpoint_descriptor ss_int_sink_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = cpu_to_le16(1024), - .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), -}; - -static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = { - .bLength = USB_DT_SS_EP_COMP_SIZE, - .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, - - .bMaxBurst = 0, - .bmAttributes = 0, - .wBytesPerInterval = cpu_to_le16(1024), -}; - static struct usb_descriptor_header *ss_source_sink_descs[] = { (struct usb_descriptor_header *) &source_sink_intf_alt0, (struct usb_descriptor_header *) &ss_source_desc, @@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = { (struct usb_descriptor_header *) &ss_iso_source_comp_desc, (struct usb_descriptor_header *) &ss_iso_sink_desc, (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, - (struct usb_descriptor_header *) &source_sink_intf_alt2, -#define SS_ALT_IFC_2_OFFSET 14 - (struct usb_descriptor_header *) &ss_int_source_desc, - (struct usb_descriptor_header *) &ss_int_source_comp_desc, - (struct usb_descriptor_header *) &ss_int_sink_desc, - (struct usb_descriptor_header *) &ss_int_sink_comp_desc, NULL, }; @@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = { }; /*-------------------------------------------------------------------------*/ -static const char *get_ep_string(enum eptype ep_type) -{ - switch (ep_type) { - case EP_ISOC: - return "ISOC-"; - case EP_INTERRUPT: - return "INTERRUPT-"; - case EP_CONTROL: - return "CTRL-"; - case EP_BULK: - return "BULK-"; - default: - return "UNKNOWN-"; - } -} static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) { @@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) void disable_endpoints(struct usb_composite_dev *cdev, struct usb_ep *in, struct usb_ep *out, - struct usb_ep *iso_in, struct usb_ep *iso_out, - struct usb_ep *int_in, struct usb_ep *int_out) + struct usb_ep *iso_in, struct usb_ep *iso_out) { disable_ep(cdev, in); disable_ep(cdev, out); @@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev, disable_ep(cdev, iso_in); if (iso_out) disable_ep(cdev, iso_out); - if (int_in) - disable_ep(cdev, int_in); - if (int_out) - disable_ep(cdev, int_out); } static int @@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f) return id; source_sink_intf_alt0.bInterfaceNumber = id; source_sink_intf_alt1.bInterfaceNumber = id; - source_sink_intf_alt2.bInterfaceNumber = id; /* allocate bulk endpoints */ ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); @@ -546,55 +412,14 @@ no_iso: if (isoc_maxpacket > 1024) isoc_maxpacket = 1024; - /* sanity check the 
interrupt module parameters */ - if (int_interval < 1) - int_interval = 1; - if (int_interval > 4096) - int_interval = 4096; - if (int_mult > 2) - int_mult = 2; - if (int_maxburst > 15) - int_maxburst = 15; - - /* fill in the FS interrupt descriptors from the module parameters */ - fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ? - 64 : int_maxpacket; - fs_int_source_desc.bInterval = int_interval > 255 ? - 255 : int_interval; - fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ? - 64 : int_maxpacket; - fs_int_sink_desc.bInterval = int_interval > 255 ? - 255 : int_interval; - - /* allocate int endpoints */ - ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc); - if (!ss->int_in_ep) - goto no_int; - ss->int_in_ep->driver_data = cdev; /* claim */ - - ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc); - if (ss->int_out_ep) { - ss->int_out_ep->driver_data = cdev; /* claim */ - } else { - ss->int_in_ep->driver_data = NULL; - ss->int_in_ep = NULL; -no_int: - fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL; - hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL; - ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL; - } - - if (int_maxpacket > 1024) - int_maxpacket = 1024; - /* support high speed hardware */ hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; /* - * Fill in the HS isoc and interrupt descriptors from the module - * parameters. We assume that the user knows what they are doing and - * won't give parameters that their UDC doesn't support. + * Fill in the HS isoc descriptors from the module parameters. + * We assume that the user knows what they are doing and won't + * give parameters that their UDC doesn't support. */ hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; @@ -607,17 +432,6 @@ no_int: hs_iso_sink_desc.bInterval = isoc_interval; hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; - hs_int_source_desc.wMaxPacketSize = int_maxpacket; - hs_int_source_desc.wMaxPacketSize |= int_mult << 11; - hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); - hs_int_source_desc.bEndpointAddress = - fs_int_source_desc.bEndpointAddress; - - hs_int_sink_desc.wMaxPacketSize = int_maxpacket; - hs_int_sink_desc.wMaxPacketSize |= int_mult << 11; - hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); - hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; - /* support super speed hardware */ ss_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; @@ -625,9 +439,9 @@ no_int: fs_sink_desc.bEndpointAddress; /* - * Fill in the SS isoc and interrupt descriptors from the module - * parameters. We assume that the user knows what they are doing and - * won't give parameters that their UDC doesn't support. + * Fill in the SS isoc descriptors from the module parameters. + * We assume that the user knows what they are doing and won't + * give parameters that their UDC doesn't support. 
*/ ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; ss_iso_source_desc.bInterval = isoc_interval; @@ -646,37 +460,17 @@ no_int: isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; - ss_int_source_desc.wMaxPacketSize = int_maxpacket; - ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); - ss_int_source_comp_desc.bmAttributes = int_mult; - ss_int_source_comp_desc.bMaxBurst = int_maxburst; - ss_int_source_comp_desc.wBytesPerInterval = - int_maxpacket * (int_mult + 1) * (int_maxburst + 1); - ss_int_source_desc.bEndpointAddress = - fs_int_source_desc.bEndpointAddress; - - ss_int_sink_desc.wMaxPacketSize = int_maxpacket; - ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); - ss_int_sink_comp_desc.bmAttributes = int_mult; - ss_int_sink_comp_desc.bMaxBurst = int_maxburst; - ss_int_sink_comp_desc.wBytesPerInterval = - int_maxpacket * (int_mult + 1) * (int_maxburst + 1); - ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; - ret = usb_assign_descriptors(f, fs_source_sink_descs, hs_source_sink_descs, ss_source_sink_descs); if (ret) return ret; - DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " - "INT-IN/%s, INT-OUT/%s\n", + DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n", (gadget_is_superspeed(c->cdev->gadget) ? "super" : (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), f->name, ss->in_ep->name, ss->out_ep->name, ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", - ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", - ss->int_in_ep ? ss->int_in_ep->name : "<none>", - ss->int_out_ep ? ss->int_out_ep->name : "<none>"); + ss->iso_out_ep ? ss->iso_out_ep->name : "<none>"); return 0; } @@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req) } static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, - enum eptype ep_type, int speed) + bool is_iso, int speed) { struct usb_ep *ep; struct usb_request *req; int i, size, status; for (i = 0; i < 8; i++) { - switch (ep_type) { - case EP_ISOC: + if (is_iso) { switch (speed) { case USB_SPEED_SUPER: size = isoc_maxpacket * (isoc_mult + 1) * @@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, } ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; req = ss_alloc_ep_req(ep, size); - break; - case EP_INTERRUPT: - switch (speed) { - case USB_SPEED_SUPER: - size = int_maxpacket * (int_mult + 1) * - (int_maxburst + 1); - break; - case USB_SPEED_HIGH: - size = int_maxpacket * (int_mult + 1); - break; - default: - size = int_maxpacket > 1023 ? - 1023 : int_maxpacket; - break; - } - ep = is_in ? ss->int_in_ep : ss->int_out_ep; - req = ss_alloc_ep_req(ep, size); - break; - default: + } else { ep = is_in ? ss->in_ep : ss->out_ep; req = ss_alloc_ep_req(ep, 0); - break; } if (!req) @@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, cdev = ss->function.config->cdev; ERROR(cdev, "start %s%s %s --> %d\n", - get_ep_string(ep_type), is_in ? "IN" : "OUT", - ep->name, status); + is_iso ? "ISO-" : "", is_in ? 
"IN" : "OUT", + ep->name, status); free_ep_req(ep, req); } - if (!(ep_type == EP_ISOC)) + if (!is_iso) break; } @@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss) cdev = ss->function.config->cdev; disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, - ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); + ss->iso_out_ep); VDBG(cdev, "%s disabled\n", ss->function.name); } @@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss, int speed = cdev->gadget->speed; struct usb_ep *ep; - if (alt == 2) { - /* Configure for periodic interrupt endpoint */ - ep = ss->int_in_ep; - if (ep) { - result = config_ep_by_speed(cdev->gadget, - &(ss->function), ep); - if (result) - return result; - - result = usb_ep_enable(ep); - if (result < 0) - return result; - - ep->driver_data = ss; - result = source_sink_start_ep(ss, true, EP_INTERRUPT, - speed); - if (result < 0) { -fail1: - ep = ss->int_in_ep; - if (ep) { - usb_ep_disable(ep); - ep->driver_data = NULL; - } - return result; - } - } - - /* - * one interrupt endpoint reads (sinks) anything OUT (from the - * host) - */ - ep = ss->int_out_ep; - if (ep) { - result = config_ep_by_speed(cdev->gadget, - &(ss->function), ep); - if (result) - goto fail1; - - result = usb_ep_enable(ep); - if (result < 0) - goto fail1; - - ep->driver_data = ss; - result = source_sink_start_ep(ss, false, EP_INTERRUPT, - speed); - if (result < 0) { - ep = ss->int_out_ep; - usb_ep_disable(ep); - ep->driver_data = NULL; - goto fail1; - } - } - - goto out; - } - /* one bulk endpoint writes (sources) zeroes IN (to the host) */ ep = ss->in_ep; result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); @@ -966,7 +684,7 @@ fail1: return result; ep->driver_data = ss; - result = source_sink_start_ep(ss, true, EP_BULK, speed); + result = source_sink_start_ep(ss, true, false, speed); if (result < 0) { fail: ep = ss->in_ep; @@ -985,7 +703,7 @@ fail: goto fail; ep->driver_data = ss; - result = source_sink_start_ep(ss, false, EP_BULK, speed); + result = source_sink_start_ep(ss, false, false, speed); if (result < 0) { fail2: ep = ss->out_ep; @@ -1008,7 +726,7 @@ fail2: goto fail2; ep->driver_data = ss; - result = source_sink_start_ep(ss, true, EP_ISOC, speed); + result = source_sink_start_ep(ss, true, true, speed); if (result < 0) { fail3: ep = ss->iso_in_ep; @@ -1031,14 +749,13 @@ fail3: goto fail3; ep->driver_data = ss; - result = source_sink_start_ep(ss, false, EP_ISOC, speed); + result = source_sink_start_ep(ss, false, true, speed); if (result < 0) { usb_ep_disable(ep); ep->driver_data = NULL; goto fail3; } } - out: ss->cur_alt = alt; @@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f, if (ss->in_ep->driver_data) disable_source_sink(ss); - else if (alt == 2 && ss->int_in_ep->driver_data) - disable_source_sink(ss); return enable_source_sink(cdev, ss, alt); } @@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func( isoc_maxpacket = ss_opts->isoc_maxpacket; isoc_mult = ss_opts->isoc_mult; isoc_maxburst = ss_opts->isoc_maxburst; - int_interval = ss_opts->int_interval; - int_maxpacket = ss_opts->int_maxpacket; - int_mult = ss_opts->int_mult; - int_maxburst = ss_opts->int_maxburst; buflen = ss_opts->bulk_buflen; ss->function.name = "source/sink"; @@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen = f_ss_opts_bulk_buflen_show, f_ss_opts_bulk_buflen_store); -static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page) -{ - int result; - - 
mutex_lock(&opts->lock); - result = sprintf(page, "%u", opts->int_interval); - mutex_unlock(&opts->lock); - - return result; -} - -static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts, - const char *page, size_t len) -{ - int ret; - u32 num; - - mutex_lock(&opts->lock); - if (opts->refcnt) { - ret = -EBUSY; - goto end; - } - - ret = kstrtou32(page, 0, &num); - if (ret) - goto end; - - if (num > 4096) { - ret = -EINVAL; - goto end; - } - - opts->int_interval = num; - ret = len; -end: - mutex_unlock(&opts->lock); - return ret; -} - -static struct f_ss_opts_attribute f_ss_opts_int_interval = - __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR, - f_ss_opts_int_interval_show, - f_ss_opts_int_interval_store); - -static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page) -{ - int result; - - mutex_lock(&opts->lock); - result = sprintf(page, "%u", opts->int_maxpacket); - mutex_unlock(&opts->lock); - - return result; -} - -static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts, - const char *page, size_t len) -{ - int ret; - u16 num; - - mutex_lock(&opts->lock); - if (opts->refcnt) { - ret = -EBUSY; - goto end; - } - - ret = kstrtou16(page, 0, &num); - if (ret) - goto end; - - if (num > 1024) { - ret = -EINVAL; - goto end; - } - - opts->int_maxpacket = num; - ret = len; -end: - mutex_unlock(&opts->lock); - return ret; -} - -static struct f_ss_opts_attribute f_ss_opts_int_maxpacket = - __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR, - f_ss_opts_int_maxpacket_show, - f_ss_opts_int_maxpacket_store); - -static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page) -{ - int result; - - mutex_lock(&opts->lock); - result = sprintf(page, "%u", opts->int_mult); - mutex_unlock(&opts->lock); - - return result; -} - -static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts, - const char *page, size_t len) -{ - int ret; - u8 num; - - mutex_lock(&opts->lock); - if (opts->refcnt) { - ret = -EBUSY; - goto end; - } - - ret = kstrtou8(page, 0, &num); - if (ret) - goto end; - - if (num > 2) { - ret = -EINVAL; - goto end; - } - - opts->int_mult = num; - ret = len; -end: - mutex_unlock(&opts->lock); - return ret; -} - -static struct f_ss_opts_attribute f_ss_opts_int_mult = - __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR, - f_ss_opts_int_mult_show, - f_ss_opts_int_mult_store); - -static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page) -{ - int result; - - mutex_lock(&opts->lock); - result = sprintf(page, "%u", opts->int_maxburst); - mutex_unlock(&opts->lock); - - return result; -} - -static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts, - const char *page, size_t len) -{ - int ret; - u8 num; - - mutex_lock(&opts->lock); - if (opts->refcnt) { - ret = -EBUSY; - goto end; - } - - ret = kstrtou8(page, 0, &num); - if (ret) - goto end; - - if (num > 15) { - ret = -EINVAL; - goto end; - } - - opts->int_maxburst = num; - ret = len; -end: - mutex_unlock(&opts->lock); - return ret; -} - -static struct f_ss_opts_attribute f_ss_opts_int_maxburst = - __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR, - f_ss_opts_int_maxburst_show, - f_ss_opts_int_maxburst_store); - static struct configfs_attribute *ss_attrs[] = { &f_ss_opts_pattern.attr, &f_ss_opts_isoc_interval.attr, @@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = { &f_ss_opts_isoc_mult.attr, &f_ss_opts_isoc_maxburst.attr, &f_ss_opts_bulk_buflen.attr, - &f_ss_opts_int_interval.attr, - &f_ss_opts_int_maxpacket.attr, - &f_ss_opts_int_mult.attr, - 
&f_ss_opts_int_maxburst.attr, NULL, }; @@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void) ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; - ss_opts->int_interval = GZERO_INT_INTERVAL; - ss_opts->int_maxpacket = GZERO_INT_MAXPACKET; config_group_init_type_name(&ss_opts->func_inst.group, "", &ss_func_type); diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h index 2ce28b9d97cc..15f180904f8a 100644 --- a/drivers/usb/gadget/function/g_zero.h +++ b/drivers/usb/gadget/function/g_zero.h @@ -10,8 +10,6 @@ #define GZERO_QLEN 32 #define GZERO_ISOC_INTERVAL 4 #define GZERO_ISOC_MAXPACKET 1024 -#define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */ -#define GZERO_INT_MAXPACKET 1024 struct usb_zero_options { unsigned pattern; @@ -19,10 +17,6 @@ struct usb_zero_options { unsigned isoc_maxpacket; unsigned isoc_mult; unsigned isoc_maxburst; - unsigned int_interval; /* In ms */ - unsigned int_maxpacket; - unsigned int_mult; - unsigned int_maxburst; unsigned bulk_buflen; unsigned qlen; }; @@ -34,10 +28,6 @@ struct f_ss_opts { unsigned isoc_maxpacket; unsigned isoc_mult; unsigned isoc_maxburst; - unsigned int_interval; /* In ms */ - unsigned int_maxpacket; - unsigned int_mult; - unsigned int_maxburst; unsigned bulk_buflen; /* @@ -72,7 +62,6 @@ int lb_modinit(void); void free_ep_req(struct usb_ep *ep, struct usb_request *req); void disable_endpoints(struct usb_composite_dev *cdev, struct usb_ep *in, struct usb_ep *out, - struct usb_ep *iso_in, struct usb_ep *iso_out, - struct usb_ep *int_in, struct usb_ep *int_out); + struct usb_ep *iso_in, struct usb_ep *iso_out); #endif /* __G_ZERO_H */ diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index db49ec4c748e..200f9a584064 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); MODULE_AUTHOR ("David Brownell"); MODULE_LICENSE ("GPL"); +static int ep_open(struct inode *, struct file *); + /*----------------------------------------------------------------------*/ @@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) * still need dev->lock to use epdata->ep. */ static int -get_ready_ep (unsigned f_flags, struct ep_data *epdata) +get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) { int val; if (f_flags & O_NONBLOCK) { if (!mutex_trylock(&epdata->lock)) goto nonblock; - if (epdata->state != STATE_EP_ENABLED) { + if (epdata->state != STATE_EP_ENABLED && + (!is_write || epdata->state != STATE_EP_READY)) { mutex_unlock(&epdata->lock); nonblock: val = -EAGAIN; @@ -305,18 +308,20 @@ nonblock: switch (epdata->state) { case STATE_EP_ENABLED: + return 0; + case STATE_EP_READY: /* not configured yet */ + if (is_write) + return 0; + // FALLTHRU + case STATE_EP_UNBOUND: /* clean disconnect */ break; // case STATE_EP_DISABLED: /* "can't happen" */ - // case STATE_EP_READY: /* "can't happen" */ default: /* error! 
*/ pr_debug ("%s: ep %p not available, state %d\n", shortname, epdata, epdata->state); - // FALLTHROUGH - case STATE_EP_UNBOUND: /* clean disconnect */ - val = -ENODEV; - mutex_unlock(&epdata->lock); } - return val; + mutex_unlock(&epdata->lock); + return -ENODEV; } static ssize_t @@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) return value; } - -/* handle a synchronous OUT bulk/intr/iso transfer */ -static ssize_t -ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) -{ - struct ep_data *data = fd->private_data; - void *kbuf; - ssize_t value; - - if ((value = get_ready_ep (fd->f_flags, data)) < 0) - return value; - - /* halt any endpoint by doing a "wrong direction" i/o call */ - if (usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) { - mutex_unlock(&data->lock); - return -EINVAL; - } - DBG (data->dev, "%s halt\n", data->name); - spin_lock_irq (&data->dev->lock); - if (likely (data->ep != NULL)) - usb_ep_set_halt (data->ep); - spin_unlock_irq (&data->dev->lock); - mutex_unlock(&data->lock); - return -EBADMSG; - } - - /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ - - value = -ENOMEM; - kbuf = kmalloc (len, GFP_KERNEL); - if (unlikely (!kbuf)) - goto free1; - - value = ep_io (data, kbuf, len); - VDEBUG (data->dev, "%s read %zu OUT, status %d\n", - data->name, len, (int) value); - if (value >= 0 && copy_to_user (buf, kbuf, value)) - value = -EFAULT; - -free1: - mutex_unlock(&data->lock); - kfree (kbuf); - return value; -} - -/* handle a synchronous IN bulk/intr/iso transfer */ -static ssize_t -ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) -{ - struct ep_data *data = fd->private_data; - void *kbuf; - ssize_t value; - - if ((value = get_ready_ep (fd->f_flags, data)) < 0) - return value; - - /* halt any endpoint by doing a "wrong direction" i/o call */ - if (!usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) { - mutex_unlock(&data->lock); - return -EINVAL; - } - DBG (data->dev, "%s halt\n", data->name); - spin_lock_irq (&data->dev->lock); - if (likely (data->ep != NULL)) - usb_ep_set_halt (data->ep); - spin_unlock_irq (&data->dev->lock); - mutex_unlock(&data->lock); - return -EBADMSG; - } - - /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ - - value = -ENOMEM; - kbuf = memdup_user(buf, len); - if (IS_ERR(kbuf)) { - value = PTR_ERR(kbuf); - kbuf = NULL; - goto free1; - } - - value = ep_io (data, kbuf, len); - VDEBUG (data->dev, "%s write %zu IN, status %d\n", - data->name, len, (int) value); -free1: - mutex_unlock(&data->lock); - kfree (kbuf); - return value; -} - static int ep_release (struct inode *inode, struct file *fd) { @@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) struct ep_data *data = fd->private_data; int status; - if ((status = get_ready_ep (fd->f_flags, data)) < 0) + if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) return status; spin_lock_irq (&data->dev->lock); @@ -517,8 +431,8 @@ struct kiocb_priv { struct mm_struct *mm; struct work_struct work; void *buf; - const struct iovec *iv; - unsigned long nr_segs; + struct iov_iter to; + const void *to_free; unsigned actual; }; @@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) return value; } -static ssize_t ep_copy_to_user(struct kiocb_priv *priv) -{ - ssize_t len, total; - void *to_copy; - int i; - - /* copy stuff into user buffers */ - total = priv->actual; - len = 0; - to_copy = priv->buf; - for 
(i=0; i < priv->nr_segs; i++) { - ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); - - if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { - if (len == 0) - len = -EFAULT; - break; - } - - total -= this; - len += this; - to_copy += this; - if (total == 0) - break; - } - - return len; -} - static void ep_user_copy_worker(struct work_struct *work) { struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); @@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) size_t ret; use_mm(mm); - ret = ep_copy_to_user(priv); + ret = copy_to_iter(priv->buf, priv->actual, &priv->to); unuse_mm(mm); + if (!ret) + ret = -EFAULT; /* completing the iocb can drop the ctx and mm, don't touch mm after */ aio_complete(iocb, ret, ret); kfree(priv->buf); + kfree(priv->to_free); kfree(priv); } @@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) * don't need to copy anything to userspace, so we can * complete the aio request immediately. */ - if (priv->iv == NULL || unlikely(req->actual == 0)) { + if (priv->to_free == NULL || unlikely(req->actual == 0)) { kfree(req->buf); + kfree(priv->to_free); kfree(priv); iocb->private = NULL; /* aio_complete() reports bytes-transferred _and_ faults */ @@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) priv->buf = req->buf; priv->actual = req->actual; + INIT_WORK(&priv->work, ep_user_copy_worker); schedule_work(&priv->work); } spin_unlock(&epdata->dev->lock); @@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) put_ep(epdata); } -static ssize_t -ep_aio_rwtail( - struct kiocb *iocb, - char *buf, - size_t len, - struct ep_data *epdata, - const struct iovec *iv, - unsigned long nr_segs -) +static ssize_t ep_aio(struct kiocb *iocb, + struct kiocb_priv *priv, + struct ep_data *epdata, + char *buf, + size_t len) { - struct kiocb_priv *priv; - struct usb_request *req; - ssize_t value; + struct usb_request *req; + ssize_t value; - priv = kmalloc(sizeof *priv, GFP_KERNEL); - if (!priv) { - value = -ENOMEM; -fail: - kfree(buf); - return value; - } iocb->private = priv; priv->iocb = iocb; - priv->iv = iv; - priv->nr_segs = nr_segs; - INIT_WORK(&priv->work, ep_user_copy_worker); - - value = get_ready_ep(iocb->ki_filp->f_flags, epdata); - if (unlikely(value < 0)) { - kfree(priv); - goto fail; - } kiocb_set_cancel_fn(iocb, ep_aio_cancel); get_ep(epdata); @@ -669,75 +538,154 @@ fail: * allocate or submit those if the host disconnected. 
*/ spin_lock_irq(&epdata->dev->lock); - if (likely(epdata->ep)) { - req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); - if (likely(req)) { - priv->req = req; - req->buf = buf; - req->length = len; - req->complete = ep_aio_complete; - req->context = iocb; - value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); - if (unlikely(0 != value)) - usb_ep_free_request(epdata->ep, req); - } else - value = -EAGAIN; - } else - value = -ENODEV; - spin_unlock_irq(&epdata->dev->lock); + value = -ENODEV; + if (unlikely(epdata->ep)) + goto fail; - mutex_unlock(&epdata->lock); + req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); + value = -ENOMEM; + if (unlikely(!req)) + goto fail; - if (unlikely(value)) { - kfree(priv); - put_ep(epdata); - } else - value = -EIOCBQUEUED; + priv->req = req; + req->buf = buf; + req->length = len; + req->complete = ep_aio_complete; + req->context = iocb; + value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); + if (unlikely(0 != value)) { + usb_ep_free_request(epdata->ep, req); + goto fail; + } + spin_unlock_irq(&epdata->dev->lock); + return -EIOCBQUEUED; + +fail: + spin_unlock_irq(&epdata->dev->lock); + kfree(priv->to_free); + kfree(priv); + put_ep(epdata); return value; } static ssize_t -ep_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t o) +ep_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct ep_data *epdata = iocb->ki_filp->private_data; - char *buf; + struct file *file = iocb->ki_filp; + struct ep_data *epdata = file->private_data; + size_t len = iov_iter_count(to); + ssize_t value; + char *buf; - if (unlikely(usb_endpoint_dir_in(&epdata->desc))) - return -EINVAL; + if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) + return value; - buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); - if (unlikely(!buf)) - return -ENOMEM; + /* halt any endpoint by doing a "wrong direction" i/o call */ + if (usb_endpoint_dir_in(&epdata->desc)) { + if (usb_endpoint_xfer_isoc(&epdata->desc) || + !is_sync_kiocb(iocb)) { + mutex_unlock(&epdata->lock); + return -EINVAL; + } + DBG (epdata->dev, "%s halt\n", epdata->name); + spin_lock_irq(&epdata->dev->lock); + if (likely(epdata->ep != NULL)) + usb_ep_set_halt(epdata->ep); + spin_unlock_irq(&epdata->dev->lock); + mutex_unlock(&epdata->lock); + return -EBADMSG; + } - return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); + buf = kmalloc(len, GFP_KERNEL); + if (unlikely(!buf)) { + mutex_unlock(&epdata->lock); + return -ENOMEM; + } + if (is_sync_kiocb(iocb)) { + value = ep_io(epdata, buf, len); + if (value >= 0 && copy_to_iter(buf, value, to)) + value = -EFAULT; + } else { + struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); + value = -ENOMEM; + if (!priv) + goto fail; + priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); + if (!priv->to_free) { + kfree(priv); + goto fail; + } + value = ep_aio(iocb, priv, epdata, buf, len); + if (value == -EIOCBQUEUED) + buf = NULL; + } +fail: + kfree(buf); + mutex_unlock(&epdata->lock); + return value; } +static ssize_t ep_config(struct ep_data *, const char *, size_t); + static ssize_t -ep_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t o) +ep_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct ep_data *epdata = iocb->ki_filp->private_data; - char *buf; - size_t len = 0; - int i = 0; + struct file *file = iocb->ki_filp; + struct ep_data *epdata = file->private_data; + size_t len = iov_iter_count(from); + bool configured; + ssize_t value; + char *buf; + + if ((value = 
get_ready_ep(file->f_flags, epdata, true)) < 0) + return value; - if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) - return -EINVAL; + configured = epdata->state == STATE_EP_ENABLED; - buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); - if (unlikely(!buf)) + /* halt any endpoint by doing a "wrong direction" i/o call */ + if (configured && !usb_endpoint_dir_in(&epdata->desc)) { + if (usb_endpoint_xfer_isoc(&epdata->desc) || + !is_sync_kiocb(iocb)) { + mutex_unlock(&epdata->lock); + return -EINVAL; + } + DBG (epdata->dev, "%s halt\n", epdata->name); + spin_lock_irq(&epdata->dev->lock); + if (likely(epdata->ep != NULL)) + usb_ep_set_halt(epdata->ep); + spin_unlock_irq(&epdata->dev->lock); + mutex_unlock(&epdata->lock); + return -EBADMSG; + } + + buf = kmalloc(len, GFP_KERNEL); + if (unlikely(!buf)) { + mutex_unlock(&epdata->lock); return -ENOMEM; + } - for (i=0; i < nr_segs; i++) { - if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, - iov[i].iov_len) != 0)) { - kfree(buf); - return -EFAULT; + if (unlikely(copy_from_iter(buf, len, from) != len)) { + value = -EFAULT; + goto out; + } + + if (unlikely(!configured)) { + value = ep_config(epdata, buf, len); + } else if (is_sync_kiocb(iocb)) { + value = ep_io(epdata, buf, len); + } else { + struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); + value = -ENOMEM; + if (priv) { + value = ep_aio(iocb, priv, epdata, buf, len); + if (value == -EIOCBQUEUED) + buf = NULL; } - len += iov[i].iov_len; } - return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); +out: + kfree(buf); + mutex_unlock(&epdata->lock); + return value; } /*----------------------------------------------------------------------*/ @@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, /* used after endpoint configuration */ static const struct file_operations ep_io_operations = { .owner = THIS_MODULE, - .llseek = no_llseek, - .read = ep_read, - .write = ep_write, - .unlocked_ioctl = ep_ioctl, + .open = ep_open, .release = ep_release, - - .aio_read = ep_aio_read, - .aio_write = ep_aio_write, + .llseek = no_llseek, + .read = new_sync_read, + .write = new_sync_write, + .unlocked_ioctl = ep_ioctl, + .read_iter = ep_read_iter, + .write_iter = ep_write_iter, }; /* ENDPOINT INITIALIZATION @@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { * speed descriptor, then optional high speed descriptor. 
*/ static ssize_t -ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) +ep_config (struct ep_data *data, const char *buf, size_t len) { - struct ep_data *data = fd->private_data; struct usb_ep *ep; u32 tag; int value, length = len; - value = mutex_lock_interruptible(&data->lock); - if (value < 0) - return value; - if (data->state != STATE_EP_READY) { value = -EL2HLT; goto fail; @@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) goto fail0; /* we might need to change message format someday */ - if (copy_from_user (&tag, buf, 4)) { - goto fail1; - } + memcpy(&tag, buf, 4); if (tag != 1) { DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); goto fail0; @@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) */ /* full/low speed descriptor, then high speed */ - if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { - goto fail1; - } + memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); if (data->desc.bLength != USB_DT_ENDPOINT_SIZE || data->desc.bDescriptorType != USB_DT_ENDPOINT) goto fail0; if (len != USB_DT_ENDPOINT_SIZE) { if (len != 2 * USB_DT_ENDPOINT_SIZE) goto fail0; - if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, - USB_DT_ENDPOINT_SIZE)) { - goto fail1; - } + memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, + USB_DT_ENDPOINT_SIZE); if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE || data->hs_desc.bDescriptorType != USB_DT_ENDPOINT) { @@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) case USB_SPEED_LOW: case USB_SPEED_FULL: ep->desc = &data->desc; - value = usb_ep_enable(ep); - if (value == 0) - data->state = STATE_EP_ENABLED; break; case USB_SPEED_HIGH: /* fails if caller didn't provide that descriptor... */ ep->desc = &data->hs_desc; - value = usb_ep_enable(ep); - if (value == 0) - data->state = STATE_EP_ENABLED; break; default: DBG(data->dev, "unconnected, %s init abandoned\n", data->name); value = -EINVAL; + goto gone; } + value = usb_ep_enable(ep); if (value == 0) { - fd->f_op = &ep_io_operations; + data->state = STATE_EP_ENABLED; value = length; } gone: @@ -867,14 +800,10 @@ fail: data->desc.bDescriptorType = 0; data->hs_desc.bDescriptorType = 0; } - mutex_unlock(&data->lock); return value; fail0: value = -EINVAL; goto fail; -fail1: - value = -EFAULT; - goto fail; } static int @@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) return value; } -/* used before endpoint configuration */ -static const struct file_operations ep_config_operations = { - .llseek = no_llseek, - - .open = ep_open, - .write = ep_config, - .release = ep_release, -}; - /*----------------------------------------------------------------------*/ /* EP0 IMPLEMENTATION can be partly in userspace. 
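Both the f_fs.c hunks earlier and the gadgetfs conversion above move from aio_read/aio_write with raw iovec arrays to read_iter/write_iter on an iov_iter: iov_iter_count() replaces the hand-maintained length, copy_from_iter()/copy_to_iter() replace the per-segment copy_{from,to}_user() loops, and asynchronous reads duplicate the iterator with dup_iter() so the completion worker can finish the copy in user context later. A minimal sketch of the synchronous read side, for illustration only; struct my_dev and my_produce() are invented placeholders, not part of the patch:

/*
 * Illustrative sketch of the ->read_iter() shape used above; my_dev and
 * my_produce() stand in for the driver's own device and data source.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uio.h>

struct my_dev;
static ssize_t my_produce(struct my_dev *dev, char *buf, size_t len);

static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct my_dev *dev = iocb->ki_filp->private_data;
	size_t len = iov_iter_count(to);
	char *buf;
	ssize_t ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = my_produce(dev, buf, len);	/* fill buf with up to len bytes */
	if (ret > 0 && copy_to_iter(buf, ret, to) != ret)
		ret = -EFAULT;			/* user buffer faulted mid-copy */

	kfree(buf);
	return ret;
}

The file_operations wiring then pairs .read_iter/.write_iter with new_sync_read/new_sync_write for the plain read(2)/write(2) entry points, exactly as both converted drivers do above.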
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) enum ep0_state state; spin_lock_irq (&dev->lock); + if (dev->state <= STATE_DEV_OPENED) { + retval = -EINVAL; + goto done; + } /* report fd mode change before acting on it */ if (dev->setup_abort) { @@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) struct dev_data *dev = fd->private_data; ssize_t retval = -ESRCH; - spin_lock_irq (&dev->lock); - /* report fd mode change before acting on it */ if (dev->setup_abort) { dev->setup_abort = 0; @@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) } else DBG (dev, "fail %s, state %d\n", __func__, dev->state); - spin_unlock_irq (&dev->lock); return retval; } @@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) struct dev_data *dev = fd->private_data; int mask = 0; + if (dev->state <= STATE_DEV_OPENED) + return DEFAULT_POLLMASK; + poll_wait(fd, &dev->wait, wait); spin_lock_irq (&dev->lock); @@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) return ret; } -/* used after device configuration */ -static const struct file_operations ep0_io_operations = { - .owner = THIS_MODULE, - .llseek = no_llseek, - - .read = ep0_read, - .write = ep0_write, - .fasync = ep0_fasync, - .poll = ep0_poll, - .unlocked_ioctl = dev_ioctl, - .release = dev_release, -}; - /*----------------------------------------------------------------------*/ /* The in-kernel gadget driver handles most ep0 issues, in particular @@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) goto enomem1; data->dentry = gadgetfs_create_file (dev->sb, data->name, - data, &ep_config_operations); + data, &ep_io_operations); if (!data->dentry) goto enomem2; list_add_tail (&data->epfiles, &dev->epfiles); @@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) u32 tag; char *kbuf; + spin_lock_irq(&dev->lock); + if (dev->state > STATE_DEV_OPENED) { + value = ep0_write(fd, buf, len, ptr); + spin_unlock_irq(&dev->lock); + return value; + } + spin_unlock_irq(&dev->lock); + if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) return -EINVAL; @@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) * on, they can work ... except in cleanup paths that * kick in after the ep0 descriptor is closed. 
*/ - fd->f_op = &ep0_io_operations; value = len; } return value; @@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) return value; } -static const struct file_operations dev_init_operations = { +static const struct file_operations ep0_operations = { .llseek = no_llseek, .open = dev_open, + .read = ep0_read, .write = dev_config, .fasync = ep0_fasync, + .poll = ep0_poll, .unlocked_ioctl = dev_ioctl, .release = dev_release, }; @@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) goto Enomem; dev->sb = sb; - dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); + dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); if (!dev->dentry) { put_dev(dev); goto Enomem; diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 3a494168661e..6e0a019aad54 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c @@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name) goto err_session; } /* - * Now register the TCM vHost virtual I_T Nexus as active with the - * call to __transport_register_session() + * Now register the TCM vHost virtual I_T Nexus as active. */ - __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, + transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, tv_nexus->tvn_se_sess, tv_nexus); tpg->tpg_nexus = tv_nexus; mutex_unlock(&tpg->tpg_mutex); diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c index ff97ac93ac03..5ee95152493c 100644 --- a/drivers/usb/gadget/legacy/zero.c +++ b/drivers/usb/gadget/legacy/zero.c @@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = { .isoc_maxpacket = GZERO_ISOC_MAXPACKET, .bulk_buflen = GZERO_BULK_BUFLEN, .qlen = GZERO_QLEN, - .int_interval = GZERO_INT_INTERVAL, - .int_maxpacket = GZERO_INT_MAXPACKET, }; /*-------------------------------------------------------------------------*/ @@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); -module_param_named(int_interval, gzero_options.int_interval, uint, - S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(int_interval, "1 - 16"); - -module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint, - S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)"); - -module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)"); - -module_param_named(int_maxburst, gzero_options.int_maxburst, uint, - S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)"); - static struct usb_function *func_lb; static struct usb_function_instance *func_inst_lb; @@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev) ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; ss_opts->isoc_mult = gzero_options.isoc_mult; ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; - ss_opts->int_interval = gzero_options.int_interval; - ss_opts->int_maxpacket = gzero_options.int_maxpacket; - ss_opts->int_mult = gzero_options.int_mult; - ss_opts->int_maxburst = gzero_options.int_maxburst; ss_opts->bulk_buflen = gzero_options.bulk_buflen; func_ss = usb_get_function(func_inst_ss); diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 663f7908b15c..be0964a801e8 100644 --- a/drivers/usb/host/ehci-atmel.c +++ 
b/drivers/usb/host/ehci-atmel.c @@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel"; struct atmel_ehci_priv { struct clk *iclk; - struct clk *fclk; struct clk *uclk; bool clocked; }; @@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci) { if (atmel_ehci->clocked) return; - if (IS_ENABLED(CONFIG_COMMON_CLK)) { - clk_set_rate(atmel_ehci->uclk, 48000000); - clk_prepare_enable(atmel_ehci->uclk); - } + + clk_prepare_enable(atmel_ehci->uclk); clk_prepare_enable(atmel_ehci->iclk); - clk_prepare_enable(atmel_ehci->fclk); atmel_ehci->clocked = true; } @@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci) { if (!atmel_ehci->clocked) return; - clk_disable_unprepare(atmel_ehci->fclk); + clk_disable_unprepare(atmel_ehci->iclk); - if (IS_ENABLED(CONFIG_COMMON_CLK)) - clk_disable_unprepare(atmel_ehci->uclk); + clk_disable_unprepare(atmel_ehci->uclk); atmel_ehci->clocked = false; } @@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) retval = -ENOENT; goto fail_request_resource; } - atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); - if (IS_ERR(atmel_ehci->fclk)) { - dev_err(&pdev->dev, "Error getting function clock\n"); - retval = -ENOENT; + + atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); + if (IS_ERR(atmel_ehci->uclk)) { + dev_err(&pdev->dev, "failed to get uclk\n"); + retval = PTR_ERR(atmel_ehci->uclk); goto fail_request_resource; } - if (IS_ENABLED(CONFIG_COMMON_CLK)) { - atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); - if (IS_ERR(atmel_ehci->uclk)) { - dev_err(&pdev->dev, "failed to get uclk\n"); - retval = PTR_ERR(atmel_ehci->uclk); - goto fail_request_resource; - } - } ehci = hcd_to_ehci(hcd); /* registers start at offset 0x0 */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5fb66db89e05..73485fa4372f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, if (!command) return; - ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED; + ep->ep_state |= EP_HALTED; ep->stopped_stream = stream_id; xhci_queue_reset_ep(xhci, command, slot_id, ep_index); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b06d1a53652d..ec8ac1674854 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1338,12 +1338,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) goto exit; } - /* Reject urb if endpoint is in soft reset, queue must stay empty */ - if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) { - xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n"); - ret = -EINVAL; - } - if (usb_endpoint_xfer_isoc(&urb->ep->desc)) size = urb->number_of_packets; else @@ -2954,36 +2948,23 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, } } -/* Called after clearing a halted device. USB core should have sent the control +/* Called when clearing halted device. The core should have sent the control * message to clear the device halt condition. The host side of the halt should - * already be cleared with a reset endpoint command issued immediately when the - * STALL tx event was received. + * already be cleared with a reset endpoint command issued when the STALL tx + * event was received. 
+ * + * Context: in_interrupt */ void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; - struct usb_device *udev; - struct xhci_virt_device *virt_dev; - struct xhci_virt_ep *virt_ep; - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_command *command; - unsigned int ep_index, ep_state; - unsigned long flags; - u32 ep_flag; xhci = hcd_to_xhci(hcd); - udev = (struct usb_device *) ep->hcpriv; - if (!ep->hcpriv) - return; - virt_dev = xhci->devs[udev->slot_id]; - ep_index = xhci_get_endpoint_index(&ep->desc); - virt_ep = &virt_dev->eps[ep_index]; - ep_state = virt_ep->ep_state; /* - * Implement the config ep command in xhci 4.6.8 additional note: + * We might need to implement the config ep cmd in xhci 4.8.1 note: * The Reset Endpoint Command may only be issued to endpoints in the * Halted state. If software wishes reset the Data Toggle or Sequence * Number of an endpoint that isn't in the Halted state, then software @@ -2991,72 +2972,9 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, * for the target endpoint. that is in the Stopped state. */ - if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) { - virt_ep->ep_state &= ~EP_RECENTLY_HALTED; - xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n"); - return; - } - - /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */ - if (usb_endpoint_xfer_control(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) - return; - - ep_flag = xhci_get_endpoint_flag(&ep->desc); - - if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) - return; - - command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT); - if (!command) { - xhci_err(xhci, "Could not allocate xHCI command structure.\n"); - return; - } - - spin_lock_irqsave(&xhci->lock, flags); - - /* block ringing ep doorbell */ - virt_ep->ep_state |= EP_CONFIG_PENDING; - - /* - * Make sure endpoint ring is empty before resetting the toggle/seq. - * Driver is required to synchronously cancel all transfer request. - * - * xhci 4.6.6 says we can issue a configure endpoint command on a - * running endpoint ring as long as it's idle (queue empty) - */ - - if (!list_empty(&virt_ep->ring->td_list)) { - dev_err(&udev->dev, "EP not empty, refuse reset\n"); - spin_unlock_irqrestore(&xhci->lock, flags); - goto cleanup; - } - - xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n", - udev->slot_id, ep_index); - - ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); - if (!ctrl_ctx) { - xhci_err(xhci, "Could not get input context, bad type. 
virt_dev: %p, in_ctx %p\n", - virt_dev, virt_dev->in_ctx); - spin_unlock_irqrestore(&xhci->lock, flags); - goto cleanup; - } - xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, - virt_dev->out_ctx, ctrl_ctx, - ep_flag, ep_flag); - xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index); - - xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma, - udev->slot_id, false); - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - - wait_for_completion(command->completion); - -cleanup: - virt_ep->ep_state &= ~EP_CONFIG_PENDING; - xhci_free_command(xhci, command); + /* For now just print debug to follow the situation */ + xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", + ep->desc.bEndpointAddress); } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 265ab1771d24..8e421b89632d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -865,8 +865,6 @@ struct xhci_virt_ep { #define EP_HAS_STREAMS (1 << 4) /* Transitioning the endpoint to not using streams, don't enqueue URBs */ #define EP_GETTING_NO_STREAMS (1 << 5) -#define EP_RECENTLY_HALTED (1 << 6) -#define EP_CONFIG_PENDING (1 << 7) /* ---- Related to URB cancellation ---- */ struct list_head cancelled_td_list; struct xhci_td *stopped_td; diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c index b9827556455f..bfa402cf3a27 100644 --- a/drivers/usb/isp1760/isp1760-core.c +++ b/drivers/usb/isp1760/isp1760-core.c @@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags, } if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { - ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | - IRQF_DISABLED); + ret = isp1760_udc_register(isp, irq, irqflags); if (ret < 0) { isp1760_hcd_unregister(&isp->hcd); return ret; diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c index 9612d7990565..f32c292cc868 100644 --- a/drivers/usb/isp1760/isp1760-udc.c +++ b/drivers/usb/isp1760/isp1760-udc.c @@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct isp1760_udc *udc = gadget_to_udc(gadget); + unsigned long flags; /* The hardware doesn't support low speed. 
*/ if (driver->max_speed < USB_SPEED_FULL) { @@ -1198,7 +1199,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, return -EINVAL; } - spin_lock(&udc->lock); + spin_lock_irqsave(&udc->lock, flags); if (udc->driver) { dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); @@ -1208,7 +1209,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, udc->driver = driver; - spin_unlock(&udc->lock); + spin_unlock_irqrestore(&udc->lock, flags); dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", driver->function); @@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, static int isp1760_udc_stop(struct usb_gadget *gadget) { struct isp1760_udc *udc = gadget_to_udc(gadget); + unsigned long flags; dev_dbg(udc->isp->dev, "%s\n", __func__); @@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget) isp1760_udc_write(udc, DC_MODE, 0); - spin_lock(&udc->lock); + spin_lock_irqsave(&udc->lock, flags); udc->driver = NULL; - spin_unlock(&udc->lock); + spin_unlock_irqrestore(&udc->lock, flags); return 0; } @@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc) return -ENODEV; } - if (chipid != 0x00011582) { + if (chipid != 0x00011582 && chipid != 0x00158210) { dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); return -ENODEV; } @@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq, sprintf(udc->irqname, "%s (udc)", devname); - ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | - irqflags, udc->irqname, udc); + ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags, + udc->irqname, udc); if (ret < 0) goto error; diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 14e1628483d9..39db8b603627 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010 config USB_MUSB_OMAP2PLUS tristate "OMAP2430 and onwards" - depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY + depends on ARCH_OMAP2PLUS && USB + depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY select GENERIC_PHY config USB_MUSB_AM35X diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c index 403fab772724..7b3035ff9434 100644 --- a/drivers/usb/phy/phy-am335x-control.c +++ b/drivers/usb/phy/phy-am335x-control.c @@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev) return NULL; dev = bus_find_device(&platform_bus_type, NULL, node, match); + if (!dev) + return NULL; + ctrl_usb = dev_get_drvdata(dev); if (!ctrl_usb) return NULL; diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 82570425fdfe..c85ea530085f 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ +UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, + "Initio Corporation", + "", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, "JMicron", diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index f88bfdf5b6a0..2027a27546ef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, func = vfio_pci_set_err_trigger; 
break; } + break; case VFIO_PCI_REQ_IRQ_INDEX: switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_TRIGGER: func = vfio_pci_set_req_trigger; break; } + break; } if (!func) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8d4f3f1ff799..71df240a467a 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, goto out; } /* - * Now register the TCM vhost virtual I_T Nexus as active with the - * call to __transport_register_session() + * Now register the TCM vhost virtual I_T Nexus as active. */ - __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, + transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, tv_nexus->tvn_se_sess, tv_nexus); tpg->tpg_nexus = tv_nexus; diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 32c0b6b28097..9362424c2340 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, len = clcdfb_snprintf_mode(NULL, 0, mode); name = devm_kzalloc(dev, len + 1, GFP_KERNEL); + if (!name) + return -ENOMEM; + clcdfb_snprintf_mode(name, len + 1, mode); mode->name = name; diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 95338593ebf4..868facdec638 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, int num = 0, i, first = 1; int ver, rev; - ver = edid[EDID_STRUCT_VERSION]; - rev = edid[EDID_STRUCT_REVISION]; - mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); if (mode == NULL) return NULL; @@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, return NULL; } + ver = edid[EDID_STRUCT_VERSION]; + rev = edid[EDID_STRUCT_REVISION]; + *dbsize = 0; DPRINTK(" Detailed Timings\n"); diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c index 5a2095a98ed8..12186557a9d4 100644 --- a/drivers/video/fbdev/omap2/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c @@ -28,44 +28,22 @@ #include <video/omapdss.h> #include "dss.h" -static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) +static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = NULL; - - for_each_dss_dev(dssdev) { - if (dssdev->dev == dev) { - omap_dss_put_device(dssdev); - return dssdev; - } - } - - return NULL; -} - -static ssize_t display_name_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); - return snprintf(buf, PAGE_SIZE, "%s\n", dssdev->name ? 
dssdev->name : ""); } -static ssize_t display_enabled_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", omapdss_device_is_enabled(dssdev)); } -static ssize_t display_enabled_store(struct device *dev, - struct device_attribute *attr, +static ssize_t display_enabled_store(struct omap_dss_device *dssdev, const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int r; bool enable; @@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev, return size; } -static ssize_t display_tear_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); return snprintf(buf, PAGE_SIZE, "%d\n", dssdev->driver->get_te ? dssdev->driver->get_te(dssdev) : 0); } -static ssize_t display_tear_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_tear_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int r; bool te; @@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev, return size; } -static ssize_t display_timings_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); struct omap_video_timings t; if (!dssdev->driver->get_timings) @@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev, t.y_res, t.vfp, t.vbp, t.vsw); } -static ssize_t display_timings_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_timings_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); struct omap_video_timings t = dssdev->panel.timings; int r, found; @@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev, return size; } -static ssize_t display_rotate_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int rotate; if (!dssdev->driver->get_rotate) return -ENOENT; @@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", rotate); } -static ssize_t display_rotate_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_rotate_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int rot, r; if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) @@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev, return size; } -static ssize_t display_mirror_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int mirror; if (!dssdev->driver->get_mirror) return -ENOENT; @@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct 
device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", mirror); } -static ssize_t display_mirror_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_mirror_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int r; bool mirror; @@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev, return size; } -static ssize_t display_wss_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); unsigned int wss; if (!dssdev->driver->get_wss) @@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); } -static ssize_t display_wss_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_wss_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); u32 wss; int r; @@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev, return size; } -static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); -static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, +struct display_attribute { + struct attribute attr; + ssize_t (*show)(struct omap_dss_device *, char *); + ssize_t (*store)(struct omap_dss_device *, const char *, size_t); +}; + +#define DISPLAY_ATTR(_name, _mode, _show, _store) \ + struct display_attribute display_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); +static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); +static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, display_enabled_show, display_enabled_store); -static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, display_tear_show, display_tear_store); -static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, display_timings_show, display_timings_store); -static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, display_rotate_show, display_rotate_store); -static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, display_mirror_show, display_mirror_store); -static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, display_wss_show, display_wss_store); -static const struct attribute *display_sysfs_attrs[] = { - &dev_attr_display_name.attr, - &dev_attr_enabled.attr, - &dev_attr_tear_elim.attr, - &dev_attr_timings.attr, - &dev_attr_rotate.attr, - &dev_attr_mirror.attr, - &dev_attr_wss.attr, +static struct attribute *display_sysfs_attrs[] = { + &display_attr_name.attr, + &display_attr_display_name.attr, + &display_attr_enabled.attr, + &display_attr_tear_elim.attr, + &display_attr_timings.attr, + &display_attr_rotate.attr, + &display_attr_mirror.attr, + &display_attr_wss.attr, NULL }; +static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct omap_dss_device *dssdev; + struct display_attribute *display_attr; + + dssdev = container_of(kobj, struct omap_dss_device, kobj); + display_attr = container_of(attr, struct display_attribute, attr); + + if (!display_attr->show) + return -ENOENT; + + return display_attr->show(dssdev, buf); +} + +static ssize_t 
display_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t size) +{ + struct omap_dss_device *dssdev; + struct display_attribute *display_attr; + + dssdev = container_of(kobj, struct omap_dss_device, kobj); + display_attr = container_of(attr, struct display_attribute, attr); + + if (!display_attr->store) + return -ENOENT; + + return display_attr->store(dssdev, buf, size); +} + +static const struct sysfs_ops display_sysfs_ops = { + .show = display_attr_show, + .store = display_attr_store, +}; + +static struct kobj_type display_ktype = { + .sysfs_ops = &display_sysfs_ops, + .default_attrs = display_sysfs_attrs, +}; + int display_init_sysfs(struct platform_device *pdev) { struct omap_dss_device *dssdev = NULL; int r; for_each_dss_dev(dssdev) { - struct kobject *kobj = &dssdev->dev->kobj; - - r = sysfs_create_files(kobj, display_sysfs_attrs); + r = kobject_init_and_add(&dssdev->kobj, &display_ktype, + &pdev->dev.kobj, dssdev->alias); if (r) { DSSERR("failed to create sysfs files\n"); - goto err; - } - - r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias); - if (r) { - sysfs_remove_files(kobj, display_sysfs_attrs); - - DSSERR("failed to create sysfs display link\n"); + omap_dss_put_device(dssdev); goto err; } } @@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev) struct omap_dss_device *dssdev = NULL; for_each_dss_dev(dssdev) { - sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); - sysfs_remove_files(&dssdev->dev->kobj, - display_sysfs_attrs); + if (kobject_name(&dssdev->kobj) == NULL) + continue; + + kobject_del(&dssdev->kobj); + kobject_put(&dssdev->kobj); + + memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); } } diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0413157f3b49..6a356e344f82 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/balloon_compaction.h> #include <linux/oom.h> +#include <linux/wait.h> /* * Balloon device works in 4K page units. 
So each page is pointed to by @@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self, static int balloon(void *_vballoon) { struct virtio_balloon *vb = _vballoon; + DEFINE_WAIT_FUNC(wait, woken_wake_function); set_freezable(); while (!kthread_should_stop()) { s64 diff; try_to_freeze(); - wait_event_interruptible(vb->config_change, - (diff = towards_target(vb)) != 0 - || vb->need_stats_update - || kthread_should_stop() - || freezing(current)); + + add_wait_queue(&vb->config_change, &wait); + for (;;) { + if ((diff = towards_target(vb)) != 0 || + vb->need_stats_update || + kthread_should_stop() || + freezing(current)) + break; + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + remove_wait_queue(&vb->config_change, &wait); + if (vb->need_stats_update) stats_handle_request(vb); if (diff > 0) @@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev) if (err < 0) goto out_oom_notify; + virtio_device_ready(vdev); + vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { err = PTR_ERR(vb->thread); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index cad569890908..6010d7ec0a0f 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - u8 *ptr = buf; - int i; + void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; + u8 b; + __le16 w; + __le32 l; - for (i = 0; i < len; i++) - ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); + if (vm_dev->version == 1) { + u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + ptr[i] = readb(base + offset + i); + return; + } + + switch (len) { + case 1: + b = readb(base + offset); + memcpy(buf, &b, sizeof b); + break; + case 2: + w = cpu_to_le16(readw(base + offset)); + memcpy(buf, &w, sizeof w); + break; + case 4: + l = cpu_to_le32(readl(base + offset)); + memcpy(buf, &l, sizeof l); + break; + case 8: + l = cpu_to_le32(readl(base + offset)); + memcpy(buf, &l, sizeof l); + l = cpu_to_le32(ioread32(base + offset + sizeof l)); + memcpy(buf + sizeof l, &l, sizeof l); + break; + default: + BUG(); + } } static void vm_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - const u8 *ptr = buf; - int i; + void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; + u8 b; + __le16 w; + __le32 l; - for (i = 0; i < len; i++) - writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); + if (vm_dev->version == 1) { + const u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + writeb(ptr[i], base + offset + i); + + return; + } + + switch (len) { + case 1: + memcpy(&b, buf, sizeof b); + writeb(b, base + offset); + break; + case 2: + memcpy(&w, buf, sizeof w); + writew(le16_to_cpu(w), base + offset); + break; + case 4: + memcpy(&l, buf, sizeof l); + writel(le32_to_cpu(l), base + offset); + break; + case 8: + memcpy(&l, buf, sizeof l); + writel(le32_to_cpu(l), base + offset); + memcpy(&l, buf + sizeof l, sizeof l); + writel(le32_to_cpu(l), base + offset + sizeof l); + break; + default: + BUG(); + } +} + +static u32 vm_generation(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + if (vm_dev->version == 1) + return 0; + else + return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION); } static u8 
vm_get_status(struct virtio_device *vdev) @@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev) static const struct virtio_config_ops virtio_mmio_config_ops = { .get = vm_get, .set = vm_set, + .generation = vm_generation, .get_status = vm_get_status, .set_status = vm_set_status, .reset = vm_reset, diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c index c8def68d9e4c..0deaa4f971f5 100644 --- a/drivers/watchdog/imgpdc_wdt.c +++ b/drivers/watchdog/imgpdc_wdt.c @@ -42,10 +42,10 @@ #define PDC_WDT_MIN_TIMEOUT 1 #define PDC_WDT_DEF_TIMEOUT 64 -static int heartbeat; +static int heartbeat = PDC_WDT_DEF_TIMEOUT; module_param(heartbeat, int, 0); -MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " - "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds " + "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); @@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev) pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; pdc_wdt->wdt_dev.parent = &pdev->dev; + watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); if (ret < 0) { @@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev) watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); platform_set_drvdata(pdev, pdc_wdt); - watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); ret = watchdog_register_device(&pdc_wdt->wdt_dev); if (ret) diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index a87f6df6e85f..938b987de551 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev) u32 reg; struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); void __iomem *wdt_base = mtk_wdt->wdt_base; - u32 ret; + int ret; ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); if (ret < 0) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b4bca2d4a7e5..70fba973a107 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) pirq_query_unmask(irq); rc = set_evtchn_to_irq(evtchn, irq); - if (rc != 0) { - pr_err("irq%d: Failed to set port to irq mapping (%d)\n", - irq, rc); - xen_evtchn_close(evtchn); - return 0; - } + if (rc) + goto err; + bind_evtchn_to_cpu(evtchn, 0); info->evtchn = evtchn; + rc = xen_evtchn_port_setup(info); + if (rc) + goto err; + out: unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; + +err: + pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); + xen_evtchn_close(evtchn); + return 0; } static unsigned int startup_pirq(struct irq_data *data) diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 46ae0f9f02ad..75fe3d466515 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -16,7 +16,7 @@ #include "conf_space.h" #include "conf_space_quirks.h" -static bool permissive; +bool permissive; module_param(permissive, bool, 0644); /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index e56c934ad137..2e1d73d1d5d0 100644 --- 
a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h @@ -64,6 +64,8 @@ struct config_field_entry { void *data; }; +extern bool permissive; + #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) /* Add fields to a device - the add_fields macro expects to get a pointer to diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c5ee82587e8c..2d7369391472 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -11,6 +11,10 @@ #include "pciback.h" #include "conf_space.h" +struct pci_cmd_info { + u16 val; +}; + struct pci_bar_info { u32 val; u32 len_val; @@ -20,22 +24,36 @@ struct pci_bar_info { #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) -static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) +/* Bits guests are allowed to control in permissive mode. */ +#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ + PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ + PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) + +static void *command_init(struct pci_dev *dev, int offset) { - int i; - int ret; - - ret = xen_pcibk_read_config_word(dev, offset, value, data); - if (!pci_is_enabled(dev)) - return ret; - - for (i = 0; i < PCI_ROM_RESOURCE; i++) { - if (dev->resource[i].flags & IORESOURCE_IO) - *value |= PCI_COMMAND_IO; - if (dev->resource[i].flags & IORESOURCE_MEM) - *value |= PCI_COMMAND_MEMORY; + struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + int err; + + if (!cmd) + return ERR_PTR(-ENOMEM); + + err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); + if (err) { + kfree(cmd); + return ERR_PTR(err); } + return cmd; +} + +static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) +{ + int ret = pci_read_config_word(dev, offset, value); + const struct pci_cmd_info *cmd = data; + + *value &= PCI_COMMAND_GUEST; + *value |= cmd->val & ~PCI_COMMAND_GUEST; + return ret; } @@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) { struct xen_pcibk_dev_data *dev_data; int err; + u16 val; + struct pci_cmd_info *cmd = data; dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { @@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) } } + cmd->val = value; + + if (!permissive && (!dev_data || !dev_data->permissive)) + return 0; + + /* Only allow the guest to control certain bits. 
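To make the command-register filtering concrete, a small standalone sketch of the same masking arithmetic. The mask value is computed from the standard PCI_COMMAND_* bit encodings and the example register values are made up; this is illustrative only, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Bits the guest may control, per the PCI_COMMAND_GUEST definition above:
     * MASTER|SPECIAL|INVALIDATE|VGA_PALETTE|WAIT|FAST_BACK. */
    #define GUEST_BITS 0x02bcU

    int main(void)
    {
            uint16_t hw  = 0x0007;  /* hardware: IO, MEMORY, MASTER enabled by the host */
            uint16_t cmd = 0x0000;  /* emulated: the guest has not enabled anything */

            /* command_read(): guest-controlled bits from hardware, the rest cached */
            uint16_t seen = (uint16_t)((hw & GUEST_BITS) | (cmd & ~GUEST_BITS));

            printf("guest sees 0x%04x\n", seen);    /* 0x0004: only MASTER shows through */
            return 0;
    }
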
*/ + err = pci_read_config_word(dev, offset, &val); + if (err || val == value) + return err; + + value &= PCI_COMMAND_GUEST; + value |= val & ~PCI_COMMAND_GUEST; + return pci_write_config_word(dev, offset, value); } @@ -282,6 +315,8 @@ static const struct config_field header_common[] = { { .offset = PCI_COMMAND, .size = 2, + .init = command_init, + .release = bar_release, .u.w.read = command_read, .u.w.write = command_write, }, diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 9faca6a60bb0..42bd55a6c237 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -1659,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg, name); goto out; } - /* - * Now register the TCM pvscsi virtual I_T Nexus as active with the - * call to __transport_register_session() - */ - __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, + /* Now register the TCM pvscsi virtual I_T Nexus as active. */ + transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, tv_nexus->tvn_se_sess, tv_nexus); tpg->tpg_nexus = tv_nexus; diff --git a/fs/affs/file.c b/fs/affs/file.c index d2468bf95669..a91795e01a7f 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, boff = tmp % bsize; if (boff) { bh = affs_bread_ino(inode, bidx, 0); - if (IS_ERR(bh)) - return PTR_ERR(bh); + if (IS_ERR(bh)) { + written = PTR_ERR(bh); + goto err_first_bh; + } tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); @@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, bidx++; } else if (bidx) { bh = affs_bread_ino(inode, bidx - 1, 0); - if (IS_ERR(bh)) - return PTR_ERR(bh); + if (IS_ERR(bh)) { + written = PTR_ERR(bh); + goto err_first_bh; + } } while (from + bsize <= to) { prev_bh = bh; bh = affs_getemptyblk_ino(inode, bidx); if (IS_ERR(bh)) - goto out; + goto err_bh; memcpy(AFFS_DATA(bh), data + from, bsize); if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); @@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, prev_bh = bh; bh = affs_bread_ino(inode, bidx, 1); if (IS_ERR(bh)) - goto out; + goto err_bh; tmp = min(bsize, to - from); BUG_ON(tmp > bsize); memcpy(AFFS_DATA(bh), data + from, tmp); @@ -790,12 +794,13 @@ done: if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; +err_first_bh: unlock_page(page); page_cache_release(page); return written; -out: +err_bh: bh = prev_bh; if (!written) written = PTR_ERR(bh); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 84c3b00f3de8..f9c89cae39ee 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3387,6 +3387,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, + struct btrfs_root *root); int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); int btrfs_free_block_groups(struct btrfs_fs_info *info); int btrfs_read_block_groups(struct btrfs_root *root); @@ -3909,6 +3911,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode, loff_t actual_len, u64 *alloc_hint); int btrfs_inode_check_errors(struct inode *inode); extern const struct dentry_operations btrfs_dentry_operations; +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS +void 
btrfs_test_inode_set_ops(struct inode *inode); +#endif /* ioctl.c */ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f79f38542a73..639f2663ed3f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3921,7 +3921,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, } if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) + sizeof(struct btrfs_chunk)) { - printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", + printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n", btrfs_super_sys_array_size(sb), sizeof(struct btrfs_disk_key) + sizeof(struct btrfs_chunk)); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6f080451fcb1..8b353ad02f03 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3325,6 +3325,32 @@ out: return ret; } +int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_block_group_cache *cache, *tmp; + struct btrfs_transaction *cur_trans = trans->transaction; + struct btrfs_path *path; + + if (list_empty(&cur_trans->dirty_bgs) || + !btrfs_test_opt(root, SPACE_CACHE)) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* Could add new block groups, use _safe just in case */ + list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, + dirty_list) { + if (cache->disk_cache_state == BTRFS_DC_CLEAR) + cache_save_setup(cache, trans, path); + } + + btrfs_free_path(path); + return 0; +} + int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -5110,7 +5136,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents++; + nr_extents = (unsigned)div64_u64(num_bytes + + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + BTRFS_I(inode)->outstanding_extents += nr_extents; + nr_extents = 0; if (BTRFS_I(inode)->outstanding_extents > BTRFS_I(inode)->reserved_extents) @@ -5255,6 +5285,9 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) if (dropped > 0) to_free += btrfs_calc_trans_metadata_size(root, dropped); + if (btrfs_test_is_dummy_root(root)) + return; + trace_btrfs_space_reservation(root->fs_info, "delalloc", btrfs_ino(inode), to_free, 0); if (root->fs_info->quota_enabled) { diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c7233ff1d533..d688cfe5d496 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4968,6 +4968,12 @@ static int release_extent_buffer(struct extent_buffer *eb) /* Should be safe to release our pages at this point */ btrfs_release_extent_buffer_page(eb); +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS + if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) { + __free_extent_buffer(eb); + return 1; + } +#endif call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); return 1; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index da828cf5e8f8..d2e732d7af52 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -108,6 +108,13 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, static int btrfs_dirty_inode(struct inode *inode); +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS +void btrfs_test_inode_set_ops(struct inode *inode) +{ + BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; +} +#endif + static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, struct inode 
*inode, struct inode *dir, const struct qstr *qstr) @@ -1542,30 +1549,17 @@ static void btrfs_split_extent_hook(struct inode *inode, u64 new_size; /* - * We need the largest size of the remaining extent to see if we - * need to add a new outstanding extent. Think of the following - * case - * - * [MEAX_EXTENT_SIZEx2 - 4k][4k] - * - * The new_size would just be 4k and we'd think we had enough - * outstanding extents for this if we only took one side of the - * split, same goes for the other direction. We need to see if - * the larger size still is the same amount of extents as the - * original size, because if it is we need to add a new - * outstanding extent. But if we split up and the larger size - * is less than the original then we are good to go since we've - * already accounted for the extra extent in our original - * accounting. + * See the explanation in btrfs_merge_extent_hook, the same + * applies here, just in reverse. */ new_size = orig->end - split + 1; - if ((split - orig->start) > new_size) - new_size = split - orig->start; - - num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, + num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE); - if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE) < num_extents) + new_size = split - orig->start; + num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE) >= num_extents) return; } @@ -1591,8 +1585,10 @@ static void btrfs_merge_extent_hook(struct inode *inode, if (!(other->state & EXTENT_DELALLOC)) return; - old_size = other->end - other->start + 1; - new_size = old_size + (new->end - new->start + 1); + if (new->start > other->start) + new_size = new->end - other->start + 1; + else + new_size = other->end - new->start + 1; /* we're not bigger than the max, unreserve the space and go */ if (new_size <= BTRFS_MAX_EXTENT_SIZE) { @@ -1603,13 +1599,32 @@ static void btrfs_merge_extent_hook(struct inode *inode, } /* - * If we grew by another max_extent, just return, we want to keep that - * reserved amount. + * We have to add up either side to figure out how many extents were + * accounted for before we merged into one big extent. If the number of + * extents we accounted for is <= the amount we need for the new range + * then we can return, otherwise drop. Think of it like this + * + * [ 4k][MAX_SIZE] + * + * So we've grown the extent by a MAX_SIZE extent, this would mean we + * need 2 outstanding extents, on one side we have 1 and the other side + * we have 1 so they are == and we can return. But in this case + * + * [MAX_SIZE+4k][MAX_SIZE+4k] + * + * Each range on their own accounts for 2 extents, but merged together + * they are only 3 extents worth of accounting, so we need to drop in + * this case. 
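Worked numbers may make the two cases above easier to follow. This assumes BTRFS_MAX_EXTENT_SIZE is 128 MiB, as in this series, and simply repeats the round-up division the hooks use; illustrative only, not part of the patch:

    #include <stdio.h>

    #define MAX_EXTENT      (128ULL * 1024 * 1024) /* assumed BTRFS_MAX_EXTENT_SIZE */
    #define SZ_4K           4096ULL

    /* Same rounding as the div64_u64(len + MAX - 1, MAX) calls in the hooks. */
    static unsigned long long extents_for(unsigned long long len)
    {
            return (len + MAX_EXTENT - 1) / MAX_EXTENT;
    }

    int main(void)
    {
            /* [4k][MAX_SIZE]: 1 + 1 accounted, the merged range needs 2 -> keep both */
            printf("case 1: %llu + %llu accounted, %llu needed\n",
                   extents_for(SZ_4K), extents_for(MAX_EXTENT),
                   extents_for(SZ_4K + MAX_EXTENT));

            /* [MAX_SIZE+4k][MAX_SIZE+4k]: 2 + 2 accounted, merged needs 3 -> drop one */
            printf("case 2: %llu + %llu accounted, %llu needed\n",
                   extents_for(MAX_EXTENT + SZ_4K), extents_for(MAX_EXTENT + SZ_4K),
                   extents_for(2 * (MAX_EXTENT + SZ_4K)));
            return 0;
    }
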
*/ + old_size = other->end - other->start + 1; num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE); + old_size = new->end - new->start + 1; + num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE) > num_extents) + BTRFS_MAX_EXTENT_SIZE) >= num_extents) return; spin_lock(&BTRFS_I(inode)->lock); @@ -1686,6 +1701,10 @@ static void btrfs_set_bit_hook(struct inode *inode, spin_unlock(&BTRFS_I(inode)->lock); } + /* For sanity tests */ + if (btrfs_test_is_dummy_root(root)) + return; + __percpu_counter_add(&root->fs_info->delalloc_bytes, len, root->fs_info->delalloc_batch); spin_lock(&BTRFS_I(inode)->lock); @@ -1741,6 +1760,10 @@ static void btrfs_clear_bit_hook(struct inode *inode, root != root->fs_info->tree_root) btrfs_delalloc_release_metadata(inode, len); + /* For sanity tests. */ + if (btrfs_test_is_dummy_root(root)) + return; + if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && do_list && !(state->state & EXTENT_NORESERVE)) btrfs_free_reserved_data_space(inode, len); @@ -7213,7 +7236,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; u64 len = bh_result->b_size; - u64 orig_len = len; + u64 *outstanding_extents = NULL; int unlock_bits = EXTENT_LOCKED; int ret = 0; @@ -7225,6 +7248,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, lockstart = start; lockend = start + len - 1; + if (current->journal_info) { + /* + * Need to pull our outstanding extents and set journal_info to NULL so + * that anything that needs to check if there's a transction doesn't get + * confused. + */ + outstanding_extents = current->journal_info; + current->journal_info = NULL; + } + /* * If this errors out it's because we couldn't invalidate pagecache for * this range and we need to fallback to buffered. @@ -7348,11 +7381,20 @@ unlock: if (start + len > i_size_read(inode)) i_size_write(inode, start + len); - if (len < orig_len) { + /* + * If we have an outstanding_extents count still set then we're + * within our reservation, otherwise we need to adjust our inode + * counter appropriately. + */ + if (*outstanding_extents) { + (*outstanding_extents)--; + } else { spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); } + + current->journal_info = outstanding_extents; btrfs_free_reserved_data_space(inode, len); } @@ -7376,6 +7418,8 @@ unlock: unlock_err: clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, unlock_bits, 1, 0, &cached_state, GFP_NOFS); + if (outstanding_extents) + current->journal_info = outstanding_extents; return ret; } @@ -8075,6 +8119,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; + u64 outstanding_extents = 0; size_t count = 0; int flags = 0; bool wakeup = true; @@ -8112,6 +8157,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, ret = btrfs_delalloc_reserve_space(inode, count); if (ret) goto out; + outstanding_extents = div64_u64(count + + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + + /* + * We need to know how many extents we reserved so that we can + * do the accounting properly if we go over the number we + * originally calculated. Abuse current->journal_info for this. 
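The current->journal_info trick described here is easy to misread, so here is the hand-off reduced to its skeleton. The helper names are made up; the real callers are btrfs_direct_IO() and btrfs_get_blocks_direct(). A sketch only, not part of the patch:

    #include <linux/fs.h>
    #include <linux/sched.h>
    #include <linux/math64.h>

    static ssize_t example_submit_blockdev_dio(struct inode *inode, size_t count); /* hypothetical */

    static ssize_t example_dio_submit(struct inode *inode, size_t count)
    {
            u64 outstanding_extents = div64_u64(count + BTRFS_MAX_EXTENT_SIZE - 1,
                                                BTRFS_MAX_EXTENT_SIZE);
            ssize_t ret;

            /* Stash the counter where the get_blocks callback can find it. */
            current->journal_info = &outstanding_extents;

            ret = example_submit_blockdev_dio(inode, count);

            /* Always restore it, so later transaction code never sees the pointer. */
            current->journal_info = NULL;
            return ret;
    }
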
+ */ + current->journal_info = &outstanding_extents; } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags)) { inode_dio_done(inode); @@ -8124,6 +8179,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, iter, offset, btrfs_get_blocks_direct, NULL, btrfs_submit_direct, flags); if (rw & WRITE) { + current->journal_info = NULL; if (ret < 0 && ret != -EIOCBQUEUED) btrfs_delalloc_release_space(inode, count); else if (ret >= 0 && (size_t)ret < count) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 97159a8e91d4..058c79eecbfb 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1259,7 +1259,7 @@ static int comp_oper(struct btrfs_qgroup_operation *oper1, if (oper1->seq < oper2->seq) return -1; if (oper1->seq > oper2->seq) - return -1; + return 1; if (oper1->ref_root < oper2->ref_root) return -1; if (oper1->ref_root > oper2->ref_root) diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index a116b55ce788..054fc0d97131 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -911,6 +911,197 @@ out: return ret; } +static int test_extent_accounting(void) +{ + struct inode *inode = NULL; + struct btrfs_root *root = NULL; + int ret = -ENOMEM; + + inode = btrfs_new_test_inode(); + if (!inode) { + test_msg("Couldn't allocate inode\n"); + return ret; + } + + root = btrfs_alloc_dummy_root(); + if (IS_ERR(root)) { + test_msg("Couldn't allocate root\n"); + goto out; + } + + root->fs_info = btrfs_alloc_dummy_fs_info(); + if (!root->fs_info) { + test_msg("Couldn't allocate dummy fs info\n"); + goto out; + } + + BTRFS_I(inode)->root = root; + btrfs_test_inode_set_ops(inode); + + /* [BTRFS_MAX_EXTENT_SIZE] */ + BTRFS_I(inode)->outstanding_extents++; + ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, + NULL); + if (ret) { + test_msg("btrfs_set_extent_delalloc returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 1) { + ret = -EINVAL; + test_msg("Miscount, wanted 1, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* [BTRFS_MAX_EXTENT_SIZE][4k] */ + BTRFS_I(inode)->outstanding_extents++; + ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, + BTRFS_MAX_EXTENT_SIZE + 4095, NULL); + if (ret) { + test_msg("btrfs_set_extent_delalloc returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 2) { + ret = -EINVAL; + test_msg("Miscount, wanted 2, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, + BTRFS_MAX_EXTENT_SIZE >> 1, + (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, + EXTENT_DELALLOC | EXTENT_DIRTY | + EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, + NULL, GFP_NOFS); + if (ret) { + test_msg("clear_extent_bit returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 2) { + ret = -EINVAL; + test_msg("Miscount, wanted 2, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* [BTRFS_MAX_EXTENT_SIZE][4K] */ + BTRFS_I(inode)->outstanding_extents++; + ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, + (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, + NULL); + if (ret) { + test_msg("btrfs_set_extent_delalloc returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 2) { + ret = -EINVAL; + test_msg("Miscount, wanted 2, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* + * [BTRFS_MAX_EXTENT_SIZE+4K][4K 
HOLE][BTRFS_MAX_EXTENT_SIZE+4K] + * + * I'm artificially adding 2 to outstanding_extents because in the + * buffered IO case we'd add things up as we go, but I don't feel like + * doing that here, this isn't the interesting case we want to test. + */ + BTRFS_I(inode)->outstanding_extents += 2; + ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, + (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, + NULL); + if (ret) { + test_msg("btrfs_set_extent_delalloc returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 4) { + ret = -EINVAL; + test_msg("Miscount, wanted 4, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ + BTRFS_I(inode)->outstanding_extents++; + ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, + BTRFS_MAX_EXTENT_SIZE+8191, NULL); + if (ret) { + test_msg("btrfs_set_extent_delalloc returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 3) { + ret = -EINVAL; + test_msg("Miscount, wanted 3, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, + BTRFS_MAX_EXTENT_SIZE+4096, + BTRFS_MAX_EXTENT_SIZE+8191, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, + NULL, GFP_NOFS); + if (ret) { + test_msg("clear_extent_bit returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 4) { + ret = -EINVAL; + test_msg("Miscount, wanted 4, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* + * Refill the hole again just for good measure, because I thought it + * might fail and I'd rather satisfy my paranoia at this point. 
+ */ + BTRFS_I(inode)->outstanding_extents++; + ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, + BTRFS_MAX_EXTENT_SIZE+8191, NULL); + if (ret) { + test_msg("btrfs_set_extent_delalloc returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents != 3) { + ret = -EINVAL; + test_msg("Miscount, wanted 3, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + + /* Empty */ + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, + NULL, GFP_NOFS); + if (ret) { + test_msg("clear_extent_bit returned %d\n", ret); + goto out; + } + if (BTRFS_I(inode)->outstanding_extents) { + ret = -EINVAL; + test_msg("Miscount, wanted 0, got %u\n", + BTRFS_I(inode)->outstanding_extents); + goto out; + } + ret = 0; +out: + if (ret) + clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, + NULL, GFP_NOFS); + iput(inode); + btrfs_free_dummy_root(root); + return ret; +} + int btrfs_test_inodes(void) { int ret; @@ -924,5 +1115,9 @@ int btrfs_test_inodes(void) if (ret) return ret; test_msg("Running hole first btrfs_get_extent test\n"); - return test_hole_first(); + ret = test_hole_first(); + if (ret) + return ret; + test_msg("Running outstanding_extents tests\n"); + return test_extent_accounting(); } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 88e51aded6bd..8be4278e25e8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1023,17 +1023,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, u64 old_root_bytenr; u64 old_root_used; struct btrfs_root *tree_root = root->fs_info->tree_root; - bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID); old_root_used = btrfs_root_used(&root->root_item); - btrfs_write_dirty_block_groups(trans, root); while (1) { old_root_bytenr = btrfs_root_bytenr(&root->root_item); if (old_root_bytenr == root->node->start && - old_root_used == btrfs_root_used(&root->root_item) && - (!extent_root || - list_empty(&trans->transaction->dirty_bgs))) + old_root_used == btrfs_root_used(&root->root_item)) break; btrfs_set_root_node(&root->root_item, root->node); @@ -1044,14 +1040,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, return ret; old_root_used = btrfs_root_used(&root->root_item); - if (extent_root) { - ret = btrfs_write_dirty_block_groups(trans, root); - if (ret) - return ret; - } - ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); - if (ret) - return ret; } return 0; @@ -1068,6 +1056,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; + struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; struct list_head *next; struct extent_buffer *eb; int ret; @@ -1095,11 +1084,15 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, if (ret) return ret; + ret = btrfs_setup_space_cache(trans, root); + if (ret) + return ret; + /* run_qgroups might have added some more refs */ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); if (ret) return ret; - +again: while (!list_empty(&fs_info->dirty_cowonly_roots)) { next = fs_info->dirty_cowonly_roots.next; list_del_init(next); @@ -1112,8 +1105,23 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, ret = update_cowonly_root(trans, root); if (ret) return ret; + ret = 
btrfs_run_delayed_refs(trans, root, (unsigned long)-1); + if (ret) + return ret; } + while (!list_empty(dirty_bgs)) { + ret = btrfs_write_dirty_block_groups(trans, root); + if (ret) + return ret; + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); + if (ret) + return ret; + } + + if (!list_empty(&fs_info->dirty_cowonly_roots)) + goto again; + list_add_tail(&fs_info->extent_root->dirty_list, &trans->transaction->switch_commits); btrfs_after_dev_replace_commit(fs_info); @@ -1811,6 +1819,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, wait_for_commit(root, cur_trans); + if (unlikely(cur_trans->aborted)) + ret = cur_trans->aborted; + btrfs_put_transaction(cur_trans); return ret; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ed19a7d622fa..39706c57ad3c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) newpage = buf->page; - if (WARN_ON(!PageUptodate(newpage))) - return -EIO; + if (!PageUptodate(newpage)) + SetPageUptodate(newpage); ClearPageMappedToDisk(newpage); @@ -1353,6 +1353,17 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, return err; } +static int fuse_dev_open(struct inode *inode, struct file *file) +{ + /* + * The fuse device's file's private_data is used to hold + * the fuse_conn(ection) when it is mounted, and is used to + * keep track of whether the file has been mounted already. + */ + file->private_data = NULL; + return 0; +} + static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { @@ -1797,6 +1808,9 @@ copy_finish: static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { + /* Don't try to move pages (yet) */ + cs->move_pages = 0; + switch (code) { case FUSE_NOTIFY_POLL: return fuse_notify_poll(fc, size, cs); @@ -2217,6 +2231,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on) const struct file_operations fuse_dev_operations = { .owner = THIS_MODULE, + .open = fuse_dev_open, .llseek = no_llseek, .read = do_sync_read, .aio_read = fuse_dev_read, diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index 6e560d56094b..754fdf8c6356 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c @@ -131,13 +131,16 @@ skip: hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_bnode_dump(node); - if (new_node) { - /* update parent key if we inserted a key - * at the start of the first node - */ - if (!rec && new_node != node) - hfs_brec_update_parent(fd); + /* + * update parent key if we inserted a key + * at the start of the node and it is not the new node + */ + if (!rec && new_node != node) { + hfs_bnode_read_key(node, fd->search_key, data_off + size); + hfs_brec_update_parent(fd); + } + if (new_node) { hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); @@ -168,9 +171,6 @@ skip: goto again; } - if (!rec) - hfs_brec_update_parent(fd); - return 0; } @@ -370,6 +370,8 @@ again: if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd, hfs_find_rec_by_key); + if (fd->record < 0) + return -ENOENT; hfs_bnode_dump(parent); rec = fd->record; diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index b684e8a132e6..2bacb9988566 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -207,6 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of, goto out_free; } + of->event = atomic_read(&of->kn->attr.open->event); ops = kernfs_ops(of->kn); if 
(ops->read) len = ops->read(of, buf, len, *ppos); diff --git a/fs/locks.c b/fs/locks.c index f1bad681fc1c..528fedfda15e 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1728,7 +1728,7 @@ static int generic_delete_lease(struct file *filp, void *owner) break; } } - trace_generic_delete_lease(inode, fl); + trace_generic_delete_lease(inode, victim); if (victim) error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 3c1bfa155571..1028a0629543 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -587,8 +587,6 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); - nfsd4_cb_layout_fail(ls); - printk(KERN_WARNING "nfsd: client %s failed to respond to layout recall. " " Fencing..\n", addr_str); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 469086b9f99b..0c3f303baf32 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_inode_info *ii, *n; + int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); int defer_iput = false; spin_lock(&nilfs->ns_inode_lock); @@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, brelse(ii->i_bh); ii->i_bh = NULL; list_del_init(&ii->i_dirty); - if (!ii->vfs_inode.i_nlink) { + if (!ii->vfs_inode.i_nlink || during_mount) { /* - * Defer calling iput() to avoid a deadlock - * over I_SYNC flag for inodes with i_nlink == 0 + * Defer calling iput() to avoid deadlocks if + * i_nlink == 0 or mount is not yet finished. */ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); defer_iput = true; diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 9a66ff79ff27..d2f97ecca6a5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) return false; - if (event_mask & marks_mask & ~marks_ignored_mask) + if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask & + ~marks_ignored_mask) return true; return false; diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 8490c64d34fe..460c6c37e683 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) { - if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) + if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO) return 1; return 0; } diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 20e37a3ed26f..db64ce2d4667 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -102,11 +102,11 @@ | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ - | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) + | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \ + | OCFS2_FEATURE_INCOMPAT_APPEND_DIO) #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ - | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ - | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) + | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) /* * Heartbeat-only devices are missing journals and other files. 
The @@ -179,6 +179,11 @@ #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 /* + * Append Direct IO support + */ +#define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000 + +/* * backup superblock flag is used to indicate that this volume * has backup superblocks. */ @@ -200,10 +205,6 @@ #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 -/* - * Append Direct IO support - */ -#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008 /* The byte offset of the first backup block will be 1G. * The following will be 4G, 16G, 64G, 256G and 1T. diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index b90952f528b1..5f0d1993e6e3 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -529,8 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) { struct ovl_fs *ufs = sb->s_fs_info; - if (!(*flags & MS_RDONLY) && - (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))) + if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) return -EROFS; return 0; @@ -615,9 +614,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) break; default: + pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p); return -EINVAL; } } + + /* Workdir is useless in non-upper mount */ + if (!config->upperdir && config->workdir) { + pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n", + config->workdir); + kfree(config->workdir); + config->workdir = NULL; + } + return 0; } @@ -837,7 +846,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_stack_depth = 0; if (ufs->config.upperdir) { - /* FIXME: workdir is not needed for a R/O mount */ if (!ufs->config.workdir) { pr_err("overlayfs: missing 'workdir'\n"); goto out_free_config; @@ -847,6 +855,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_free_config; + /* Upper fs should not be r/o */ + if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) { + pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n"); + err = -EINVAL; + goto out_put_upperpath; + } + err = ovl_mount_dir(ufs->config.workdir, &workpath); if (err) goto out_put_upperpath; @@ -869,8 +884,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) err = -EINVAL; stacklen = ovl_split_lowerdirs(lowertmp); - if (stacklen > OVL_MAX_STACK) + if (stacklen > OVL_MAX_STACK) { + pr_err("overlayfs: too many lower directries, limit is %d\n", + OVL_MAX_STACK); goto out_free_lowertmp; + } else if (!ufs->config.upperdir && stacklen == 1) { + pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n"); + goto out_free_lowertmp; + } stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); if (!stack) @@ -932,8 +953,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->numlower++; } - /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ - if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) + /* If the upper fs is nonexistent, we mark overlayfs r/o too */ + if (!ufs->upper_mnt) sb->s_flags |= MS_RDONLY; sb->s_d_op = &ovl_dentry_operations; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 956b75d61809..6dee68d013ff 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1325,6 +1325,9 @@ out: static int pagemap_open(struct inode *inode, struct file *file) { + /* do not disclose physical addresses: attack vector */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; pr_warn_once("Bits 55-60 of 
/proc/PID/pagemap entries are about " "to stop being page-shift some time soon. See the " "linux/Documentation/vm/pagemap.txt for details.\n"); diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 2fbc804e1a45..226f77246a70 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h @@ -13,7 +13,8 @@ #define PULL_DISABLE (1 << 3) #define INPUT_EN (1 << 5) -#define SLEWCTRL_FAST (1 << 6) +#define SLEWCTRL_SLOW (1 << 6) +#define SLEWCTRL_FAST 0 /* update macro depending on INPUT_EN and PULL_ENA */ #undef PIN_OUTPUT diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index 9c2e4f82381e..5f4d01898c9c 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -18,7 +18,8 @@ #define PULL_DISABLE (1 << 16) #define PULL_UP (1 << 17) #define INPUT_EN (1 << 18) -#define SLEWCTRL_FAST (1 << 19) +#define SLEWCTRL_SLOW (1 << 19) +#define SLEWCTRL_FAST 0 #define DS0_PULL_UP_DOWN_EN (1 << 27) #define PIN_OUTPUT (PULL_DISABLE) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 7c55dd5dd2c9..66203b268984 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -114,6 +114,7 @@ struct vgic_ops { void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); u64 (*get_eisr)(const struct kvm_vcpu *vcpu); + void (*clear_eisr)(struct kvm_vcpu *vcpu); u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); void (*enable_underflow)(struct kvm_vcpu *vcpu); void (*disable_underflow)(struct kvm_vcpu *vcpu); diff --git a/include/linux/clk.h b/include/linux/clk.h index 8381bbfbc308..68c16a6bedb3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees); */ int clk_get_phase(struct clk *clk); +/** + * clk_is_match - check if two clk's point to the same hardware clock + * @p: clk compared against q + * @q: clk compared against p + * + * Returns true if the two struct clk pointers both point to the same hardware + * clock node. Put differently, returns true if struct clk *p and struct clk *q + * share the same struct clk_core object. + * + * Returns false otherwise. Note that two NULL clks are treated as matching. 
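As a usage illustration for the clk_is_match() helper documented just above (not part of the patch; the function, device and clock names below are invented, and both handles are assumed to come from clk_get() or devm_clk_get()), a driver that previously compared struct clk pointers directly might switch to the new helper like this:

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical driver helper: with per-user clk handles, two struct clk
 * pointers can differ even when they name the same hardware clock, so
 * compare them with clk_is_match() rather than "bus_clk == core_clk".
 */
static void example_report_shared_clock(struct device *dev,
					struct clk *bus_clk,
					struct clk *core_clk)
{
	if (clk_is_match(bus_clk, core_clk))
		dev_info(dev, "bus and core run from the same clock\n");
}
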
+ */ +bool clk_is_match(const struct clk *p, const struct clk *q); + #else static inline long clk_get_accuracy(struct clk *clk) @@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk) return -ENOTSUPP; } +static inline bool clk_is_match(const struct clk *p, const struct clk *q) +{ + return p == q; +} + #endif /** diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 2646aed1d3fe..fd23978d93fe 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md); */ struct mapped_device *dm_get_md(dev_t dev); void dm_get(struct mapped_device *md); +int dm_hold(struct mapped_device *md); void dm_put(struct mapped_device *md); /* diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 800544bc7bfd..781974afff9f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -166,6 +166,11 @@ #define GITS_TRANSLATER 0x10040 +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) #define GITS_CBASER_VALID (1UL << 63) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72ba725ddf9c..5bb074431eb0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -5,6 +5,7 @@ struct kmem_cache; struct page; +struct vm_struct; #ifdef CONFIG_KASAN @@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size); void kasan_slab_alloc(struct kmem_cache *s, void *object); void kasan_slab_free(struct kmem_cache *s, void *object); -#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) - int kasan_module_alloc(void *addr, size_t size); -void kasan_module_free(void *addr); +void kasan_free_shadow(const struct vm_struct *vm); #else /* CONFIG_KASAN */ -#define MODULE_ALIGN 1 - static inline void kasan_unpoison_shadow(const void *address, size_t size) {} static inline void kasan_enable_current(void) {} @@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_module_free(void *addr) {} +static inline void kasan_free_shadow(const struct vm_struct *vm) {} #endif /* CONFIG_KASAN */ diff --git a/include/linux/libata.h b/include/linux/libata.h index fc03efa64ffe..6b08cc106c21 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -232,6 +232,7 @@ enum { * led */ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ + ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */ /* bits 24:31 of ap->flags are reserved for LLD specific flags */ diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index fb0390a1a498..ee7b1ce7a6f8 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -2999,6 +2999,9 @@ enum usb_irq_events { #define PALMAS_GPADC_TRIM15 0x0E #define PALMAS_GPADC_TRIM16 0x0F +/* TPS659038 regen2_ctrl offset iss different from palmas */ +#define TPS659038_REGEN2_CTRL 0x12 + /* TPS65917 Interrupt registers */ /* Registers for function INTERRUPT */ diff --git a/include/linux/module.h b/include/linux/module.h index 42999fe2dbd0..b03485bcb82a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ 
-344,6 +344,10 @@ struct module { unsigned long *ftrace_callsites; #endif +#ifdef CONFIG_LIVEPATCH + bool klp_alive; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head source_list; diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index f7556261fe3c..4d0cb9bba93e 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); + +#ifdef CONFIG_KASAN +#include <linux/kasan.h> +#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) +#else +#define MODULE_ALIGN PAGE_SIZE +#endif + #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 429d1790a27e..dcf6ec27739b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * Used to add FDB entries to dump requests. Implementers should add * entries to skb and update idx with the number of entries. * - * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) + * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, * struct net_device *dev, u32 filter_mask) + * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags); * * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); * Called to change device carrier. Soft-devices (like dummy, team, etc) diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 8a860f096c35..611a691145c4 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root, static inline void of_platform_depopulate(struct device *parent) { } #endif -#ifdef CONFIG_OF_DYNAMIC +#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) extern void of_platform_register_reconfig_notifier(void); #else static inline void of_platform_register_reconfig_notifier(void) { } diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 72c0415d6c21..18eccefea06e 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio) static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline void pinctrl_put(struct pinctrl *p) @@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state( struct pinctrl *p, const char *name) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline int pinctrl_select_state(struct pinctrl *p, @@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p, static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline void devm_pinctrl_put(struct pinctrl *p) diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d4ad5b5a02bb..045f709cb89b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -316,7 +316,7 @@ struct regulator_desc { * @driver_data: private regulator data * @of_node: OpenFirmware node to parse for device tree bindings (may be * NULL). 
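The pinctrl consumer stubs above now return NULL instead of ERR_PTR(-ENOSYS), so on kernels built without CONFIG_PINCTRL the dummy handles pass the usual IS_ERR() checks and the later calls degrade to no-ops. A hedged sketch of consumer code that works either way (the probe helper and state name are illustrative only, and it assumes the stubbed pinctrl_select_state() simply returns 0):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int example_setup_pins(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);		/* NULL when !CONFIG_PINCTRL */
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, "default");	/* NULL when !CONFIG_PINCTRL */
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);	/* no-op, returns 0, without pinctrl */
}
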
- * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is + * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is * insufficient. * @ena_gpio_initialized: GPIO controlling regulator enable was properly * initialized, meaning that >= 0 is a valid gpio diff --git a/include/linux/sched.h b/include/linux/sched.h index 6d77432e14ff..a419b65770d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1625,11 +1625,11 @@ struct task_struct { /* * numa_faults_locality tracks if faults recorded during the last - * scan window were remote/local. The task scan period is adapted - * based on the locality of the faults with different weights - * depending on whether they were shared or private faults + * scan window were remote/local or failed to migrate. The task scan + * period is adapted based on the locality of the faults with different + * weights depending on whether they were shared or private faults */ - unsigned long numa_faults_locality[2]; + unsigned long numa_faults_locality[3]; unsigned long numa_pages_migrated; #endif /* CONFIG_NUMA_BALANCING */ @@ -1719,6 +1719,7 @@ struct task_struct { #define TNF_NO_GROUP 0x02 #define TNF_SHARED 0x04 #define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 30007afe70b3..f54d6659713a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; +static inline void skb_sender_cpu_clear(struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + skb->sender_cpu = 0; +#endif +} + #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ed9489d893a4..856d34dde79b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -649,7 +649,7 @@ struct spi_transfer { * sequence completes. On some systems, many such sequences can execute as * as single programmed DMA transfer. On all systems, these messages are * queued, and might complete after transactions to other devices. Messages - * sent to a given spi_device are alway executed in FIFO order. + * sent to a given spi_device are always executed in FIFO order. * * The code that submits an spi_message (and its spi_transfers) * to the lower layers is responsible for managing its memory. diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index aadc6a04e1ac..807371357160 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -142,12 +142,18 @@ typedef __be32 rpc_fraghdr; (RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4)) /* - * RFC1833/RFC3530 rpcbind (v3+) well-known netid's. + * Well-known netids. 
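One of the hunks above adds skb_sender_cpu_clear(), which compiles away unless CONFIG_XPS is enabled. A rough sketch of the kind of forwarding path that would call it (the function and device names are made up; the real call sites are not part of this excerpt):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: clear the cached XPS sender CPU when an skb is
 * handed to a different net_device, so stale queue-selection hints
 * from the original device are not reused.
 */
static int example_forward_skb(struct sk_buff *skb, struct net_device *to)
{
	skb->dev = to;
	skb_sender_cpu_clear(skb);	/* no-op unless CONFIG_XPS */
	return dev_queue_xmit(skb);
}
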
See: + * + * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml */ #define RPCBIND_NETID_UDP "udp" #define RPCBIND_NETID_TCP "tcp" +#define RPCBIND_NETID_RDMA "rdma" +#define RPCBIND_NETID_SCTP "sctp" #define RPCBIND_NETID_UDP6 "udp6" #define RPCBIND_NETID_TCP6 "tcp6" +#define RPCBIND_NETID_RDMA6 "rdma6" +#define RPCBIND_NETID_SCTP6 "sctp6" #define RPCBIND_NETID_LOCAL "local" /* diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 64a0a0a97b23..c984c85981ea 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -41,11 +41,6 @@ #define _LINUX_SUNRPC_XPRTRDMA_H /* - * rpcbind (v3+) RDMA netid. - */ -#define RPCBIND_NETID_RDMA "rdma" - -/* * Constants. Max RPC/NFS header is big enough to account for * additional marshaling buffers passed down by Linux client. * diff --git a/include/linux/uio.h b/include/linux/uio.h index 07a022641996..71880299ed48 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); +const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); + static inline size_t iov_iter_count(struct iov_iter *i) { return i->count; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d7acb35603d..0ec598381f97 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ +#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ /* bits [20..32] reserved for arch specific ioremap internals */ /* diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 74db135f9957..f597846ff605 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -70,7 +70,8 @@ enum { /* data contains off-queue information when !WORK_STRUCT_PWQ */ WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, - WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), + __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, + WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), /* * When a work item is off queue, its high bits point to the last diff --git a/include/net/dst.h b/include/net/dst.h index a8ae4e760778..0fb99a26e973 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -481,6 +481,7 @@ void dst_init(void); enum { XFRM_LOOKUP_ICMP = 1 << 0, XFRM_LOOKUP_QUEUE = 1 << 1, + XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, }; struct flowi; diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 534e1f2ac4fc..57639fca223a 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -79,6 +79,16 @@ void nf_log_packet(struct net *net, const struct nf_loginfo *li, const char *fmt, ...); +__printf(8, 9) +void nf_log_trace(struct net *net, + u_int8_t pf, + unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct nf_loginfo *li, + const char *fmt, ...); + struct nf_log_buf; struct nf_log_buf *nf_log_buf_open(void); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 9eaaa7884586..decb9a095ae7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ 
-119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, const struct nft_data *data, enum nft_data_types type); + +/** + * struct nft_userdata - user defined data associated with an object + * + * @len: length of the data + * @data: content + * + * The presence of user data is indicated in an object specific fashion, + * so a length of zero can't occur and the value "len" indicates data + * of length len + 1. + */ +struct nft_userdata { + u8 len; + unsigned char data[0]; +}; + /** * struct nft_set_elem - generic representation of set elements * @@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) * @handle: rule handle * @genmask: generation mask * @dlen: length of expression data - * @ulen: length of user data (used for comments) + * @udata: user data is appended to the rule * @data: expression data */ struct nft_rule { @@ -388,7 +404,7 @@ struct nft_rule { u64 handle:42, genmask:2, dlen:12, - ulen:8; + udata:1; unsigned char data[] __attribute__((aligned(__alignof__(struct nft_expr)))); }; @@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) return (struct nft_expr *)&rule->data[rule->dlen]; } -static inline void *nft_userdata(const struct nft_rule *rule) +static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) { return (void *)&rule->data[rule->dlen]; } diff --git a/include/net/vxlan.h b/include/net/vxlan.h index eabd3a038674..c73e7abbbaa5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -91,6 +91,7 @@ struct vxlanhdr { #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) +#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) struct vxlan_metadata { diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h index 0210797abf2e..dc10c52e0e91 100644 --- a/include/soc/at91/at91sam9_ddrsdr.h +++ b/include/soc/at91/at91sam9_ddrsdr.h @@ -92,7 +92,7 @@ #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ -#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ +#define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */ #define AT91_DDRSDRC_MD_SDR 0 #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index db81c65b8f48..d61be7297b2c 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -111,6 +111,7 @@ void array_free(void *array, int n); void target_core_setup_sub_cits(struct se_subsystem_api *); /* attribute helpers from target_core_device.c for backend drivers */ +bool se_dev_check_wce(struct se_device *); int se_dev_set_max_unmap_lba_count(struct se_device *, u32); int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); int se_dev_set_unmap_granularity(struct se_device *, u32); diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h index 23d561512f64..22317d2b52ab 100644 --- a/include/trace/events/regmap.h +++ b/include/trace/events/regmap.h @@ -7,27 +7,26 @@ #include <linux/ktime.h> #include <linux/tracepoint.h> -struct device; -struct regmap; +#include "../../../drivers/base/regmap/internal.h" /* * Log register events */ DECLARE_EVENT_CLASS(regmap_reg, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, 
unsigned int val), - TP_ARGS(dev, reg, val), + TP_ARGS(map, reg, val), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __field( unsigned int, reg ) - __field( unsigned int, val ) + __string( name, regmap_name(map) ) + __field( unsigned int, reg ) + __field( unsigned int, val ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->reg = reg; __entry->val = val; ), @@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg, DEFINE_EVENT(regmap_reg, regmap_reg_write, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val), - TP_ARGS(dev, reg, val) + TP_ARGS(map, reg, val) ); DEFINE_EVENT(regmap_reg, regmap_reg_read, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val), - TP_ARGS(dev, reg, val) + TP_ARGS(map, reg, val) ); DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, - TP_PROTO(struct device *dev, unsigned int reg, + TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val), - TP_ARGS(dev, reg, val) + TP_ARGS(map, reg, val) ); DECLARE_EVENT_CLASS(regmap_block, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count), + TP_ARGS(map, reg, count), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __field( unsigned int, reg ) - __field( int, count ) + __string( name, regmap_name(map) ) + __field( unsigned int, reg ) + __field( int, count ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->reg = reg; __entry->count = count; ), @@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block, DEFINE_EVENT(regmap_block, regmap_hw_read_start, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_block, regmap_hw_read_done, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_block, regmap_hw_write_start, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_block, regmap_hw_write_done, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); TRACE_EVENT(regcache_sync, - TP_PROTO(struct device *dev, const char *type, + TP_PROTO(struct regmap *map, const char *type, const char *status), - TP_ARGS(dev, type, status), + TP_ARGS(map, type, status), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __string( status, status ) - __string( type, type ) - __field( int, type ) + __string( name, regmap_name(map) ) + __string( status, status ) + __string( type, type ) + __field( int, type ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __assign_str(status, status); __assign_str(type, type); ), @@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync, DECLARE_EVENT_CLASS(regmap_bool, - TP_PROTO(struct device *dev, bool flag), + TP_PROTO(struct regmap *map, bool flag), - TP_ARGS(dev, flag), + TP_ARGS(map, flag), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __field( int, flag ) + __string( name, 
regmap_name(map) ) + __field( int, flag ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->flag = flag; ), @@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool, DEFINE_EVENT(regmap_bool, regmap_cache_only, - TP_PROTO(struct device *dev, bool flag), + TP_PROTO(struct regmap *map, bool flag), - TP_ARGS(dev, flag) + TP_ARGS(map, flag) ); DEFINE_EVENT(regmap_bool, regmap_cache_bypass, - TP_PROTO(struct device *dev, bool flag), + TP_PROTO(struct regmap *map, bool flag), - TP_ARGS(dev, flag) + TP_ARGS(map, flag) ); DECLARE_EVENT_CLASS(regmap_async, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev), + TP_ARGS(map), TP_STRUCT__entry( - __string( name, dev_name(dev) ) + __string( name, regmap_name(map) ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); ), TP_printk("%s", __get_str(name)) @@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async, DEFINE_EVENT(regmap_block, regmap_async_write_start, - TP_PROTO(struct device *dev, unsigned int reg, int count), + TP_PROTO(struct regmap *map, unsigned int reg, int count), - TP_ARGS(dev, reg, count) + TP_ARGS(map, reg, count) ); DEFINE_EVENT(regmap_async, regmap_async_io_complete, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev) + TP_ARGS(map) ); DEFINE_EVENT(regmap_async, regmap_async_complete_start, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev) + TP_ARGS(map) ); DEFINE_EVENT(regmap_async, regmap_async_complete_done, - TP_PROTO(struct device *dev), + TP_PROTO(struct regmap *map), - TP_ARGS(dev) + TP_ARGS(map) ); TRACE_EVENT(regcache_drop_region, - TP_PROTO(struct device *dev, unsigned int from, + TP_PROTO(struct regmap *map, unsigned int from, unsigned int to), - TP_ARGS(dev, from, to), + TP_ARGS(map, from, to), TP_STRUCT__entry( - __string( name, dev_name(dev) ) - __field( unsigned int, from ) - __field( unsigned int, to ) + __string( name, regmap_name(map) ) + __field( unsigned int, from ) + __field( unsigned int, to ) ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name, regmap_name(map)); __entry->from = from; __entry->to = to; ), diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 3c53eec4ae22..19c66fcbab8a 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -60,7 +60,7 @@ struct virtio_blk_config { __u32 size_max; /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ __u32 seg_max; - /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ + /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ struct virtio_blk_geometry { __u16 cylinders; __u8 heads; @@ -119,7 +119,11 @@ struct virtio_blk_config { #define VIRTIO_BLK_T_BARRIER 0x80000000 #endif /* !VIRTIO_BLK_NO_LEGACY */ -/* This is the first element of the read scatter-gather list. */ +/* + * This comes first in the read scatter-gather list. + * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, + * this is the first element of the read scatter-gather list. 
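As an illustration of what the virtio_blk_outhdr layout rule above means for a legacy driver (sketch only; the virtqueue, header and buffer are assumed to exist in the caller, and a real request would also append a status byte as an in-buffer):

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>

/* Sketch: for legacy devices without VIRTIO_F_ANY_LAYOUT, the outhdr
 * must be the first out descriptor, followed by the data buffer.
 */
static int example_queue_write(struct virtqueue *vq,
			       struct virtio_blk_outhdr *hdr,
			       void *data, unsigned int len, void *cookie)
{
	struct scatterlist hdr_sg, data_sg, *sgs[2];

	sg_init_one(&hdr_sg, hdr, sizeof(*hdr));
	sg_init_one(&data_sg, data, len);
	sgs[0] = &hdr_sg;
	sgs[1] = &data_sg;

	return virtqueue_add_sgs(vq, sgs, 2, 0, cookie, GFP_ATOMIC);
}
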
+ */ struct virtio_blk_outhdr { /* VIRTIO_BLK_T* */ __virtio32 type; diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index 42b9370771b0..cc18ef8825c0 100644 --- a/include/uapi/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h @@ -29,8 +29,16 @@ #include <linux/virtio_types.h> -#define VIRTIO_SCSI_CDB_SIZE 32 -#define VIRTIO_SCSI_SENSE_SIZE 96 +/* Default values of the CDB and sense data size configuration fields */ +#define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32 +#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96 + +#ifndef VIRTIO_SCSI_CDB_SIZE +#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE +#endif +#ifndef VIRTIO_SCSI_SENSE_SIZE +#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE +#endif /* SCSI command request, followed by data-out */ struct virtio_scsi_cmd_req { diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 60de61fea8e3..c8ed15daad02 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -689,6 +689,7 @@ struct omapdss_dsi_ops { }; struct omap_dss_device { + struct kobject kobj; struct device *dev; struct module *owner; diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b78f21caf55a..b0f1c9e5d687 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv, const char *mod_name); #define xenbus_register_frontend(drv) \ - __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); + __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME) #define xenbus_register_backend(drv) \ - __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); + __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME) void xenbus_unregister_driver(struct xenbus_driver *drv); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1d1fe9361d29..fc7f4748d34a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, rcu_read_lock(); cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { - if (cp == root_cs) - continue; - /* skip the whole subtree if @cp doesn't have any CPU */ if (cpumask_empty(cp->cpus_allowed)) { pos_css = css_rightmost_descendant(pos_css); @@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) * If it becomes empty, inherit the effective mask of the * parent, which is guaranteed to have some CPUs. */ - if (cpumask_empty(new_cpus)) + if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus)) cpumask_copy(new_cpus, parent->effective_cpus); /* Skip the whole subtree if the cpumask remains the same. */ @@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) * If it becomes empty, inherit the effective mask of the * parent, which is guaranteed to have some MEMs. */ - if (nodes_empty(*new_mems)) + if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems)) *new_mems = parent->effective_mems; /* Skip the whole subtree if the nodemask remains the same. 
*/ @@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) spin_lock_irq(&callback_lock); cs->mems_allowed = parent->mems_allowed; + cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); + cpumask_copy(cs->effective_cpus, parent->cpus_allowed); spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); diff --git a/kernel/events/core.c b/kernel/events/core.c index f04daabfd1cf..2fabc0627165 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event) ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); WARN_ON_ONCE(ctx->parent_ctx); perf_remove_from_context(event, true); - mutex_unlock(&ctx->mutex); + perf_event_ctx_unlock(event, ctx); _free_event(event); } @@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry) { struct perf_event *event = container_of(entry, struct perf_event, pending); + int rctx; + + rctx = perf_swevent_get_recursion_context(); + /* + * If we 'fail' here, that's OK, it means recursion is already disabled + * and we won't recurse 'further'. + */ if (event->pending_disable) { event->pending_disable = 0; @@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry) event->pending_wakeup = 0; perf_event_wakeup(event); } + + if (rctx >= 0) + perf_swevent_put_recursion_context(rctx); } /* diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 01ca08804f51..3f9f1d6b4c2e 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj) /* sets obj->mod if object is not vmlinux and module is found */ static void klp_find_object_module(struct klp_object *obj) { + struct module *mod; + if (!klp_is_module(obj)) return; mutex_lock(&module_mutex); /* - * We don't need to take a reference on the module here because we have - * the klp_mutex, which is also taken by the module notifier. This - * prevents any module from unloading until we release the klp_mutex. + * We do not want to block removal of patched modules and therefore + * we do not take a reference here. The patches are removed by + * a going module handler instead. + */ + mod = find_module(obj->name); + /* + * Do not mess work of the module coming and going notifiers. + * Note that the patch might still be needed before the going handler + * is called. Module functions can be called even in the GOING state + * until mod->exit() finishes. This is especially important for + * patches that modify semantic of the functions. */ - obj->mod = find_module(obj->name); + if (mod && mod->klp_alive) + obj->mod = mod; + mutex_unlock(&module_mutex); } @@ -767,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) return -EINVAL; obj->state = KLP_DISABLED; + obj->mod = NULL; klp_find_object_module(obj); @@ -961,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action, mutex_lock(&klp_mutex); + /* + * Each module has to know that the notifier has been called. + * We never know what module will get patched by a new patch. 
+ */ + if (action == MODULE_STATE_COMING) + mod->klp_alive = true; + else /* MODULE_STATE_GOING */ + mod->klp_alive = false; + list_for_each_entry(patch, &klp_patches, list) { for (obj = patch->objs; obj->funcs; obj++) { if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 88d0d4420ad2..ba77ab5f64dd 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class) if (!new_class->name) return 0; - list_for_each_entry(class, &all_lock_classes, lock_entry) { + list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { if (new_class->key - new_class->subclass == class->key) return class->name_version; if (class->name && !strcmp(class->name, new_class->name)) @@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) hash_head = classhashentry(key); /* - * We can walk the hash lockfree, because the hash only - * grows, and we are careful when adding entries to the end: + * We do an RCU walk of the hash, see lockdep_free_key_range(). */ - list_for_each_entry(class, hash_head, hash_entry) { + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return NULL; + + list_for_each_entry_rcu(class, hash_head, hash_entry) { if (class->key == key) { /* * Huh! same key, different name? Did someone trample @@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) struct lockdep_subclass_key *key; struct list_head *hash_head; struct lock_class *class; - unsigned long flags; + + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); class = look_up_lock_class(lock, subclass); if (likely(class)) @@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) key = lock->key->subkeys + subclass; hash_head = classhashentry(key); - raw_local_irq_save(flags); if (!graph_lock()) { - raw_local_irq_restore(flags); return NULL; } /* * We have to do the hash-walk again, to avoid races * with another CPU: */ - list_for_each_entry(class, hash_head, hash_entry) + list_for_each_entry_rcu(class, hash_head, hash_entry) { if (class->key == key) goto out_unlock_set; + } + /* * Allocate a new key from the static array, and add it to * the hash: */ if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { if (!debug_locks_off_graph_unlock()) { - raw_local_irq_restore(flags); return NULL; } - raw_local_irq_restore(flags); print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); dump_stack(); @@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) if (verbose(class)) { graph_unlock(); - raw_local_irq_restore(flags); printk("\nnew class %p: %s", class->key, class->name); if (class->name_version > 1) @@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) printk("\n"); dump_stack(); - raw_local_irq_save(flags); if (!graph_lock()) { - raw_local_irq_restore(flags); return NULL; } } out_unlock_set: graph_unlock(); - raw_local_irq_restore(flags); out_set_class_cache: if (!subclass || force) @@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, entry->distance = distance; entry->trace = *trace; /* - * Since we never remove from the dependency list, the list can - * be walked lockless by other CPUs, it's only allocation - * that must be protected by the spinlock. 
But this also means - * we must make new entries visible only once writes to the - * entry become visible - hence the RCU op: + * Both allocation and removal are done under the graph lock; but + * iteration is under RCU-sched; see look_up_lock_class() and + * lockdep_free_key_range(). */ list_add_tail_rcu(&entry->entry, head); @@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry, else head = &lock->class->locks_before; - list_for_each_entry(entry, head, entry) { + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + + list_for_each_entry_rcu(entry, head, entry) { if (!lock_accessed(entry)) { unsigned int cq_depth; mark_lock_accessed(entry, lock); @@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, * We can walk it lock-free, because entries only get added * to the hash: */ - list_for_each_entry(chain, hash_head, entry) { + list_for_each_entry_rcu(chain, hash_head, entry) { if (chain->chain_key == chain_key) { cache_hit: debug_atomic_inc(chain_lookup_hits); @@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, if (unlikely(!debug_locks)) return; - if (subclass) + if (subclass) { + unsigned long flags; + + if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) + return; + + raw_local_irq_save(flags); + current->lockdep_recursion = 1; register_lock_class(lock, subclass, 1); + current->lockdep_recursion = 0; + raw_local_irq_restore(flags); + } } EXPORT_SYMBOL_GPL(lockdep_init_map); @@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size) return addr >= start && addr < start + size; } +/* + * Used in module.c to remove lock classes from memory that is going to be + * freed; and possibly re-used by other modules. + * + * We will have had one sync_sched() before getting here, so we're guaranteed + * nobody will look up these exact classes -- they're properly dead but still + * allocated. + */ void lockdep_free_key_range(void *start, unsigned long size) { - struct lock_class *class, *next; + struct lock_class *class; struct list_head *head; unsigned long flags; int i; @@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size) head = classhash_table + i; if (list_empty(head)) continue; - list_for_each_entry_safe(class, next, head, hash_entry) { + list_for_each_entry_rcu(class, head, hash_entry) { if (within(class->key, start, size)) zap_class(class); else if (within(class->name, start, size)) @@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size) if (locked) graph_unlock(); raw_local_irq_restore(flags); + + /* + * Wait for any possible iterators from look_up_lock_class() to pass + * before continuing to free the memory they refer to. + * + * sync_sched() is sufficient because the read-side is IRQ disable. + */ + synchronize_sched(); + + /* + * XXX at this point we could return the resources to the pool; + * instead we leak them. We would need to change to bitmap allocators + * instead of the linear allocators we have now. 
+ */ } void lockdep_reset_lock(struct lockdep_map *lock) { - struct lock_class *class, *next; + struct lock_class *class; struct list_head *head; unsigned long flags; int i, j; @@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock) head = classhash_table + i; if (list_empty(head)) continue; - list_for_each_entry_safe(class, next, head, hash_entry) { + list_for_each_entry_rcu(class, head, hash_entry) { int match = 0; for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) diff --git a/kernel/module.c b/kernel/module.c index cc93cf68653c..99fdf94efce8 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -56,7 +56,6 @@ #include <linux/async.h> #include <linux/percpu.h> #include <linux/kmemleak.h> -#include <linux/kasan.h> #include <linux/jump_label.h> #include <linux/pfn.h> #include <linux/bsearch.h> @@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { } void __weak module_memfree(void *module_region) { vfree(module_region); - kasan_module_free(module_region); } void __weak module_arch_cleanup(struct module *mod) @@ -1867,7 +1865,7 @@ static void free_module(struct module *mod) kfree(mod->args); percpu_modfree(mod); - /* Free lock-classes: */ + /* Free lock-classes; relies on the preceding sync_rcu(). */ lockdep_free_key_range(mod->module_core, mod->core_size); /* Finally, free the core (containing the module structure) */ @@ -3351,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs, module_bug_cleanup(mod); mutex_unlock(&module_mutex); - /* Free lock-classes: */ - lockdep_free_key_range(mod->module_core, mod->core_size); - /* we can't deallocate the module until we clear memory protection */ unset_module_init_ro_nx(mod); unset_module_core_ro_nx(mod); @@ -3377,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs, synchronize_rcu(); mutex_unlock(&module_mutex); free_module: + /* Free lock-classes; relies on the preceding sync_rcu() */ + lockdep_free_key_range(mod->module_core, mod->core_size); + module_deallocate(mod, info); free_copy: free_copy(info); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f0f831e8a345..62671f53202a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) } else { if (dl_prio(oldprio)) p->dl.dl_boosted = 0; + if (rt_prio(oldprio)) + p->rt.timeout = 0; p->sched_class = &fair_sched_class; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7ce18f3c097a..bcfe32088b37 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p, /* * If there were no record hinting faults then either the task is * completely idle or all activity is areas that are not of interest - * to automatic numa balancing. Scan slower + * to automatic numa balancing. Related to that, if there were failed + * migration then it implies we are migrating too quickly or the local + * node is overloaded. 
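As a sketch of how the new numa_faults_locality[2] bookkeeping is fed (the wrapper below is hypothetical and simply mirrors the fault-path pattern this series adds to do_numa_page() and do_huge_pmd_numa_page()):

#include <linux/sched.h>

/* Hypothetical wrapper around task_numa_fault(): a failed migration is
 * reported with TNF_MIGRATE_FAIL, lands in numa_faults_locality[2], and
 * update_task_scan_period() then treats it as a reason to scan slower.
 */
static void example_record_numa_fault(int last_cpupid, int page_nid,
				      int target_nid, int nr_pages,
				      bool migrated)
{
	int flags = 0;

	if (migrated)
		flags |= TNF_MIGRATED;
	else
		flags |= TNF_MIGRATE_FAIL;

	task_numa_fault(last_cpupid, migrated ? target_nid : page_nid,
			nr_pages, flags);
}
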
In either case, scan slower */ - if (local + shared == 0) { + if (local + shared == 0 || p->numa_faults_locality[2]) { p->numa_scan_period = min(p->numa_scan_period_max, p->numa_scan_period << 1); @@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) if (migrated) p->numa_pages_migrated += pages; + if (flags & TNF_MIGRATE_FAIL) + p->numa_faults_locality[2] += pages; p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index eb682d5c697c..6aac4beedbbe 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c @@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode, */ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) { + int bc_moved; /* * We try to cancel the timer first. If the callback is on * flight on some other cpu then we let it handle it. If we @@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) * restart the timer because we are in the callback, but we * can set the expiry time and let the callback return * HRTIMER_RESTART. + * + * Since we are in the idle loop at this point and because + * hrtimer_{start/cancel} functions call into tracing, + * calls to these functions must be bound within RCU_NONIDLE. */ - if (hrtimer_try_to_cancel(&bctimer) >= 0) { - hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); + RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ? + !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) : + 0); + if (bc_moved) { /* Bind the "device" to the cpu */ bc->bound_on = smp_processor_id(); } else if (bc->bound_on == smp_processor_id()) { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 45e5cb143d17..4f228024055b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) static struct pid * const ftrace_swapper_pid = &init_struct_pid; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static int ftrace_graph_active; +#else +# define ftrace_graph_active 0 +#endif + #ifdef CONFIG_DYNAMIC_FTRACE static struct ftrace_ops *removed_ops; @@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) if (!ftrace_rec_count(rec)) rec->flags = 0; else - /* Just disable the record (keep REGS state) */ - rec->flags &= ~FTRACE_FL_ENABLED; + /* + * Just disable the record, but keep the ops TRAMP + * and REGS states. The _EN flags must be disabled though. 
+ */ + rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | + FTRACE_FL_REGS_EN); } return FTRACE_UPDATE_MAKE_NOP; @@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) static void ftrace_startup_sysctl(void) { + int command; + if (unlikely(ftrace_disabled)) return; /* Force update next time */ saved_ftrace_func = NULL; /* ftrace_start_up is true if we want ftrace running */ - if (ftrace_start_up) - ftrace_run_update_code(FTRACE_UPDATE_CALLS); + if (ftrace_start_up) { + command = FTRACE_UPDATE_CALLS; + if (ftrace_graph_active) + command |= FTRACE_START_FUNC_RET; + ftrace_startup_enable(command); + } } static void ftrace_shutdown_sysctl(void) { + int command; + if (unlikely(ftrace_disabled)) return; /* ftrace_start_up is true if ftrace is running */ - if (ftrace_start_up) - ftrace_run_update_code(FTRACE_DISABLE_CALLS); + if (ftrace_start_up) { + command = FTRACE_DISABLE_CALLS; + if (ftrace_graph_active) + command |= FTRACE_STOP_FUNC_RET; + ftrace_run_update_code(command); + } } static cycle_t ftrace_update_time; @@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, if (ftrace_enabled) { - ftrace_startup_sysctl(); - /* we are starting ftrace again */ if (ftrace_ops_list != &ftrace_list_end) update_ftrace_function(); + ftrace_startup_sysctl(); + } else { /* stopping ftrace calls (just send to ftrace_stub) */ ftrace_trace_function = ftrace_stub; @@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = { ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) }; -static int ftrace_graph_active; - int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) { return 0; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f28849394791..41ff75b478c6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work) } EXPORT_SYMBOL_GPL(flush_work); +struct cwt_wait { + wait_queue_t wait; + struct work_struct *work; +}; + +static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) +{ + struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); + + if (cwait->work != key) + return 0; + return autoremove_wake_function(wait, mode, sync, key); +} + static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) { + static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); unsigned long flags; int ret; do { ret = try_to_grab_pending(work, is_dwork, &flags); /* - * If someone else is canceling, wait for the same event it - * would be waiting for before retrying. + * If someone else is already canceling, wait for it to + * finish. flush_work() doesn't work for PREEMPT_NONE + * because we may get scheduled between @work's completion + * and the other canceling task resuming and clearing + * CANCELING - flush_work() will return false immediately + * as @work is no longer busy, try_to_grab_pending() will + * return -ENOENT as @work is still being canceled and the + * other canceling task won't be able to clear CANCELING as + * we're hogging the CPU. + * + * Let's wait for completion using a waitqueue. As this + * may lead to the thundering herd problem, use a custom + * wake function which matches @work along with exclusive + * wait and wakeup. 
*/ - if (unlikely(ret == -ENOENT)) - flush_work(work); + if (unlikely(ret == -ENOENT)) { + struct cwt_wait cwait; + + init_wait(&cwait.wait); + cwait.wait.func = cwt_wakefn; + cwait.work = work; + + prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, + TASK_UNINTERRUPTIBLE); + if (work_is_canceling(work)) + schedule(); + finish_wait(&cancel_waitq, &cwait.wait); + } } while (unlikely(ret < 0)); /* tell other tasks trying to grab @work to back off */ @@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) flush_work(work); clear_work_data(work); + + /* + * Paired with prepare_to_wait() above so that either + * waitqueue_active() is visible here or !work_is_canceling() is + * visible there. + */ + smp_mb(); + if (waitqueue_active(&cancel_waitq)) + __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); + return ret; } diff --git a/lib/Makefile b/lib/Makefile index 87eb3bffc283..58f74d2dd396 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -24,7 +24,7 @@ obj-y += lockref.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ - gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ + gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o diff --git a/mm/iov_iter.c b/lib/iov_iter.c index 827732047da1..9d96e283520c 100644 --- a/mm/iov_iter.c +++ b/lib/iov_iter.c @@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) return npages; } EXPORT_SYMBOL(iov_iter_npages); + +const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) +{ + *new = *old; + if (new->type & ITER_BVEC) + return new->bvec = kmemdup(new->bvec, + new->nr_segs * sizeof(struct bio_vec), + flags); + else + /* iovec and kvec have identical layout */ + return new->iov = kmemdup(new->iov, + new->nr_segs * sizeof(struct iovec), + flags); +} +EXPORT_SYMBOL(dup_iter); diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 7a85967060a5..f0f5c5c3de12 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize) /* Error: request to write beyond destination buffer */ if (cpy > oend) goto _output_error; + if ((ref + COPYLENGTH) > oend || + (op + COPYLENGTH) > oend) + goto _output_error; LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); while (op < cpy) *op++ = *ref++; diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 88c0854bd752..5c94e1012a91 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) if (s->len < s->size) { len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); - if (seq_buf_can_fit(s, len)) { + if (s->len + len < s->size) { s->len += len; return 0; } @@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) if (s->len < s->size) { ret = bstr_printf(s->buffer + s->len, len, fmt, binary); - if (seq_buf_can_fit(s, ret)) { + if (s->len + ret < s->size) { s->len += ret; return 0; } diff --git a/mm/Makefile b/mm/Makefile index 3c1caa2693bd..15dbe9903c27 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ compaction.o vmacache.o \ interval_tree.o list_lru.o workingset.o \ - iov_iter.o 
debug.o $(mmu-y) + debug.o $(mmu-y) obj-y += init-mm.o @@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) return (1UL << (align_order - cma->order_per_bit)) - 1; } +/* + * Find a PFN aligned to the specified order and return an offset represented in + * order_per_bits. + */ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) { - unsigned int alignment; - if (align_order <= cma->order_per_bit) return 0; - alignment = 1UL << (align_order - cma->order_per_bit); - return ALIGN(cma->base_pfn, alignment) - - (cma->base_pfn >> cma->order_per_bit); + + return (ALIGN(cma->base_pfn, (1UL << align_order)) + - cma->base_pfn) >> cma->order_per_bit; } static unsigned long cma_bitmap_maxno(struct cma *cma) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fc00c8cb5a82..6817b0350c71 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, int target_nid, last_cpupid = -1; bool page_locked; bool migrated = false; + bool was_writable; int flags = 0; /* A PROT_NONE fault should not end up here */ @@ -1291,12 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, flags |= TNF_FAULT_LOCAL; } - /* - * Avoid grouping on DSO/COW pages in specific and RO pages - * in general, RO pages shouldn't hurt as much anyway since - * they can be in shared cache state. - */ - if (!pmd_write(pmd)) + /* See similar comment in do_numa_page for explanation */ + if (!(vma->vm_flags & VM_WRITE)) flags |= TNF_NO_GROUP; /* @@ -1353,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, if (migrated) { flags |= TNF_MIGRATED; page_nid = target_nid; - } + } else + flags |= TNF_MIGRATE_FAIL; goto out; clear_pmdnuma: BUG_ON(!PageLocked(page)); + was_writable = pmd_write(pmd); pmd = pmd_modify(pmd, vma->vm_page_prot); + pmd = pmd_mkyoung(pmd); + if (was_writable) + pmd = pmd_mkwrite(pmd); set_pmd_at(mm, haddr, pmdp, pmd); update_mmu_cache_pmd(vma, addr, pmdp); unlock_page(page); @@ -1482,6 +1484,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { pmd_t entry; + bool preserve_write = prot_numa && pmd_write(*pmd); + ret = 1; /* * Avoid trapping faults against the zero page. 
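To make the rewritten cma_bitmap_aligned_offset() above concrete, here is a small standalone rework of the same arithmetic with one worked example (the input values are arbitrary and only for illustration):

#include <linux/kernel.h>	/* ALIGN() */

/* Same computation as the new cma_bitmap_aligned_offset(), with the
 * struct cma fields passed in directly so the example stands alone.
 */
static unsigned long example_aligned_offset(unsigned long base_pfn,
					    unsigned int align_order,
					    unsigned int order_per_bit)
{
	if (align_order <= order_per_bit)
		return 0;

	return (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
		>> order_per_bit;
}

/*
 * example_aligned_offset(1000, 8, 1):
 *   ALIGN(1000, 256) = 1024, 1024 - 1000 = 24 pages, 24 >> 1 = 12,
 *   so the first bitmap bit usable for an order-8 aligned range is bit 12.
 */
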
The read-only @@ -1490,16 +1494,17 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, */ if (prot_numa && is_huge_zero_pmd(*pmd)) { spin_unlock(ptl); - return 0; + return ret; } if (!prot_numa || !pmd_protnone(*pmd)) { - ret = 1; entry = pmdp_get_and_clear_notify(mm, addr, pmd); entry = pmd_modify(entry, newprot); + if (preserve_write) + entry = pmd_mkwrite(entry); ret = HPAGE_PMD_NR; set_pmd_at(mm, addr, pmd, entry); - BUG_ON(pmd_write(entry)); + BUG_ON(!preserve_write && pmd_write(entry)); } spin_unlock(ptl); } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a9ac6c26832..c41b2a0ee273 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) __SetPageHead(page); __ClearPageReserved(page); for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { - __SetPageTail(p); /* * For gigantic hugepages allocated through bootmem at * boot, it's safer to be consistent with the not-gigantic @@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) __ClearPageReserved(p); set_page_count(p, 0); p->first_page = page; + /* Make sure p->first_page is always valid for PageTail() */ + smp_wmb(); + __SetPageTail(p); } } diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 78fee632a7ee..936d81661c47 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -29,6 +29,7 @@ #include <linux/stacktrace.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/vmalloc.h> #include <linux/kasan.h> #include "kasan.h" @@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size) GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, __builtin_return_address(0)); - return ret ? 0 : -ENOMEM; + + if (ret) { + find_vm_area(addr)->flags |= VM_KASAN; + return 0; + } + + return -ENOMEM; } -void kasan_module_free(void *addr) +void kasan_free_shadow(const struct vm_struct *vm) { - vfree(kasan_mem_to_shadow(addr)); + if (vm->flags & VM_KASAN) + vfree(kasan_mem_to_shadow(vm->addr)); } static void register_global(struct kasan_global *global) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9fe07692eaad..b34ef4a32a3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) * on for the root memcg is enough. 
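/*
 * Editorial aside: the smp_wmb() added to prep_compound_gigantic_page() is the
 * standard publish ordering - initialize the payload (p->first_page) before
 * setting the flag that readers test (PageTail), so a PageTail() observer never
 * sees a stale first_page. A rough userspace analogy using C11 release/acquire
 * (illustrative only, not the kernel primitives):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tail {
        void *first_page;       /* payload, written before publication */
        atomic_bool is_tail;    /* stands in for the PageTail flag */
};

static void publish_tail(struct tail *t, void *head)
{
        t->first_page = head;
        /* release: the first_page store is ordered before the flag store */
        atomic_store_explicit(&t->is_tail, true, memory_order_release);
}

static void *read_tail(struct tail *t)
{
        /* acquire pairs with the release above */
        if (atomic_load_explicit(&t->is_tail, memory_order_acquire))
                return t->first_page;   /* guaranteed to see the published value */
        return NULL;
}

int main(void)
{
        static struct tail t;
        int head;

        publish_tail(&t, &head);
        printf("%s\n", read_tail(&t) == &head ? "visible" : "not yet");
        return 0;
}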
*/ if (cgroup_on_dfl(root_css->cgroup)) - mem_cgroup_from_css(root_css)->use_hierarchy = true; + root_mem_cgroup->use_hierarchy = true; + else + root_mem_cgroup->use_hierarchy = false; } static u64 memory_current_read(struct cgroup_subsys_state *css, diff --git a/mm/memory.c b/mm/memory.c index 8068893697bb..97839f5c8c30 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, int last_cpupid; int target_nid; bool migrated = false; + bool was_writable = pte_write(pte); int flags = 0; /* A PROT_NONE fault should not end up here */ @@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Make it present again */ pte = pte_modify(pte, vma->vm_page_prot); pte = pte_mkyoung(pte); + if (was_writable) + pte = pte_mkwrite(pte); set_pte_at(mm, addr, ptep, pte); update_mmu_cache(vma, addr, ptep); @@ -3069,11 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, } /* - * Avoid grouping on DSO/COW pages in specific and RO pages - * in general, RO pages shouldn't hurt as much anyway since - * they can be in shared cache state. + * Avoid grouping on RO pages in general. RO pages shouldn't hurt as + * much anyway since they can be in shared cache state. This misses + * the case where a mapping is writable but the process never writes + * to it but pte_write gets cleared during protection updates and + * pte_dirty has unpredictable behaviour between PTE scan updates, + * background writeback, dirty balancing and application behaviour. */ - if (!pte_write(pte)) + if (!(vma->vm_flags & VM_WRITE)) flags |= TNF_NO_GROUP; /* @@ -3097,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, if (migrated) { page_nid = target_nid; flags |= TNF_MIGRATED; - } + } else + flags |= TNF_MIGRATE_FAIL; out: if (page_nid != -1) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9fab10795bea..65842d688b7c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) return NULL; arch_refresh_nodedata(nid, pgdat); + } else { + /* Reset the nr_zones and classzone_idx to 0 before reuse */ + pgdat->nr_zones = 0; + pgdat->classzone_idx = 0; } /* we can use NODE_DATA(nid) from here */ @@ -1977,15 +1981,6 @@ void try_offline_node(int nid) if (is_vmalloc_addr(zone->wait_table)) vfree(zone->wait_table); } - - /* - * Since there is no way to guarentee the address of pgdat/zone is not - * on stack of any kernel threads or used by other kernel objects - * without reference counting or other symchronizing method, do not - * reset node_data and free pgdat here. Just reset it to 0 and reuse - * the memory when the node is online again. 
- */ - memset(pgdat, 0, sizeof(*pgdat)); } EXPORT_SYMBOL(try_offline_node); diff --git a/mm/mlock.c b/mm/mlock.c index 73cf0987088c..8a54cd214925 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -26,10 +26,10 @@ int can_do_mlock(void) { - if (capable(CAP_IPC_LOCK)) - return 1; if (rlimit(RLIMIT_MEMLOCK) != 0) return 1; + if (capable(CAP_IPC_LOCK)) + return 1; return 0; } EXPORT_SYMBOL(can_do_mlock); diff --git a/mm/mmap.c b/mm/mmap.c index da9990acc08b..9ec50a368634 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end); importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); - if (error) { - importer->anon_vma = NULL; + if (error) return error; - } } } diff --git a/mm/mprotect.c b/mm/mprotect.c index 44727811bf4c..88584838e704 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, oldpte = *pte; if (pte_present(oldpte)) { pte_t ptent; + bool preserve_write = prot_numa && pte_write(oldpte); /* * Avoid trapping faults against the zero or KSM @@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, ptent = ptep_modify_prot_start(mm, addr, pte); ptent = pte_modify(ptent, newprot); + if (preserve_write) + ptent = pte_mkwrite(ptent); /* Avoid taking write faults for known dirty pages */ if (dirty_accountable && pte_dirty(ptent) && diff --git a/mm/nommu.c b/mm/nommu.c index 3e67e7538ecf..3fba2dc97c44 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -62,6 +62,7 @@ void *high_memory; EXPORT_SYMBOL(high_memory); struct page *mem_map; unsigned long max_mapnr; +EXPORT_SYMBOL(max_mapnr); unsigned long highest_memmap_pfn; struct percpu_counter vm_committed_as; int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 45e187b2d971..644bcb665773 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, * bw * elapsed + write_bandwidth * (period - elapsed) * write_bandwidth = --------------------------------------------------- * period + * + * @written may have decreased due to account_page_redirty(). + * Avoid underflowing @bw calculation. 
*/ - bw = written - bdi->written_stamp; + bw = written - min(written, bdi->written_stamp); bw *= HZ; if (unlikely(elapsed > period)) { do_div(bw, elapsed); @@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh, unsigned long now) { static DEFINE_SPINLOCK(dirty_lock); - static unsigned long update_time; + static unsigned long update_time = INITIAL_JIFFIES; /* * check locklessly first to optimize away locking for the most time diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7abfa70cdc1a..40e29429e7b0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, goto out; } /* Exhausted what can be done so it's blamo time */ - if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) + if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) + || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) *did_some_progress = 1; out: oom_zonelist_unlock(ac->zonelist, gfp_mask); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 72f5ac381ab3..755a42c76eb4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) if (!is_migrate_isolate_page(buddy)) { __isolate_free_page(page, order); + kernel_map_pages(page, (1 << order), 1); set_page_refcounted(page); isolated_page = page; } diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 75c1f2878519..29f2f8b853ae 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end, vma = vma->vm_next; err = walk_page_test(start, next, walk); - if (err > 0) + if (err > 0) { + /* + * positive return values are purely for + * controlling the pagewalk, so should never + * be passed to the callers. + */ + err = 0; continue; + } if (err < 0) break; } diff --git a/mm/rmap.c b/mm/rmap.c index 5e3e09081164..c161a14b6a8f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) return 0; enomem_failure: + /* + * dst->anon_vma is dropped here otherwise its degree can be incorrectly + * decremented in unlink_anon_vmas(). + * We can safely do this because callers of anon_vma_clone() don't care + * about dst->anon_vma if anon_vma_clone() failed. 
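/*
 * Editorial aside: the min() in bdi_update_write_bandwidth() guards an unsigned
 * subtraction - if account_page_redirty() makes @written fall below
 * ->written_stamp, a plain "written - stamp" wraps around and the bandwidth
 * estimate explodes. Tiny standalone illustration with made-up counters:
 */
#include <stdio.h>

static unsigned long safe_delta(unsigned long now, unsigned long stamp)
{
        return now - (now < stamp ? now : stamp);       /* now - min(now, stamp) */
}

int main(void)
{
        unsigned long written = 1000, stamp = 1010;     /* counter went "backwards" */

        printf("raw : %lu\n", written - stamp);            /* wraps to a huge value */
        printf("safe: %lu\n", safe_delta(written, stamp)); /* 0 */
        return 0;
}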
+ */ + dst->anon_vma = NULL; unlink_anon_vmas(dst); return -ENOMEM; } diff --git a/mm/slub.c b/mm/slub.c index 6832c4eab104..82c473780c91 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2449,7 +2449,8 @@ redo: do { tid = this_cpu_read(s->cpu_slab->tid); c = raw_cpu_ptr(s->cpu_slab); - } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); + } while (IS_ENABLED(CONFIG_PREEMPT) && + unlikely(tid != READ_ONCE(c->tid))); /* * Irqless object alloc/free algorithm used here depends on sequence @@ -2718,7 +2719,8 @@ redo: do { tid = this_cpu_read(s->cpu_slab->tid); c = raw_cpu_ptr(s->cpu_slab); - } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); + } while (IS_ENABLED(CONFIG_PREEMPT) && + unlikely(tid != READ_ONCE(c->tid))); /* Same with comment on barrier() in slab_alloc_node() */ barrier(); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 35b25e1340ca..49abccf29a29 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr) spin_unlock(&vmap_area_lock); vmap_debug_free_range(va->va_start, va->va_end); + kasan_free_shadow(vm); free_unmap_vmap_area(va); vm->size -= PAGE_SIZE; diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index d8e376a5f0f1..36a1a739ad68 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -658,14 +658,30 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) static void p9_virtio_remove(struct virtio_device *vdev) { struct virtio_chan *chan = vdev->priv; - - if (chan->inuse) - p9_virtio_close(chan->client); - vdev->config->del_vqs(vdev); + unsigned long warning_time; mutex_lock(&virtio_9p_lock); + + /* Remove self from list so we don't get new users. */ list_del(&chan->chan_list); + warning_time = jiffies; + + /* Wait for existing users to close. 
*/ + while (chan->inuse) { + mutex_unlock(&virtio_9p_lock); + msleep(250); + if (time_after(jiffies, warning_time + 10 * HZ)) { + dev_emerg(&vdev->dev, + "p9_virtio_remove: waiting for device in use.\n"); + warning_time = jiffies; + } + mutex_lock(&virtio_9p_lock); + } + mutex_unlock(&virtio_9p_lock); + + vdev->config->del_vqs(vdev); + sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); kfree(chan->tag); diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index b087d278c679..1849d96b3c91 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) */ del_nbp(p); + dev_set_mtu(br->dev, br_min_mtu(br)); + spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); spin_unlock_bh(&br->lock); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 769b185fefbd..a6e2da0bc718 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, int copylen; ret = -EOPNOTSUPP; - if (m->msg_flags&MSG_OOB) + if (flags & MSG_OOB) goto read_error; skb = skb_recv_datagram(sk, flags, 0 , &ret); diff --git a/net/can/af_can.c b/net/can/af_can.c index 66e08040ced7..32d710eaf1fc 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop) goto inval_skb; } + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_transport_header(skb); diff --git a/net/compat.c b/net/compat.c index 94d3d5e97883..f7bd286a8280 100644 --- a/net/compat.c +++ b/net/compat.c @@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg, __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || __get_user(kmsg->msg_flags, &umsg->msg_flags)) return -EFAULT; + + if (!uaddr) + kmsg->msg_namelen = 0; + + if (kmsg->msg_namelen < 0) + return -EINVAL; + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); kmsg->msg_control = compat_ptr(tmp3); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 25b4b5d23485..ee0608bb3bc0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2166,28 +2166,28 @@ replay: } } err = rtnl_configure_link(dev, ifm); - if (err < 0) { - if (ops->newlink) { - LIST_HEAD(list_kill); - - ops->dellink(dev, &list_kill); - unregister_netdevice_many(&list_kill); - } else { - unregister_netdevice(dev); - } - goto out; - } - + if (err < 0) + goto out_unregister; if (link_net) { err = dev_change_net_namespace(dev, dest_net, ifname); if (err < 0) - unregister_netdevice(dev); + goto out_unregister; } out: if (link_net) put_net(link_net); put_net(dest_net); return err; +out_unregister: + if (ops->newlink) { + LIST_HEAD(list_kill); + + ops->dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); + } else { + unregister_netdevice(dev); + } + goto out; } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f80507823531..8e4ac97c8477 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3733,9 +3733,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, struct sock *sk, int tstype) { struct sk_buff *skb; - bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; + bool tsonly; - if (!sk || !skb_may_tx_timestamp(sk, tsonly)) + if (!sk) + return; + + tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; + if (!skb_may_tx_timestamp(sk, tsonly)) return; if (tsonly) @@ -4173,7 
+4177,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->ignore_df = 0; skb_dst_drop(skb); skb->mark = 0; - skb->sender_cpu = 0; + skb_sender_cpu_clear(skb); skb_init_secmark(skb); secpath_reset(skb); nf_reset(skb); diff --git a/net/core/sock.c b/net/core/sock.c index 93c8b20c91e4..78e89eb7eb70 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1655,6 +1655,10 @@ void sock_rfree(struct sk_buff *skb) } EXPORT_SYMBOL(sock_rfree); +/* + * Buffer destructor for skbs that are not used directly in read or write + * path, e.g. for error handler skbs. Automatically called from kfree_skb. + */ void sock_efree(struct sk_buff *skb) { sock_put(skb->sk); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 433424804284..8ce351ffceb1 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -25,6 +25,8 @@ static int zero = 0; static int one = 1; static int ushort_max = USHRT_MAX; +static int min_sndbuf = SOCK_MIN_SNDBUF; +static int min_rcvbuf = SOCK_MIN_RCVBUF; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_sndbuf, }, { .procname = "rmem_max", @@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_rcvbuf, }, { .procname = "wmem_default", @@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_sndbuf, }, { .procname = "rmem_default", @@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_rcvbuf, }, { .procname = "dev_weight", diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 14d02ea905b6..3e44b9b0b78e 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) release_sock(sk); if (reqsk_queue_empty(&icsk->icsk_accept_queue)) timeo = schedule_timeout(timeo); + sched_annotate_sleep(); lock_sock(sk); err = 0; if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 81751f12645f..592aff37366b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler( mutex_unlock(&inet_diag_table_mutex); } +static size_t inet_sk_attr_size(void) +{ + return nla_total_size(sizeof(struct tcp_info)) + + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(sizeof(struct inet_diag_meminfo)) + + nla_total_size(sizeof(struct inet_diag_msg)) + + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) + + nla_total_size(TCP_CA_NAME_MAX) + + nla_total_size(sizeof(struct tcpvegas_info)) + + 64; +} + int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct sk_buff *skb, struct inet_diag_req_v2 *req, struct user_namespace *user_ns, @@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s if (err) goto out; - rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + - sizeof(struct tcp_info) + 64, GFP_KERNEL); 
+ rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL); if (!rep) { err = -ENOMEM; goto out; diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 787b3c294ce6..d9bc28ac5d1b 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb) if (unlikely(opt->optlen)) ip_forward_options(skb); + skb_sender_cpu_clear(skb); return dst_output(skb); } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 2c8d98e728c0..145a50c4d566 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag); struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) { struct iphdr iph; + int netoff; u32 len; if (skb->protocol != htons(ETH_P_IP)) return skb; - if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0) + netoff = skb_network_offset(skb); + + if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) return skb; if (iph.ihl < 5 || iph.version != 4) return skb; len = ntohs(iph.tot_len); - if (skb->len < len || len < (iph.ihl * 4)) + if (skb->len < netoff + len || len < (iph.ihl * 4)) return skb; if (ip_is_fragment(&iph)) { skb = skb_share_check(skb, GFP_ATOMIC); if (skb) { - if (!pskb_may_pull(skb, iph.ihl*4)) + if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) return skb; - if (pskb_trim_rcsum(skb, len)) + if (pskb_trim_rcsum(skb, netoff + len)) return skb; memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); if (ip_defrag(skb, user)) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 31d8c71986b4..5cd99271d3a6 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf kfree_skb(skb); } -static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk, - const struct sk_buff *skb, - int ee_origin) +/* IPv4 supports cmsg on all imcp errors and some timestamps + * + * Timestamp code paths do not initialize the fields expected by cmsg: + * the PKTINFO fields in skb->cb[]. Fill those in here. + */ +static bool ipv4_datagram_support_cmsg(const struct sock *sk, + struct sk_buff *skb, + int ee_origin) { - struct in_pktinfo *info = PKTINFO_SKB_CB(skb); + struct in_pktinfo *info; + + if (ee_origin == SO_EE_ORIGIN_ICMP) + return true; - if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) || - (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || + if (ee_origin == SO_EE_ORIGIN_LOCAL) + return false; + + /* Support IP_PKTINFO on tstamp packets if requested, to correlate + * timestamp with egress dev. Not possible for packets without dev + * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). 
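/*
 * Editorial aside: inet_sk_attr_size() above just pre-computes a worst-case sum
 * of attribute sizes so nlmsg_new() allocates enough room for every attribute
 * the fill function may emit. Userspace sketch of that accounting, assuming the
 * conventional 4-byte netlink attribute header and 4-byte alignment (an
 * assumption made for this sketch, not something stated in the patch):
 */
#include <stddef.h>
#include <stdio.h>

#define NLA_ALIGNTO   4
#define NLA_ALIGN(n)  (((n) + NLA_ALIGNTO - 1) & ~(size_t)(NLA_ALIGNTO - 1))
#define NLA_HDRLEN    4

static size_t nla_total_size_sketch(size_t payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload);         /* header + padded payload */
}

int main(void)
{
        size_t sz = nla_total_size_sketch(1)    /* e.g. a one-byte attribute */
                  + nla_total_size_sketch(16)   /* e.g. a small struct attribute */
                  + 64;                         /* slack, as in the patch */

        printf("allocate at least %zu bytes\n", sz);
        return 0;
}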
+ */ + if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || (!skb->dev)) return false; + info = PKTINFO_SKB_CB(skb); info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; info->ipi_ifindex = skb->dev->ifindex; return true; @@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) serr = SKB_EXT_ERR(skb); - if (sin && skb->len) { + if (sin && serr->port) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); @@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) sin = &errhdr.offender; memset(sin, 0, sizeof(*sin)); - if (skb->len && - (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || - ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) { + if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; if (inet_sk(sk)->cmsg_flags) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 99e810f84671..cf5e82f39d3b 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb, &chainname, &comment, &rulenum) != 0) break; - nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo, - "TRACE: %s:%s:%s:%u ", - tablename, chainname, comment, rulenum); + nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo, + "TRACE: %s:%s:%s:%u ", + tablename, chainname, comment, rulenum); } #endif diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index e9f66e1cda50..208d5439e59b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk) kgid_t low, high; int ret = 0; + if (sk->sk_family == AF_INET6) + sk->sk_ipv6only = 1; + inet_get_ping_group_range_net(net, &low, &high); if (gid_lte(low, group) && gid_lte(group, high)) return 0; @@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, if (addr_len < sizeof(*addr)) return -EINVAL; + if (addr->sin_family != AF_INET && + !(addr->sin_family == AF_UNSPEC && + addr->sin_addr.s_addr == htonl(INADDR_ANY))) + return -EAFNOSUPPORT; + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); @@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, return -EINVAL; if (addr->sin6_family != AF_INET6) - return -EINVAL; + return -EAFNOSUPPORT; pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); @@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) - return -EINVAL; + return -EAFNOSUPPORT; daddr = usin->sin_addr.s_addr; /* no remote port */ } else { diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9d72a0fcd928..995a2259bcfc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, int large_allowed) { struct tcp_sock *tp = tcp_sk(sk); - u32 new_size_goal, size_goal, hlen; + u32 new_size_goal, size_goal; if (!large_allowed || !sk_can_gso(sk)) return mss_now; - /* Maybe we should/could use sk->sk_prot->max_header here ? 
*/ - hlen = inet_csk(sk)->icsk_af_ops->net_header_len + - inet_csk(sk)->icsk_ext_hdr_len + - tp->tcp_header_len; - - new_size_goal = sk->sk_gso_max_size - 1 - hlen; + /* Note : tcp_tso_autosize() will eventually split this later */ + new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); /* We try hard to avoid divides here */ diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index d694088214cd..62856e185a93 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start); */ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) { + /* If credits accumulated at a higher w, apply them gently now. */ + if (tp->snd_cwnd_cnt >= w) { + tp->snd_cwnd_cnt = 0; + tp->snd_cwnd++; + } + tp->snd_cwnd_cnt += acked; if (tp->snd_cwnd_cnt >= w) { u32 delta = tp->snd_cwnd_cnt / w; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4b276d1ed980..06d3d665a9fd 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -306,8 +306,10 @@ tcp_friendliness: } } - if (ca->cnt == 0) /* cannot be zero */ - ca->cnt = 1; + /* The maximum rate of cwnd increase CUBIC allows is 1 packet per + * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT. + */ + ca->cnt = max(ca->cnt, 2U); } static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a2a796c5536b..1db253e36045 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk) } else { /* Socket is locked, keep trying until memory is available. */ for (;;) { - skb = alloc_skb_fclone(MAX_TCP_HEADER, - sk->sk_allocation); + skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); if (skb) break; yield(); } - - /* Reserve space for headers and prepare control bits. */ - skb_reserve(skb, MAX_TCP_HEADER); /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ tcp_init_nondata_skb(skb, tp->write_seq, TCPHDR_ACK | TCPHDR_FIN); diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index d5f6bd9a210a..dab73813cb92 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) return err; IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; + skb->protocol = htons(ETH_P_IP); return x->outer_mode->output2(x, skb); } @@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output); int xfrm4_output_finish(struct sk_buff *skb) { memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - skb->protocol = htons(ETH_P_IP); #ifdef CONFIG_NETFILTER IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index c215be70cac0..ace8daca5c83 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) kfree_skb(skb); } -static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb) +/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL. + * + * At one point, excluding local errors was a quick test to identify icmp/icmp6 + * errors. This is no longer true, but the test remained, so the v6 stack, + * unlike v4, also honors cmsg requests on all wifi and timestamp errors. + * + * Timestamp code paths do not initialize the fields expected by cmsg: + * the PKTINFO fields in skb->cb[]. Fill those in here. 
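/*
 * Editorial aside: tcp_cong_avoid_ai() now drains snd_cwnd_cnt before adding
 * new ACK credits, so credits accumulated while the target w was large add at
 * most one segment once w shrinks, instead of a multi-segment burst through
 * delta = snd_cwnd_cnt / w. Standalone model with made-up numbers:
 */
#include <stdio.h>

struct model { unsigned int cwnd, cnt; };

static void ai(struct model *m, unsigned int w, unsigned int acked)
{
        if (m->cnt >= w) {              /* stale credits: apply them gently, +1 */
                m->cnt = 0;
                m->cwnd++;
        }
        m->cnt += acked;
        if (m->cnt >= w) {
                m->cwnd += m->cnt / w;
                m->cnt %= w;
        }
}

int main(void)
{
        struct model m = { .cwnd = 10, .cnt = 90 };     /* credits earned while w was 100 */

        ai(&m, 10, 1);  /* w shrank to 10: without the early flush cwnd would jump by 9 */
        printf("cwnd=%u cnt=%u\n", m.cwnd, m.cnt);      /* cwnd=11 cnt=1 */
        return 0;
}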
+ */ +static bool ip6_datagram_support_cmsg(struct sk_buff *skb, + struct sock_exterr_skb *serr) { - int ifindex = skb->dev ? skb->dev->ifindex : -1; + if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || + serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) + return true; + + if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) + return false; + + if (!skb->dev) + return false; if (skb->protocol == htons(ETH_P_IPV6)) - IP6CB(skb)->iif = ifindex; + IP6CB(skb)->iif = skb->dev->ifindex; else - PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex; + PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; + + return true; } /* @@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) serr = SKB_EXT_ERR(skb); - if (sin && skb->len) { + if (sin && serr->port) { const unsigned char *nh = skb_network_header(skb); sin->sin6_family = AF_INET6; sin->sin6_flowinfo = 0; @@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); sin = &errhdr.offender; memset(sin, 0, sizeof(*sin)); - if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) { + + if (ip6_datagram_support_cmsg(skb, serr)) { sin->sin6_family = AF_INET6; - if (np->rxopt.all) { - if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && - serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) - ip6_datagram_prepare_pktinfo_errqueue(skb); + if (np->rxopt.all) ip6_datagram_recv_common_ctl(sk, msg, skb); - } if (skb->protocol == htons(ETH_P_IPV6)) { sin->sin6_addr = ipv6_hdr(skb)->saddr; if (np->rxopt.all) diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index b4d5e1d97c1b..27ca79682efb 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, goto again; flp6->saddr = saddr; } + err = rt->dst.error; goto out; } again: diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0a04a37305d5..7e80b61b51ff 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) static inline int ip6_forward_finish(struct sk_buff *skb) { + skb_sender_cpu_clear(skb); return dst_output(skb); } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 266a264ec212..ddd94eca19b3 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -314,7 +314,7 @@ out: * Create tunnel matching given parameters. * * Return: - * created tunnel or NULL + * created tunnel or error pointer **/ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) @@ -322,7 +322,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) struct net_device *dev; struct ip6_tnl *t; char name[IFNAMSIZ]; - int err; + int err = -ENOMEM; if (p->name[0]) strlcpy(name, p->name, IFNAMSIZ); @@ -348,7 +348,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) failed_free: ip6_dev_free(dev); failed: - return NULL; + return ERR_PTR(err); } /** @@ -362,7 +362,7 @@ failed: * tunnel device is created and registered for use. 
* * Return: - * matching tunnel or NULL + * matching tunnel or error pointer **/ static struct ip6_tnl *ip6_tnl_locate(struct net *net, @@ -380,13 +380,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net, if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr)) { if (create) - return NULL; + return ERR_PTR(-EEXIST); return t; } } if (!create) - return NULL; + return ERR_PTR(-ENODEV); return ip6_tnl_create(net, p); } @@ -1420,7 +1420,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, 0); - if (t == NULL) + if (IS_ERR(t)) t = netdev_priv(dev); } else { memset(&p, 0, sizeof(p)); @@ -1445,7 +1445,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); if (cmd == SIOCCHGTUNNEL) { - if (t != NULL) { + if (!IS_ERR(t)) { if (t->dev != dev) { err = -EEXIST; break; @@ -1457,14 +1457,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) else err = ip6_tnl_update(t, &p1); } - if (t) { + if (!IS_ERR(t)) { err = 0; ip6_tnl_parm_to_user(&p, &t->parms); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) err = -EFAULT; - } else - err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); + } else { + err = PTR_ERR(t); + } break; case SIOCDELTUNNEL: err = -EPERM; @@ -1478,7 +1479,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) err = -ENOENT; ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, 0); - if (t == NULL) + if (IS_ERR(t)) break; err = -EPERM; if (t->dev == ip6n->fb_tnl_dev) @@ -1672,12 +1673,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net *net = dev_net(dev); - struct ip6_tnl *nt; + struct ip6_tnl *nt, *t; nt = netdev_priv(dev); ip6_tnl_netlink_parms(data, &nt->parms); - if (ip6_tnl_locate(net, &nt->parms, 0)) + t = ip6_tnl_locate(net, &nt->parms, 0); + if (!IS_ERR(t)) return -EEXIST; return ip6_tnl_create2(dev); @@ -1697,8 +1699,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], ip6_tnl_netlink_parms(data, &p); t = ip6_tnl_locate(net, &p, 0); - - if (t) { + if (!IS_ERR(t)) { if (t->dev != dev) return -EEXIST; } else diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e080fbbbc0e5..bb00c6f2a885 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb, &chainname, &comment, &rulenum) != 0) break; - nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo, - "TRACE: %s:%s:%s:%u ", - tablename, chainname, comment, rulenum); + nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, + "TRACE: %s:%s:%s:%u ", + tablename, chainname, comment, rulenum); } #endif diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index bd46f736f61d..a2dfff6ff227 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); - if (msg->msg_namelen < sizeof(struct sockaddr_in6) || - u->sin6_family != AF_INET6) { + if (msg->msg_namelen < sizeof(*u)) return -EINVAL; + if (u->sin6_family != AF_INET6) { + return -EAFNOSUPPORT; } if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != u->sin6_scope_id) { diff --git a/net/ipv6/udp_offload.c 
b/net/ipv6/udp_offload.c index ab889bb16b3c..be2c0ba82c85 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; - if (skb_shinfo(skb)->ip6_frag_id) - fptr->identification = skb_shinfo(skb)->ip6_frag_id; - else - ipv6_select_ident(fptr, - (struct rt6_info *)skb_dst(skb)); + if (!skb_shinfo(skb)->ip6_frag_id) + ipv6_proxy_select_ident(skb); + fptr->identification = skb_shinfo(skb)->ip6_frag_id; /* Fragment the skb. ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index ca3f29b98ae5..010f8bd2d577 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) return err; skb->ignore_df = 1; + skb->protocol = htons(ETH_P_IPV6); return x->outer_mode->output2(x, skb); } @@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output); int xfrm6_output_finish(struct sk_buff *skb) { memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); - skb->protocol = htons(ETH_P_IPV6); #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 48bf5a06847b..8d2d01b4800a 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPPROTO_MH: + offset += ipv6_optlen(exthdr); if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { struct ip6_mh *mh; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3afe36824703..8d53d65bd2ab 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -58,13 +58,24 @@ struct ieee80211_local; #define IEEE80211_UNSET_POWER_LEVEL INT_MIN /* - * Some APs experience problems when working with U-APSD. Decrease the - * probability of that happening by using legacy mode for all ACs but VO. - * The AP that caused us trouble was a Cisco 4410N. It ignores our - * setting, and always treats non-VO ACs as legacy. + * Some APs experience problems when working with U-APSD. Decreasing the + * probability of that happening by using legacy mode for all ACs but VO isn't + * enough. + * + * Cisco 4410N originally forced us to enable VO by default only because it + * treated non-VO ACs as legacy. + * + * However some APs (notably Netgear R7000) silently reclassify packets to + * different ACs. Since u-APSD ACs require trigger frames for frame retrieval + * clients would never see some frames (e.g. ARP responses) or would fetch them + * accidentally after a long time. + * + * It makes little sense to enable u-APSD queues by default because it needs + * userspace applications to be aware of it to actually take advantage of the + * possible additional powersavings. Implicitly depending on driver autotrigger + * frame support doesn't make much sense. 
*/ -#define IEEE80211_DEFAULT_UAPSD_QUEUES \ - IEEE80211_WMM_IE_STA_QOSINFO_AC_VO +#define IEEE80211_DEFAULT_UAPSD_QUEUES 0 #define IEEE80211_DEFAULT_MAX_SP_LEN \ IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL @@ -453,6 +464,7 @@ struct ieee80211_if_managed { unsigned int flags; bool csa_waiting_bcn; + bool csa_ignored_same_chan; bool beacon_crc_valid; u32 beacon_crc; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 10ac6324c1d0..142f66aece18 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, return; } + if (cfg80211_chandef_identical(&csa_ie.chandef, + &sdata->vif.bss_conf.chandef)) { + if (ifmgd->csa_ignored_same_chan) + return; + sdata_info(sdata, + "AP %pM tries to chanswitch to same channel, ignore\n", + ifmgd->associated->bssid); + ifmgd->csa_ignored_same_chan = true; + return; + } + mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, @@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, sdata->vif.csa_active = true; sdata->csa_chandef = csa_ie.chandef; sdata->csa_block_tx = csa_ie.mode; + ifmgd->csa_ignored_same_chan = false; if (sdata->csa_block_tx) ieee80211_stop_vif_queues(local, sdata, @@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sdata->vif.csa_active = false; ifmgd->csa_waiting_bcn = false; + ifmgd->csa_ignored_same_chan = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); @@ -3204,7 +3217,8 @@ static const u64 care_about_ies = (1ULL << WLAN_EID_CHANNEL_SWITCH) | (1ULL << WLAN_EID_PWR_CONSTRAINT) | (1ULL << WLAN_EID_HT_CAPABILITY) | - (1ULL << WLAN_EID_HT_OPERATION); + (1ULL << WLAN_EID_HT_OPERATION) | + (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN); static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1101563357ea..944bdc04e913 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2214,6 +2214,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) hdr = (struct ieee80211_hdr *) skb->data; mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); + if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) + return RX_DROP_MONITOR; + /* frame is in RMC, don't forward */ if (ieee80211_is_data(hdr->frame_control) && is_multicast_ether_addr(hdr->addr1) && diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8428f4a95479..747bdcf72e92 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, wdev_iter = &sdata_iter->wdev; if (sdata_iter == sdata || - rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL || + !ieee80211_sdata_running(sdata_iter) || local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) continue; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index c47ffd7a0a70..d93ceeb3ef04 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, IP_VS_DBG(2, "BACKUP, add new conn. 
failed\n"); return; } + if (!(flags & IP_VS_CONN_F_TEMPLATE)) + kfree(param->pe_data); } if (opt) @@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) ); #endif + ip_vs_pe_put(param.pe); return 0; /* Error exit */ out: diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 0d8448f19dfe..675d12c69e32 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -212,6 +212,30 @@ void nf_log_packet(struct net *net, } EXPORT_SYMBOL(nf_log_packet); +void nf_log_trace(struct net *net, + u_int8_t pf, + unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct nf_loginfo *loginfo, const char *fmt, ...) +{ + va_list args; + char prefix[NF_LOG_PREFIXLEN]; + const struct nf_logger *logger; + + rcu_read_lock(); + logger = rcu_dereference(net->nf.nf_loggers[pf]); + if (logger) { + va_start(args, fmt); + vsnprintf(prefix, sizeof(prefix), fmt, args); + va_end(args); + logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(nf_log_trace); + #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) struct nf_log_buf { diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 199fd0f27b0e..ac1a9528dbf2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) { - rule->genmask = 0; + rule->genmask &= ~(1 << gencursor_next(net)); } static int @@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, if (nla[NFTA_CHAIN_POLICY]) { if ((chain != NULL && - !(chain->flags & NFT_BASE_CHAIN)) || + !(chain->flags & NFT_BASE_CHAIN))) + return -EOPNOTSUPP; + + if (chain == NULL && nla[NFTA_CHAIN_HOOK] == NULL) return -EOPNOTSUPP; @@ -1711,9 +1714,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, } nla_nest_end(skb, list); - if (rule->ulen && - nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule))) - goto nla_put_failure; + if (rule->udata) { + struct nft_userdata *udata = nft_userdata(rule); + if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1, + udata->data) < 0) + goto nla_put_failure; + } nlmsg_end(skb, nlh); return 0; @@ -1896,11 +1902,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, struct nft_table *table; struct nft_chain *chain; struct nft_rule *rule, *old_rule = NULL; + struct nft_userdata *udata; struct nft_trans *trans = NULL; struct nft_expr *expr; struct nft_ctx ctx; struct nlattr *tmp; - unsigned int size, i, n, ulen = 0; + unsigned int size, i, n, ulen = 0, usize = 0; int err, rem; bool create; u64 handle, pos_handle; @@ -1968,12 +1975,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, n++; } } + /* Check for overflow of dlen field */ + err = -EFBIG; + if (size >= 1 << 12) + goto err1; - if (nla[NFTA_RULE_USERDATA]) + if (nla[NFTA_RULE_USERDATA]) { ulen = nla_len(nla[NFTA_RULE_USERDATA]); + if (ulen > 0) + usize = sizeof(struct nft_userdata) + ulen; + } err = -ENOMEM; - rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL); + rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL); if (rule == NULL) goto err1; @@ -1981,10 +1995,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, rule->handle = handle; rule->dlen = size; - 
rule->ulen = ulen; + rule->udata = ulen ? 1 : 0; - if (ulen) - nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen); + if (ulen) { + udata = nft_userdata(rule); + udata->len = ulen - 1; + nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen); + } expr = nft_expr_first(rule); for (i = 0; i < n; i++) { @@ -2031,12 +2048,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, err3: list_del_rcu(&rule->list); - if (trans) { - list_del_rcu(&nft_trans_rule(trans)->list); - nft_rule_clear(net, nft_trans_rule(trans)); - nft_trans_destroy(trans); - chain->use++; - } err2: nf_tables_rule_destroy(&ctx, rule); err1: @@ -3612,12 +3623,11 @@ static int nf_tables_commit(struct sk_buff *skb) &te->elem, NFT_MSG_DELSETELEM, 0); te->set->ops->get(te->set, &te->elem); - te->set->ops->remove(te->set, &te->elem); nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); - if (te->elem.flags & NFT_SET_MAP) { - nft_data_uninit(&te->elem.data, - te->set->dtype); - } + if (te->set->flags & NFT_SET_MAP && + !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) + nft_data_uninit(&te->elem.data, te->set->dtype); + te->set->ops->remove(te->set, &te->elem); nft_trans_destroy(trans); break; } @@ -3658,7 +3668,7 @@ static int nf_tables_abort(struct sk_buff *skb) { struct net *net = sock_net(skb->sk); struct nft_trans *trans, *next; - struct nft_set *set; + struct nft_trans_elem *te; list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { switch (trans->msg_type) { @@ -3719,9 +3729,13 @@ static int nf_tables_abort(struct sk_buff *skb) break; case NFT_MSG_NEWSETELEM: nft_trans_elem_set(trans)->nelems--; - set = nft_trans_elem_set(trans); - set->ops->get(set, &nft_trans_elem(trans)); - set->ops->remove(set, &nft_trans_elem(trans)); + te = (struct nft_trans_elem *)trans->data; + te->set->ops->get(te->set, &te->elem); + nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); + if (te->set->flags & NFT_SET_MAP && + !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) + nft_data_uninit(&te->elem.data, te->set->dtype); + te->set->ops->remove(te->set, &te->elem); nft_trans_destroy(trans); break; case NFT_MSG_DELSETELEM: diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 3b90eb2b2c55..2d298dccb6dd 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt, { struct net *net = dev_net(pkt->in ? 
pkt->in : pkt->out); - nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, - pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", - chain->table->name, chain->name, comments[type], - rulenum); + nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, + pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", + chain->table->name, chain->name, comments[type], + rulenum); } unsigned int diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index a5599fc51a6f..54330fb5efaf 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) return -EINVAL; + /* Not all fields are initialized so first zero the tuple */ + memset(tuple, 0, sizeof(struct nf_conntrack_tuple)); + tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 1279cd85663e..65f3e2b6be44 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -123,7 +123,7 @@ static void nft_target_set_tgchk_param(struct xt_tgchk_param *par, const struct nft_ctx *ctx, struct xt_target *target, void *info, - union nft_entry *entry, u8 proto, bool inv) + union nft_entry *entry, u16 proto, bool inv) { par->net = ctx->net; par->table = ctx->table->name; @@ -133,11 +133,14 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; break; case AF_INET6: + if (proto) + entry->e6.ipv6.flags |= IP6T_F_PROTO; + entry->e6.ipv6.proto = proto; entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; break; case NFPROTO_BRIDGE: - entry->ebt.ethproto = proto; + entry->ebt.ethproto = (__force __be16)proto; entry->ebt.invflags = inv ? EBT_IPROTO : 0; break; } @@ -171,7 +174,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, }; -static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv) +static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) { struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; u32 flags; @@ -203,7 +206,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, struct xt_target *target = expr->ops->data; struct xt_tgchk_param par; size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); - u8 proto = 0; + u16 proto = 0; bool inv = false; union nft_entry e = {}; int ret; @@ -334,7 +337,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { static void nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, struct xt_match *match, void *info, - union nft_entry *entry, u8 proto, bool inv) + union nft_entry *entry, u16 proto, bool inv) { par->net = ctx->net; par->table = ctx->table->name; @@ -344,11 +347,14 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; break; case AF_INET6: + if (proto) + entry->e6.ipv6.flags |= IP6T_F_PROTO; + entry->e6.ipv6.proto = proto; entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; break; case NFPROTO_BRIDGE: - entry->ebt.ethproto = proto; + entry->ebt.ethproto = (__force __be16)proto; entry->ebt.invflags = inv ? 
EBT_IPROTO : 0; break; } @@ -385,7 +391,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, struct xt_match *match = expr->ops->data; struct xt_mtchk_param par; size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); - u8 proto = 0; + u16 proto = 0; bool inv = false; union nft_entry e = {}; int ret; diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index c82df0a48fcd..37c15e674884 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, iter->err = err; goto out; } + + continue; } if (iter->count < iter->skip) diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index ef8a926752a9..50e1e5aaf4ce 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par) { const struct ip6t_ip6 *i = par->entryinfo; - if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) - && !(i->flags & IP6T_INV_PROTO)) + if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && + !(i->invflags & IP6T_INV_PROTO)) return 0; pr_info("Can be used only in combination with " diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5bf1e968a728..f8db7064d81c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3123,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, return 0; } -static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) +static void packet_dev_mclist_delete(struct net_device *dev, + struct packet_mclist **mlp) { - for ( ; i; i = i->next) { - if (i->ifindex == dev->ifindex) - packet_dev_mc(dev, i, what); + struct packet_mclist *ml; + + while ((ml = *mlp) != NULL) { + if (ml->ifindex == dev->ifindex) { + packet_dev_mc(dev, ml, -1); + *mlp = ml->next; + kfree(ml); + } else + mlp = &ml->next; } } @@ -3204,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) packet_dev_mc(dev, ml, -1); kfree(ml); } - rtnl_unlock(); - return 0; + break; } } rtnl_unlock(); - return -EADDRNOTAVAIL; + return 0; } static void packet_flush_mclist(struct sock *sk) @@ -3559,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this, switch (msg) { case NETDEV_UNREGISTER: if (po->mclist) - packet_dev_mclist(dev, po->mclist, -1); + packet_dev_mclist_delete(dev, &po->mclist); /* fallthrough */ case NETDEV_DOWN: diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index a817705ce2d0..dba8d0864f18 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c @@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, int *unpinned); static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); -static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) +static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst, + struct rds_iw_device **rds_iwdev, + struct rdma_cm_id **cm_id) { struct rds_iw_device *iwdev; struct rds_iw_cm_id *i_cm_id; @@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd src_addr->sin_port, dst_addr->sin_addr.s_addr, dst_addr->sin_port, - rs->rs_bound_addr, - rs->rs_bound_port, - rs->rs_conn_addr, - rs->rs_conn_port); + src->sin_addr.s_addr, + src->sin_port, + dst->sin_addr.s_addr, + dst->sin_port); #ifdef WORKING_TUPLE_DETECTION - if (src_addr->sin_addr.s_addr == 
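/*
 * Editorial aside: packet_dev_mclist_delete() above walks the mclist with a
 * pointer-to-pointer so entries for the unregistered device can be unlinked and
 * freed without special-casing the list head; the old helper only dropped the
 * hardware filter and left the stale nodes on the list. Minimal standalone
 * version of the idiom with a hypothetical node type:
 */
#include <stdlib.h>

struct node { int ifindex; struct node *next; };

static void delete_matching(struct node **headp, int ifindex)
{
        struct node *n;

        while ((n = *headp) != NULL) {
                if (n->ifindex == ifindex) {
                        *headp = n->next;       /* unlink: works for head and interior nodes */
                        free(n);
                } else {
                        headp = &n->next;       /* advance to the link we may rewrite next */
                }
        }
}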
rs->rs_bound_addr && - src_addr->sin_port == rs->rs_bound_port && - dst_addr->sin_addr.s_addr == rs->rs_conn_addr && - dst_addr->sin_port == rs->rs_conn_port) { + if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr && + src_addr->sin_port == src->sin_port && + dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr && + dst_addr->sin_port == dst->sin_port) { #else /* FIXME - needs to compare the local and remote * ipaddr/port tuple, but the ipaddr is the only @@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd * zero'ed. It doesn't appear to be properly populated * during connection setup... */ - if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) { + if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) { #endif spin_unlock_irq(&iwdev->spinlock); *rds_iwdev = iwdev; @@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i { struct sockaddr_in *src_addr, *dst_addr; struct rds_iw_device *rds_iwdev_old; - struct rds_sock rs; struct rdma_cm_id *pcm_id; int rc; src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; - rs.rs_bound_addr = src_addr->sin_addr.s_addr; - rs.rs_bound_port = src_addr->sin_port; - rs.rs_conn_addr = dst_addr->sin_addr.s_addr; - rs.rs_conn_port = dst_addr->sin_port; - - rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id); + rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id); if (rc) rds_iw_remove_cm_id(rds_iwdev, cm_id); @@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents, struct rds_iw_device *rds_iwdev; struct rds_iw_mr *ibmr = NULL; struct rdma_cm_id *cm_id; + struct sockaddr_in src = { + .sin_addr.s_addr = rs->rs_bound_addr, + .sin_port = rs->rs_bound_port, + }; + struct sockaddr_in dst = { + .sin_addr.s_addr = rs->rs_conn_addr, + .sin_port = rs->rs_conn_port, + }; int ret; - ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id); + ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id); if (ret || !cm_id) { ret = -ENODEV; goto out; diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index 5394b6be46ec..0610efa83d72 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c @@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk) _leave("UDP socket errqueue empty"); return; } - if (!skb->len) { + serr = SKB_EXT_ERR(skb); + if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { _leave("UDP empty message"); kfree_skb(skb); return; @@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk) rxrpc_new_skb(skb); - serr = SKB_EXT_ERR(skb); addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); port = serr->port; diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 4575485ad1b4..19a560626dc4 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, if (!skb) { /* nothing remains on the queue */ if (copied && - (msg->msg_flags & MSG_PEEK || timeo == 0)) + (flags & MSG_PEEK || timeo == 0)) goto out; /* wait for a message to turn up */ diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 82c5d7fc1988..5f6288fa3f12 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -25,21 +25,41 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_bpf *b = a->priv; - int action; - int filter_res; + int action, filter_res; spin_lock(&b->tcf_lock); + b->tcf_tm.lastuse = jiffies; 
bstats_update(&b->tcf_bstats, skb); - action = b->tcf_action; filter_res = BPF_PROG_RUN(b->filter, skb); - if (filter_res == 0) { - /* Return code 0 from the BPF program - * is being interpreted as a drop here. - */ - action = TC_ACT_SHOT; + + /* A BPF program may overwrite the default action opcode. + * Similarly as in cls_bpf, if filter_res == -1 we use the + * default action specified from tc. + * + * In case a different well-known TC_ACT opcode has been + * returned, it will overwrite the default one. + * + * For everything else that is unkown, TC_ACT_UNSPEC is + * returned. + */ + switch (filter_res) { + case TC_ACT_PIPE: + case TC_ACT_RECLASSIFY: + case TC_ACT_OK: + action = filter_res; + break; + case TC_ACT_SHOT: + action = filter_res; b->tcf_qstats.drops++; + break; + case TC_ACT_UNSPEC: + action = b->tcf_action; + break; + default: + action = TC_ACT_UNSPEC; + break; } spin_unlock(&b->tcf_lock); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 09487afbfd51..95fdf4e40051 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -78,8 +78,11 @@ struct tc_u_hnode { struct tc_u_common *tp_c; int refcnt; unsigned int divisor; - struct tc_u_knode __rcu *ht[1]; struct rcu_head rcu; + /* The 'ht' field MUST be the last field in structure to allow for + * more entries allocated at end of structure. + */ + struct tc_u_knode __rcu *ht[1]; }; struct tc_u_common { diff --git a/net/socket.c b/net/socket.c index bbedbfcb42c2..245330ca0015 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, if (len > INT_MAX) len = INT_MAX; + if (unlikely(!access_ok(VERIFY_READ, buff, len))) + return -EFAULT; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; @@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, if (size > INT_MAX) size = INT_MAX; + if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size))) + return -EFAULT; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile index da5136fd5694..579f72bbcf4b 100644 --- a/net/sunrpc/xprtrdma/Makefile +++ b/net/sunrpc/xprtrdma/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_SUNRPC_XPRT_RDMA_CLIENT) += xprtrdma.o -xprtrdma-y := transport.o rpc_rdma.o verbs.o +xprtrdma-y := transport.o rpc_rdma.o verbs.o \ + fmr_ops.o frwr_ops.o physical_ops.o obj-$(CONFIG_SUNRPC_XPRT_RDMA_SERVER) += svcrdma.o diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c new file mode 100644 index 000000000000..a91ba2c8ef1e --- /dev/null +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2015 Oracle. All rights reserved. + * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. + */ + +/* Lightweight memory registration using Fast Memory Regions (FMR). + * Referred to sometimes as MTHCAFMR mode. + * + * FMR uses synchronous memory registration and deregistration. + * FMR registration is known to be fast, but FMR deregistration + * can take tens of usecs to complete. + */ + +#include "xprt_rdma.h" + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +# define RPCDBG_FACILITY RPCDBG_TRANS +#endif + +/* Maximum scatter/gather per FMR */ +#define RPCRDMA_MAX_FMR_SGES (64) + +static int +fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, + struct rpcrdma_create_data_internal *cdata) +{ + return 0; +} + +/* FMR mode conveys up to 64 pages of payload per chunk segment. 
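Editorial note: the act_bpf change above stops interpreting a zero return from the BPF program as an implicit drop; instead the well-known TC_ACT_* opcodes pass through, TC_ACT_UNSPEC falls back to the action configured from tc, and anything unrecognized becomes TC_ACT_UNSPEC. A standalone sketch of that mapping (the TC_ACT_* values mirror include/uapi/linux/pkt_cls.h but should be treated as illustrative):

#include <stdio.h>

/* Illustrative copies of the uapi opcodes. */
#define TC_ACT_UNSPEC           (-1)
#define TC_ACT_OK               0
#define TC_ACT_RECLASSIFY       1
#define TC_ACT_SHOT             2
#define TC_ACT_PIPE             3

/* Map a BPF program's return code onto the action to take. */
static int map_filter_res(int filter_res, int default_action)
{
        switch (filter_res) {
        case TC_ACT_PIPE:
        case TC_ACT_RECLASSIFY:
        case TC_ACT_OK:
        case TC_ACT_SHOT:
                return filter_res;      /* well-known opcode: use it */
        case TC_ACT_UNSPEC:
                return default_action;  /* defer to the configured action */
        default:
                return TC_ACT_UNSPEC;   /* unknown: no opinion */
        }
}

int main(void)
{
        printf("%d\n", map_filter_res(TC_ACT_SHOT, TC_ACT_OK));         /* 2 */
        printf("%d\n", map_filter_res(TC_ACT_UNSPEC, TC_ACT_OK));       /* 0 */
        printf("%d\n", map_filter_res(42, TC_ACT_OK));                  /* -1 */
        return 0;
}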
+ */ +static size_t +fmr_op_maxpages(struct rpcrdma_xprt *r_xprt) +{ + return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, + rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES); +} + +static int +fmr_op_init(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ; + struct ib_fmr_attr fmr_attr = { + .max_pages = RPCRDMA_MAX_FMR_SGES, + .max_maps = 1, + .page_shift = PAGE_SHIFT + }; + struct ib_pd *pd = r_xprt->rx_ia.ri_pd; + struct rpcrdma_mw *r; + int i, rc; + + INIT_LIST_HEAD(&buf->rb_mws); + INIT_LIST_HEAD(&buf->rb_all); + + i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; + dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i); + + while (i--) { + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return -ENOMEM; + + r->r.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr); + if (IS_ERR(r->r.fmr)) + goto out_fmr_err; + + list_add(&r->mw_list, &buf->rb_mws); + list_add(&r->mw_all, &buf->rb_all); + } + return 0; + +out_fmr_err: + rc = PTR_ERR(r->r.fmr); + dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc); + kfree(r); + return rc; +} + +/* Use the ib_map_phys_fmr() verb to register a memory region + * for remote access via RDMA READ or RDMA WRITE. + */ +static int +fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, + int nsegs, bool writing) +{ + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct ib_device *device = ia->ri_id->device; + enum dma_data_direction direction = rpcrdma_data_dir(writing); + struct rpcrdma_mr_seg *seg1 = seg; + struct rpcrdma_mw *mw = seg1->rl_mw; + u64 physaddrs[RPCRDMA_MAX_DATA_SEGS]; + int len, pageoff, i, rc; + + pageoff = offset_in_page(seg1->mr_offset); + seg1->mr_offset -= pageoff; /* start of page */ + seg1->mr_len += pageoff; + len = -pageoff; + if (nsegs > RPCRDMA_MAX_FMR_SGES) + nsegs = RPCRDMA_MAX_FMR_SGES; + for (i = 0; i < nsegs;) { + rpcrdma_map_one(device, seg, direction); + physaddrs[i] = seg->mr_dma; + len += seg->mr_len; + ++seg; + ++i; + /* Check for holes */ + if ((i < nsegs && offset_in_page(seg->mr_offset)) || + offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) + break; + } + + rc = ib_map_phys_fmr(mw->r.fmr, physaddrs, i, seg1->mr_dma); + if (rc) + goto out_maperr; + + seg1->mr_rkey = mw->r.fmr->rkey; + seg1->mr_base = seg1->mr_dma + pageoff; + seg1->mr_nsegs = i; + seg1->mr_len = len; + return i; + +out_maperr: + dprintk("RPC: %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n", + __func__, len, (unsigned long long)seg1->mr_dma, + pageoff, i, rc); + while (i--) + rpcrdma_unmap_one(device, --seg); + return rc; +} + +/* Use the ib_unmap_fmr() verb to prevent further remote + * access via RDMA READ or RDMA WRITE. + */ +static int +fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) +{ + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct rpcrdma_mr_seg *seg1 = seg; + struct ib_device *device; + int rc, nsegs = seg->mr_nsegs; + LIST_HEAD(l); + + list_add(&seg1->rl_mw->r.fmr->list, &l); + rc = ib_unmap_fmr(&l); + read_lock(&ia->ri_qplock); + device = ia->ri_id->device; + while (seg1->mr_nsegs--) + rpcrdma_unmap_one(device, seg++); + read_unlock(&ia->ri_qplock); + if (rc) + goto out_err; + return nsegs; + +out_err: + dprintk("RPC: %s: ib_unmap_fmr status %i\n", __func__, rc); + return nsegs; +} + +/* After a disconnect, unmap all FMRs. + * + * This is invoked only in the transport connect worker in order + * to serialize with rpcrdma_register_fmr_external(). 
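Editorial note: fmr_op_map() above coalesces scatterlist segments until it finds a "hole", i.e. a following segment that does not begin on a page boundary or a consumed segment that does not end on one, because an FMR can only map whole pages. A hedged userspace sketch of just that stopping rule (offset_in_page() is re-created here for illustration):

#include <stdio.h>

#define PAGE_SIZE               4096UL
#define offset_in_page(p)       ((unsigned long)(p) & (PAGE_SIZE - 1))

struct seg {
        unsigned long offset;   /* start address of the segment */
        unsigned long len;      /* length in bytes */
};

/* Count how many leading segments one page-granular registration can
 * cover: stop as soon as continuing would leave a hole. */
static int coalesce(const struct seg *seg, int nsegs, int max)
{
        int i;

        if (nsegs > max)
                nsegs = max;
        for (i = 0; i < nsegs;) {
                ++i;
                /* Same hole check as fmr_op_map(): the next segment must
                 * start on a page and the current one must end on one. */
                if ((i < nsegs && offset_in_page(seg[i].offset)) ||
                    offset_in_page(seg[i - 1].offset + seg[i - 1].len))
                        break;
        }
        return i;
}

int main(void)
{
        struct seg s[] = {
                { 0x10000, PAGE_SIZE },         /* whole page */
                { 0x11000, PAGE_SIZE },         /* whole page */
                { 0x12000, 100 },               /* short tail ends the run */
                { 0x13000, PAGE_SIZE },         /* never reached */
        };

        printf("%d\n", coalesce(s, 4, 64));     /* prints 3 */
        return 0;
}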
+ */ +static void +fmr_op_reset(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct rpcrdma_mw *r; + LIST_HEAD(list); + int rc; + + list_for_each_entry(r, &buf->rb_all, mw_all) + list_add(&r->r.fmr->list, &list); + + rc = ib_unmap_fmr(&list); + if (rc) + dprintk("RPC: %s: ib_unmap_fmr failed %i\n", + __func__, rc); +} + +static void +fmr_op_destroy(struct rpcrdma_buffer *buf) +{ + struct rpcrdma_mw *r; + int rc; + + while (!list_empty(&buf->rb_all)) { + r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); + list_del(&r->mw_all); + rc = ib_dealloc_fmr(r->r.fmr); + if (rc) + dprintk("RPC: %s: ib_dealloc_fmr failed %i\n", + __func__, rc); + kfree(r); + } +} + +const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = { + .ro_map = fmr_op_map, + .ro_unmap = fmr_op_unmap, + .ro_open = fmr_op_open, + .ro_maxpages = fmr_op_maxpages, + .ro_init = fmr_op_init, + .ro_reset = fmr_op_reset, + .ro_destroy = fmr_op_destroy, + .ro_displayname = "fmr", +}; diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c new file mode 100644 index 000000000000..0a7b9df70133 --- /dev/null +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2015 Oracle. All rights reserved. + * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. + */ + +/* Lightweight memory registration using Fast Registration Work + * Requests (FRWR). Also referred to sometimes as FRMR mode. + * + * FRWR features ordered asynchronous registration and deregistration + * of arbitrarily sized memory regions. This is the fastest and safest + * but most complex memory registration mode. + */ + +#include "xprt_rdma.h" + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +# define RPCDBG_FACILITY RPCDBG_TRANS +#endif + +static int +__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device, + unsigned int depth) +{ + struct rpcrdma_frmr *f = &r->r.frmr; + int rc; + + f->fr_mr = ib_alloc_fast_reg_mr(pd, depth); + if (IS_ERR(f->fr_mr)) + goto out_mr_err; + f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth); + if (IS_ERR(f->fr_pgl)) + goto out_list_err; + return 0; + +out_mr_err: + rc = PTR_ERR(f->fr_mr); + dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n", + __func__, rc); + return rc; + +out_list_err: + rc = PTR_ERR(f->fr_pgl); + dprintk("RPC: %s: ib_alloc_fast_reg_page_list status %i\n", + __func__, rc); + ib_dereg_mr(f->fr_mr); + return rc; +} + +static void +__frwr_release(struct rpcrdma_mw *r) +{ + int rc; + + rc = ib_dereg_mr(r->r.frmr.fr_mr); + if (rc) + dprintk("RPC: %s: ib_dereg_mr status %i\n", + __func__, rc); + ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); +} + +static int +frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, + struct rpcrdma_create_data_internal *cdata) +{ + struct ib_device_attr *devattr = &ia->ri_devattr; + int depth, delta; + + ia->ri_max_frmr_depth = + min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, + devattr->max_fast_reg_page_list_len); + dprintk("RPC: %s: device's max FR page list len = %u\n", + __func__, ia->ri_max_frmr_depth); + + /* Add room for frmr register and invalidate WRs. + * 1. FRMR reg WR for head + * 2. FRMR invalidate WR for head + * 3. N FRMR reg WRs for pagelist + * 4. N FRMR invalidate WRs for pagelist + * 5. FRMR reg WR for tail + * 6. FRMR invalidate WR for tail + * 7. The RDMA_SEND WR + */ + depth = 7; + + /* Calculate N if the device max FRMR depth is smaller than + * RPCRDMA_MAX_DATA_SEGS. 
+ */ + if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) { + delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth; + do { + depth += 2; /* FRMR reg + invalidate */ + delta -= ia->ri_max_frmr_depth; + } while (delta > 0); + } + + ep->rep_attr.cap.max_send_wr *= depth; + if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) { + cdata->max_requests = devattr->max_qp_wr / depth; + if (!cdata->max_requests) + return -EINVAL; + ep->rep_attr.cap.max_send_wr = cdata->max_requests * + depth; + } + + return 0; +} + +/* FRWR mode conveys a list of pages per chunk segment. The + * maximum length of that list is the FRWR page list depth. + */ +static size_t +frwr_op_maxpages(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + + return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, + rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth); +} + +/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */ +static void +frwr_sendcompletion(struct ib_wc *wc) +{ + struct rpcrdma_mw *r; + + if (likely(wc->status == IB_WC_SUCCESS)) + return; + + /* WARNING: Only wr_id and status are reliable at this point */ + r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; + dprintk("RPC: %s: frmr %p (stale), status %d\n", + __func__, r, wc->status); + r->r.frmr.fr_state = FRMR_IS_STALE; +} + +static int +frwr_op_init(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct ib_device *device = r_xprt->rx_ia.ri_id->device; + unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; + struct ib_pd *pd = r_xprt->rx_ia.ri_pd; + int i; + + INIT_LIST_HEAD(&buf->rb_mws); + INIT_LIST_HEAD(&buf->rb_all); + + i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; + dprintk("RPC: %s: initalizing %d FRMRs\n", __func__, i); + + while (i--) { + struct rpcrdma_mw *r; + int rc; + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return -ENOMEM; + + rc = __frwr_init(r, pd, device, depth); + if (rc) { + kfree(r); + return rc; + } + + list_add(&r->mw_list, &buf->rb_mws); + list_add(&r->mw_all, &buf->rb_all); + r->mw_sendcompletion = frwr_sendcompletion; + } + + return 0; +} + +/* Post a FAST_REG Work Request to register a memory region + * for remote access via RDMA READ or RDMA WRITE. 
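Editorial note: frwr_op_open() above sizes the send queue by charging seven work requests per RPC (FRMR reg/invalidate for head, pagelist and tail, plus the RDMA SEND) and adding two more for every extra FRMR needed when the device's fast-register depth is smaller than RPCRDMA_MAX_DATA_SEGS. A small sketch of that arithmetic (the constant is an illustrative stand-in):

#include <stdio.h>

#define MAX_DATA_SEGS   64      /* stand-in for RPCRDMA_MAX_DATA_SEGS */

/* Work requests needed per RPC, following the enumeration in the
 * frwr_op_open() comment: 7 baseline, plus reg + invalidate for each
 * additional FRMR the pagelist needs. */
static int frwr_depth(unsigned int max_frmr_depth)
{
        int depth = 7;
        int delta;

        if (max_frmr_depth < MAX_DATA_SEGS) {
                delta = MAX_DATA_SEGS - max_frmr_depth;
                do {
                        depth += 2;             /* one more FRMR reg + inv */
                        delta -= max_frmr_depth;
                } while (delta > 0);
        }
        return depth;
}

int main(void)
{
        /* A device that can fast-register 64 pages needs only 7 WRs;
         * one limited to 16 pages needs 7 + 2 * ceil(48 / 16) = 13. */
        printf("%d\n", frwr_depth(64));         /* 7 */
        printf("%d\n", frwr_depth(16));         /* 13 */
        return 0;
}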
+ */ +static int +frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, + int nsegs, bool writing) +{ + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct ib_device *device = ia->ri_id->device; + enum dma_data_direction direction = rpcrdma_data_dir(writing); + struct rpcrdma_mr_seg *seg1 = seg; + struct rpcrdma_mw *mw = seg1->rl_mw; + struct rpcrdma_frmr *frmr = &mw->r.frmr; + struct ib_mr *mr = frmr->fr_mr; + struct ib_send_wr fastreg_wr, *bad_wr; + u8 key; + int len, pageoff; + int i, rc; + int seg_len; + u64 pa; + int page_no; + + pageoff = offset_in_page(seg1->mr_offset); + seg1->mr_offset -= pageoff; /* start of page */ + seg1->mr_len += pageoff; + len = -pageoff; + if (nsegs > ia->ri_max_frmr_depth) + nsegs = ia->ri_max_frmr_depth; + for (page_no = i = 0; i < nsegs;) { + rpcrdma_map_one(device, seg, direction); + pa = seg->mr_dma; + for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) { + frmr->fr_pgl->page_list[page_no++] = pa; + pa += PAGE_SIZE; + } + len += seg->mr_len; + ++seg; + ++i; + /* Check for holes */ + if ((i < nsegs && offset_in_page(seg->mr_offset)) || + offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) + break; + } + dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n", + __func__, mw, i, len); + + frmr->fr_state = FRMR_IS_VALID; + + memset(&fastreg_wr, 0, sizeof(fastreg_wr)); + fastreg_wr.wr_id = (unsigned long)(void *)mw; + fastreg_wr.opcode = IB_WR_FAST_REG_MR; + fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff; + fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl; + fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; + fastreg_wr.wr.fast_reg.page_list_len = page_no; + fastreg_wr.wr.fast_reg.length = len; + fastreg_wr.wr.fast_reg.access_flags = writing ? + IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : + IB_ACCESS_REMOTE_READ; + key = (u8)(mr->rkey & 0x000000FF); + ib_update_fast_reg_key(mr, ++key); + fastreg_wr.wr.fast_reg.rkey = mr->rkey; + + DECR_CQCOUNT(&r_xprt->rx_ep); + rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); + if (rc) + goto out_senderr; + + seg1->mr_rkey = mr->rkey; + seg1->mr_base = seg1->mr_dma + pageoff; + seg1->mr_nsegs = i; + seg1->mr_len = len; + return i; + +out_senderr: + dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc); + ib_update_fast_reg_key(mr, --key); + frmr->fr_state = FRMR_IS_INVALID; + while (i--) + rpcrdma_unmap_one(device, --seg); + return rc; +} + +/* Post a LOCAL_INV Work Request to prevent further remote access + * via RDMA READ or RDMA WRITE. 
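Editorial note: before posting the FAST_REG WR, frwr_op_map() above bumps the low byte of the MR's rkey (and rolls it back in the send-error path), giving the re-registered region a fresh key generation so stale handles no longer match. A toy illustration of that low-byte cycling with no verbs calls (the 24/8-bit split of the rkey is the only assumption):

#include <stdio.h>
#include <stdint.h>

/* Replace the low 8 bits of an rkey with a new generation value,
 * roughly the effect ib_update_fast_reg_key() has on the MR's rkey. */
static uint32_t update_key(uint32_t rkey, uint8_t newkey)
{
        return (rkey & 0xffffff00u) | newkey;
}

int main(void)
{
        uint32_t rkey = 0x1234abffu;
        uint8_t key = (uint8_t)(rkey & 0xff);   /* current generation */

        rkey = update_key(rkey, ++key);         /* bump before FAST_REG */
        printf("0x%08x\n", rkey);               /* 0x1234ab00: low byte wrapped */

        rkey = update_key(rkey, --key);         /* roll back after a send error */
        printf("0x%08x\n", rkey);               /* 0x1234abff again */
        return 0;
}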
+ */ +static int +frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) +{ + struct rpcrdma_mr_seg *seg1 = seg; + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct ib_send_wr invalidate_wr, *bad_wr; + int rc, nsegs = seg->mr_nsegs; + struct ib_device *device; + + seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; + + memset(&invalidate_wr, 0, sizeof(invalidate_wr)); + invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw; + invalidate_wr.opcode = IB_WR_LOCAL_INV; + invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey; + DECR_CQCOUNT(&r_xprt->rx_ep); + + read_lock(&ia->ri_qplock); + device = ia->ri_id->device; + while (seg1->mr_nsegs--) + rpcrdma_unmap_one(device, seg++); + rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); + read_unlock(&ia->ri_qplock); + if (rc) + goto out_err; + return nsegs; + +out_err: + /* Force rpcrdma_buffer_get() to retry */ + seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE; + dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc); + return nsegs; +} + +/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in + * an unusable state. Find FRMRs in this state and dereg / reg + * each. FRMRs that are VALID and attached to an rpcrdma_req are + * also torn down. + * + * This gives all in-use FRMRs a fresh rkey and leaves them INVALID. + * + * This is invoked only in the transport connect worker in order + * to serialize with rpcrdma_register_frmr_external(). + */ +static void +frwr_op_reset(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct ib_device *device = r_xprt->rx_ia.ri_id->device; + unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; + struct ib_pd *pd = r_xprt->rx_ia.ri_pd; + struct rpcrdma_mw *r; + int rc; + + list_for_each_entry(r, &buf->rb_all, mw_all) { + if (r->r.frmr.fr_state == FRMR_IS_INVALID) + continue; + + __frwr_release(r); + rc = __frwr_init(r, pd, device, depth); + if (rc) { + dprintk("RPC: %s: mw %p left %s\n", + __func__, r, + (r->r.frmr.fr_state == FRMR_IS_STALE ? + "stale" : "valid")); + continue; + } + + r->r.frmr.fr_state = FRMR_IS_INVALID; + } +} + +static void +frwr_op_destroy(struct rpcrdma_buffer *buf) +{ + struct rpcrdma_mw *r; + + while (!list_empty(&buf->rb_all)) { + r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); + list_del(&r->mw_all); + __frwr_release(r); + kfree(r); + } +} + +const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = { + .ro_map = frwr_op_map, + .ro_unmap = frwr_op_unmap, + .ro_open = frwr_op_open, + .ro_maxpages = frwr_op_maxpages, + .ro_init = frwr_op_init, + .ro_reset = frwr_op_reset, + .ro_destroy = frwr_op_destroy, + .ro_displayname = "frwr", +}; diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c new file mode 100644 index 000000000000..ba518af16787 --- /dev/null +++ b/net/sunrpc/xprtrdma/physical_ops.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2015 Oracle. All rights reserved. + * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. + */ + +/* No-op chunk preparation. All client memory is pre-registered. + * Sometimes referred to as ALLPHYSICAL mode. + * + * Physical registration is simple because all client memory is + * pre-registered and never deregistered. This mode is good for + * adapter bring up, but is considered not safe: the server is + * trusted not to abuse its access to client memory not involved + * in RDMA I/O. 
+ */ + +#include "xprt_rdma.h" + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +# define RPCDBG_FACILITY RPCDBG_TRANS +#endif + +static int +physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, + struct rpcrdma_create_data_internal *cdata) +{ + return 0; +} + +/* PHYSICAL memory registration conveys one page per chunk segment. + */ +static size_t +physical_op_maxpages(struct rpcrdma_xprt *r_xprt) +{ + return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, + rpcrdma_max_segments(r_xprt)); +} + +static int +physical_op_init(struct rpcrdma_xprt *r_xprt) +{ + return 0; +} + +/* The client's physical memory is already exposed for + * remote access via RDMA READ or RDMA WRITE. + */ +static int +physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, + int nsegs, bool writing) +{ + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + + rpcrdma_map_one(ia->ri_id->device, seg, + rpcrdma_data_dir(writing)); + seg->mr_rkey = ia->ri_bind_mem->rkey; + seg->mr_base = seg->mr_dma; + seg->mr_nsegs = 1; + return 1; +} + +/* Unmap a memory region, but leave it registered. + */ +static int +physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) +{ + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + + read_lock(&ia->ri_qplock); + rpcrdma_unmap_one(ia->ri_id->device, seg); + read_unlock(&ia->ri_qplock); + + return 1; +} + +static void +physical_op_reset(struct rpcrdma_xprt *r_xprt) +{ +} + +static void +physical_op_destroy(struct rpcrdma_buffer *buf) +{ +} + +const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = { + .ro_map = physical_op_map, + .ro_unmap = physical_op_unmap, + .ro_open = physical_op_open, + .ro_maxpages = physical_op_maxpages, + .ro_init = physical_op_init, + .ro_reset = physical_op_reset, + .ro_destroy = physical_op_destroy, + .ro_displayname = "physical", +}; diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 91ffde82fa0c..2c53ea9e1b83 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -53,6 +53,14 @@ # define RPCDBG_FACILITY RPCDBG_TRANS #endif +enum rpcrdma_chunktype { + rpcrdma_noch = 0, + rpcrdma_readch, + rpcrdma_areadch, + rpcrdma_writech, + rpcrdma_replych +}; + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static const char transfertypes[][12] = { "pure inline", /* no chunks */ @@ -179,6 +187,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, struct rpcrdma_write_array *warray = NULL; struct rpcrdma_write_chunk *cur_wchunk = NULL; __be32 *iptr = headerp->rm_body.rm_chunks; + int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool); if (type == rpcrdma_readch || type == rpcrdma_areadch) { /* a read chunk - server will RDMA Read our memory */ @@ -201,9 +210,9 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, if (nsegs < 0) return nsegs; + map = r_xprt->rx_ia.ri_ops->ro_map; do { - n = rpcrdma_register_external(seg, nsegs, - cur_wchunk != NULL, r_xprt); + n = map(r_xprt, seg, nsegs, cur_wchunk != NULL); if (n <= 0) goto out; if (cur_rchunk) { /* read */ @@ -275,34 +284,13 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, return (unsigned char *)iptr - (unsigned char *)headerp; out: - if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) { - for (pos = 0; nchunks--;) - pos += rpcrdma_deregister_external( - &req->rl_segments[pos], r_xprt); - } - return n; -} + if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) + return n; -/* - * Marshal chunks. This routine returns the header length - * consumed by marshaling. 
- * - * Returns positive RPC/RDMA header size, or negative errno. - */ - -ssize_t -rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result) -{ - struct rpcrdma_req *req = rpcr_to_rdmar(rqst); - struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf); - - if (req->rl_rtype != rpcrdma_noch) - result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, - headerp, req->rl_rtype); - else if (req->rl_wtype != rpcrdma_noch) - result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf, - headerp, req->rl_wtype); - return result; + for (pos = 0; nchunks--;) + pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, + &req->rl_segments[pos]); + return n; } /* @@ -397,6 +385,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) char *base; size_t rpclen, padlen; ssize_t hdrlen; + enum rpcrdma_chunktype rtype, wtype; struct rpcrdma_msg *headerp; /* @@ -433,13 +422,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) * into pages; otherwise use reply chunks. */ if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst)) - req->rl_wtype = rpcrdma_noch; + wtype = rpcrdma_noch; else if (rqst->rq_rcv_buf.page_len == 0) - req->rl_wtype = rpcrdma_replych; + wtype = rpcrdma_replych; else if (rqst->rq_rcv_buf.flags & XDRBUF_READ) - req->rl_wtype = rpcrdma_writech; + wtype = rpcrdma_writech; else - req->rl_wtype = rpcrdma_replych; + wtype = rpcrdma_replych; /* * Chunks needed for arguments? @@ -456,16 +445,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) * TBD check NFSv4 setacl */ if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) - req->rl_rtype = rpcrdma_noch; + rtype = rpcrdma_noch; else if (rqst->rq_snd_buf.page_len == 0) - req->rl_rtype = rpcrdma_areadch; + rtype = rpcrdma_areadch; else - req->rl_rtype = rpcrdma_readch; + rtype = rpcrdma_readch; /* The following simplification is not true forever */ - if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych) - req->rl_wtype = rpcrdma_noch; - if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) { + if (rtype != rpcrdma_noch && wtype == rpcrdma_replych) + wtype = rpcrdma_noch; + if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) { dprintk("RPC: %s: cannot marshal multiple chunk lists\n", __func__); return -EIO; @@ -479,7 +468,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) * When padding is in use and applies to the transfer, insert * it and change the message type. */ - if (req->rl_rtype == rpcrdma_noch) { + if (rtype == rpcrdma_noch) { padlen = rpcrdma_inline_pullup(rqst, RPCRDMA_INLINE_PAD_VALUE(rqst)); @@ -494,7 +483,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero; headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero; hdrlen += 2 * sizeof(u32); /* extra words in padhdr */ - if (req->rl_wtype != rpcrdma_noch) { + if (wtype != rpcrdma_noch) { dprintk("RPC: %s: invalid chunk list\n", __func__); return -EIO; @@ -515,18 +504,26 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) * on receive. Therefore, we request a reply chunk * for non-writes wherever feasible and efficient. 
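Editorial note: rpcrdma_marshal_req() above now derives the read and write chunk types into local rtype/wtype variables instead of caching them in the request. The decision itself is a simple size-and-shape test on the send and receive buffers; a hedged standalone sketch (the threshold values and the XDRBUF_READ-style hint are illustrative):

#include <stdio.h>
#include <stdbool.h>

enum chunktype { noch, readch, areadch, writech, replych };

/* Illustrative buffer description; the real code inspects struct xdr_buf
 * and the per-request inline thresholds. */
struct buf {
        unsigned int len;       /* total bytes in the buffer */
        unsigned int page_len;  /* bytes carried in pages */
        bool is_read;           /* XDRBUF_READ-style hint */
};

static enum chunktype choose_wtype(const struct buf *rcv, unsigned int inline_rsize)
{
        if (rcv->len <= inline_rsize)
                return noch;            /* reply fits inline */
        if (rcv->page_len == 0)
                return replych;         /* large, but no page data */
        if (rcv->is_read)
                return writech;         /* e.g. NFS READ payload */
        return replych;
}

static enum chunktype choose_rtype(const struct buf *snd, unsigned int inline_wsize)
{
        if (snd->len <= inline_wsize)
                return noch;            /* argument fits inline */
        if (snd->page_len == 0)
                return areadch;         /* server reads the whole argument */
        return readch;                  /* read chunk for the page data */
}

int main(void)
{
        struct buf rcv = { .len = 8192, .page_len = 4096, .is_read = true };
        struct buf snd = { .len = 512,  .page_len = 0,    .is_read = false };

        printf("wtype=%d rtype=%d\n",
               choose_wtype(&rcv, 1024), choose_rtype(&snd, 1024));
        /* prints wtype=3 (writech) rtype=0 (noch) */
        return 0;
}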
*/ - if (req->rl_wtype == rpcrdma_noch) - req->rl_wtype = rpcrdma_replych; + if (wtype == rpcrdma_noch) + wtype = rpcrdma_replych; } } - hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen); + if (rtype != rpcrdma_noch) { + hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, + headerp, rtype); + wtype = rtype; /* simplify dprintk */ + + } else if (wtype != rpcrdma_noch) { + hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf, + headerp, wtype); + } if (hdrlen < 0) return hdrlen; dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd" " headerp 0x%p base 0x%p lkey 0x%x\n", - __func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen, + __func__, transfertypes[wtype], hdrlen, rpclen, padlen, headerp, base, rdmab_lkey(req->rl_rdmabuf)); /* diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 2e192baa59f3..54f23b1be986 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -157,12 +157,47 @@ static struct ctl_table sunrpc_table[] = { static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */ static void +xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap) +{ + struct sockaddr_in *sin = (struct sockaddr_in *)sap; + char buf[20]; + + snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); + xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); + + xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA; +} + +static void +xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap) +{ + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; + char buf[40]; + + snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); + xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); + + xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6; +} + +static void xprt_rdma_format_addresses(struct rpc_xprt *xprt) { struct sockaddr *sap = (struct sockaddr *) &rpcx_to_rdmad(xprt).addr; - struct sockaddr_in *sin = (struct sockaddr_in *)sap; - char buf[64]; + char buf[128]; + + switch (sap->sa_family) { + case AF_INET: + xprt_rdma_format_addresses4(xprt, sap); + break; + case AF_INET6: + xprt_rdma_format_addresses6(xprt, sap); + break; + default: + pr_err("rpcrdma: Unrecognized address family\n"); + return; + } (void)rpc_ntop(sap, buf, sizeof(buf)); xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); @@ -170,16 +205,10 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt) snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); - xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; - - snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); - xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); - snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); - /* netid */ - xprt->address_strings[RPC_DISPLAY_NETID] = "rdma"; + xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; } static void @@ -377,7 +406,10 @@ xprt_setup_rdma(struct xprt_create *args) xprt_rdma_connect_worker); xprt_rdma_format_addresses(xprt); - xprt->max_payload = rpcrdma_max_payload(new_xprt); + xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt); + if (xprt->max_payload == 0) + goto out4; + xprt->max_payload <<= PAGE_SHIFT; dprintk("RPC: %s: transport data payload maximum: %zu bytes\n", __func__, xprt->max_payload); @@ -552,8 +584,8 @@ xprt_rdma_free(void *buffer) for (i = 0; req->rl_nchunks;) { 
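Editorial note: xprt_rdma_format_addresses() above is split into per-family helpers; the IPv4 helper renders the hex presentation address as eight hex digits of the host-order address, and the IPv6 helper uses the kernel's %pi6 format. A tiny userspace equivalent of the IPv4 case (buffer size and address are arbitrary):

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Build the RPC_DISPLAY_HEX_ADDR-style string for an IPv4 address:
 * "%08x" of the address converted to host byte order. */
static void format_hex_addr(const struct sockaddr_in *sin, char *buf, size_t len)
{
        snprintf(buf, len, "%08x", ntohl(sin->sin_addr.s_addr));
}

int main(void)
{
        struct sockaddr_in sin = { .sin_family = AF_INET };
        char buf[20];

        inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
        format_hex_addr(&sin, buf, sizeof(buf));
        printf("%s\n", buf);    /* c0000201 */
        return 0;
}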
--req->rl_nchunks; - i += rpcrdma_deregister_external( - &req->rl_segments[i], r_xprt); + i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, + &req->rl_segments[i]); } rpcrdma_buffer_put(req); @@ -579,10 +611,7 @@ xprt_rdma_send_request(struct rpc_task *task) struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); int rc = 0; - if (req->rl_niovs == 0) - rc = rpcrdma_marshal_req(rqst); - else if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_ALLPHYSICAL) - rc = rpcrdma_marshal_chunks(rqst, 0); + rc = rpcrdma_marshal_req(rqst); if (rc < 0) goto failed_marshal; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 124676c13780..4870d272e006 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -50,6 +50,7 @@ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/prefetch.h> +#include <linux/sunrpc/addr.h> #include <asm/bitops.h> #include "xprt_rdma.h" @@ -62,9 +63,6 @@ # define RPCDBG_FACILITY RPCDBG_TRANS #endif -static void rpcrdma_reset_frmrs(struct rpcrdma_ia *); -static void rpcrdma_reset_fmrs(struct rpcrdma_ia *); - /* * internal functions */ @@ -188,7 +186,7 @@ static const char * const wc_status[] = { "remote access error", "remote operation error", "transport retry counter exceeded", - "RNR retrycounter exceeded", + "RNR retry counter exceeded", "local RDD violation error", "remove invalid RD request", "operation aborted", @@ -206,21 +204,17 @@ static const char * const wc_status[] = { static void rpcrdma_sendcq_process_wc(struct ib_wc *wc) { - if (likely(wc->status == IB_WC_SUCCESS)) - return; - /* WARNING: Only wr_id and status are reliable at this point */ - if (wc->wr_id == 0ULL) { - if (wc->status != IB_WC_WR_FLUSH_ERR) + if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) { + if (wc->status != IB_WC_SUCCESS && + wc->status != IB_WC_WR_FLUSH_ERR) pr_err("RPC: %s: SEND: %s\n", __func__, COMPLETION_MSG(wc->status)); } else { struct rpcrdma_mw *r; r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; - r->r.frmr.fr_state = FRMR_IS_STALE; - pr_err("RPC: %s: frmr %p (stale): %s\n", - __func__, r, COMPLETION_MSG(wc->status)); + r->mw_sendcompletion(wc); } } @@ -424,7 +418,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event) struct rpcrdma_ia *ia = &xprt->rx_ia; struct rpcrdma_ep *ep = &xprt->rx_ep; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) - struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr; + struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr; #endif struct ib_qp_attr *attr = &ia->ri_qp_attr; struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr; @@ -480,9 +474,8 @@ connected: wake_up_all(&ep->rep_connect_wait); /*FALLTHROUGH*/ default: - dprintk("RPC: %s: %pI4:%u (ep 0x%p): %s\n", - __func__, &addr->sin_addr.s_addr, - ntohs(addr->sin_port), ep, + dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n", + __func__, sap, rpc_get_port(sap), ep, CONNECTION_MSG(event->event)); break; } @@ -491,19 +484,16 @@ connected: if (connstate == 1) { int ird = attr->max_dest_rd_atomic; int tird = ep->rep_remote_cma.responder_resources; - printk(KERN_INFO "rpcrdma: connection to %pI4:%u " - "on %s, memreg %d slots %d ird %d%s\n", - &addr->sin_addr.s_addr, - ntohs(addr->sin_port), + + pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n", + sap, rpc_get_port(sap), ia->ri_id->device->name, - ia->ri_memreg_strategy, + ia->ri_ops->ro_displayname, xprt->rx_buf.rb_max_requests, ird, ird < 4 && ird < tird / 2 ? 
" (low!)" : ""); } else if (connstate < 0) { - printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n", - &addr->sin_addr.s_addr, - ntohs(addr->sin_port), - connstate); + pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n", + sap, rpc_get_port(sap), connstate); } #endif @@ -621,17 +611,13 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) if (memreg == RPCRDMA_FRMR) { /* Requires both frmr reg and local dma lkey */ - if ((devattr->device_cap_flags & + if (((devattr->device_cap_flags & (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != - (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) { + (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) || + (devattr->max_fast_reg_page_list_len == 0)) { dprintk("RPC: %s: FRMR registration " "not supported by HCA\n", __func__); memreg = RPCRDMA_MTHCAFMR; - } else { - /* Mind the ia limit on FRMR page list depth */ - ia->ri_max_frmr_depth = min_t(unsigned int, - RPCRDMA_MAX_DATA_SEGS, - devattr->max_fast_reg_page_list_len); } } if (memreg == RPCRDMA_MTHCAFMR) { @@ -652,13 +638,16 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) */ switch (memreg) { case RPCRDMA_FRMR: + ia->ri_ops = &rpcrdma_frwr_memreg_ops; break; case RPCRDMA_ALLPHYSICAL: + ia->ri_ops = &rpcrdma_physical_memreg_ops; mem_priv = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ; goto register_setup; case RPCRDMA_MTHCAFMR: + ia->ri_ops = &rpcrdma_fmr_memreg_ops; if (ia->ri_have_dma_lkey) break; mem_priv = IB_ACCESS_LOCAL_WRITE; @@ -678,8 +667,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) rc = -ENOMEM; goto out3; } - dprintk("RPC: %s: memory registration strategy is %d\n", - __func__, memreg); + dprintk("RPC: %s: memory registration strategy is '%s'\n", + __func__, ia->ri_ops->ro_displayname); /* Else will do memory reg/dereg for each chunk */ ia->ri_memreg_strategy = memreg; @@ -743,49 +732,11 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; ep->rep_attr.qp_context = ep; - /* send_cq and recv_cq initialized below */ ep->rep_attr.srq = NULL; ep->rep_attr.cap.max_send_wr = cdata->max_requests; - switch (ia->ri_memreg_strategy) { - case RPCRDMA_FRMR: { - int depth = 7; - - /* Add room for frmr register and invalidate WRs. - * 1. FRMR reg WR for head - * 2. FRMR invalidate WR for head - * 3. N FRMR reg WRs for pagelist - * 4. N FRMR invalidate WRs for pagelist - * 5. FRMR reg WR for tail - * 6. FRMR invalidate WR for tail - * 7. The RDMA_SEND WR - */ - - /* Calculate N if the device max FRMR depth is smaller than - * RPCRDMA_MAX_DATA_SEGS. - */ - if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) { - int delta = RPCRDMA_MAX_DATA_SEGS - - ia->ri_max_frmr_depth; - - do { - depth += 2; /* FRMR reg + invalidate */ - delta -= ia->ri_max_frmr_depth; - } while (delta > 0); - - } - ep->rep_attr.cap.max_send_wr *= depth; - if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) { - cdata->max_requests = devattr->max_qp_wr / depth; - if (!cdata->max_requests) - return -EINVAL; - ep->rep_attr.cap.max_send_wr = cdata->max_requests * - depth; - } - break; - } - default: - break; - } + rc = ia->ri_ops->ro_open(ia, ep, cdata); + if (rc) + return rc; ep->rep_attr.cap.max_recv_wr = cdata->max_requests; ep->rep_attr.cap.max_send_sge = (cdata->padding ? 
4 : 2); ep->rep_attr.cap.max_recv_sge = 1; @@ -944,21 +895,9 @@ retry: rpcrdma_ep_disconnect(ep, ia); rpcrdma_flush_cqs(ep); - switch (ia->ri_memreg_strategy) { - case RPCRDMA_FRMR: - rpcrdma_reset_frmrs(ia); - break; - case RPCRDMA_MTHCAFMR: - rpcrdma_reset_fmrs(ia); - break; - case RPCRDMA_ALLPHYSICAL: - break; - default: - rc = -EIO; - goto out; - } - xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); + ia->ri_ops->ro_reset(xprt); + id = rpcrdma_create_id(xprt, ia, (struct sockaddr *)&xprt->rx_data.addr); if (IS_ERR(id)) { @@ -1123,91 +1062,6 @@ out: return ERR_PTR(rc); } -static int -rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf) -{ - int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ; - struct ib_fmr_attr fmr_attr = { - .max_pages = RPCRDMA_MAX_DATA_SEGS, - .max_maps = 1, - .page_shift = PAGE_SHIFT - }; - struct rpcrdma_mw *r; - int i, rc; - - i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; - dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i); - - while (i--) { - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (r == NULL) - return -ENOMEM; - - r->r.fmr = ib_alloc_fmr(ia->ri_pd, mr_access_flags, &fmr_attr); - if (IS_ERR(r->r.fmr)) { - rc = PTR_ERR(r->r.fmr); - dprintk("RPC: %s: ib_alloc_fmr failed %i\n", - __func__, rc); - goto out_free; - } - - list_add(&r->mw_list, &buf->rb_mws); - list_add(&r->mw_all, &buf->rb_all); - } - return 0; - -out_free: - kfree(r); - return rc; -} - -static int -rpcrdma_init_frmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf) -{ - struct rpcrdma_frmr *f; - struct rpcrdma_mw *r; - int i, rc; - - i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; - dprintk("RPC: %s: initalizing %d FRMRs\n", __func__, i); - - while (i--) { - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (r == NULL) - return -ENOMEM; - f = &r->r.frmr; - - f->fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd, - ia->ri_max_frmr_depth); - if (IS_ERR(f->fr_mr)) { - rc = PTR_ERR(f->fr_mr); - dprintk("RPC: %s: ib_alloc_fast_reg_mr " - "failed %i\n", __func__, rc); - goto out_free; - } - - f->fr_pgl = ib_alloc_fast_reg_page_list(ia->ri_id->device, - ia->ri_max_frmr_depth); - if (IS_ERR(f->fr_pgl)) { - rc = PTR_ERR(f->fr_pgl); - dprintk("RPC: %s: ib_alloc_fast_reg_page_list " - "failed %i\n", __func__, rc); - - ib_dereg_mr(f->fr_mr); - goto out_free; - } - - list_add(&r->mw_list, &buf->rb_mws); - list_add(&r->mw_all, &buf->rb_all); - } - - return 0; - -out_free: - kfree(r); - return rc; -} - int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) { @@ -1244,22 +1098,9 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) buf->rb_recv_bufs = (struct rpcrdma_rep **) p; p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests]; - INIT_LIST_HEAD(&buf->rb_mws); - INIT_LIST_HEAD(&buf->rb_all); - switch (ia->ri_memreg_strategy) { - case RPCRDMA_FRMR: - rc = rpcrdma_init_frmrs(ia, buf); - if (rc) - goto out; - break; - case RPCRDMA_MTHCAFMR: - rc = rpcrdma_init_fmrs(ia, buf); - if (rc) - goto out; - break; - default: - break; - } + rc = ia->ri_ops->ro_init(r_xprt); + if (rc) + goto out; for (i = 0; i < buf->rb_max_requests; i++) { struct rpcrdma_req *req; @@ -1311,47 +1152,6 @@ rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req) kfree(req); } -static void -rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf) -{ - struct rpcrdma_mw *r; - int rc; - - while (!list_empty(&buf->rb_all)) { - r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); - list_del(&r->mw_all); - list_del(&r->mw_list); - - rc = ib_dealloc_fmr(r->r.fmr); - if (rc) - dprintk("RPC: %s: ib_dealloc_fmr 
failed %i\n", - __func__, rc); - - kfree(r); - } -} - -static void -rpcrdma_destroy_frmrs(struct rpcrdma_buffer *buf) -{ - struct rpcrdma_mw *r; - int rc; - - while (!list_empty(&buf->rb_all)) { - r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); - list_del(&r->mw_all); - list_del(&r->mw_list); - - rc = ib_dereg_mr(r->r.frmr.fr_mr); - if (rc) - dprintk("RPC: %s: ib_dereg_mr failed %i\n", - __func__, rc); - ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); - - kfree(r); - } -} - void rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) { @@ -1372,104 +1172,11 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]); } - switch (ia->ri_memreg_strategy) { - case RPCRDMA_FRMR: - rpcrdma_destroy_frmrs(buf); - break; - case RPCRDMA_MTHCAFMR: - rpcrdma_destroy_fmrs(buf); - break; - default: - break; - } + ia->ri_ops->ro_destroy(buf); kfree(buf->rb_pool); } -/* After a disconnect, unmap all FMRs. - * - * This is invoked only in the transport connect worker in order - * to serialize with rpcrdma_register_fmr_external(). - */ -static void -rpcrdma_reset_fmrs(struct rpcrdma_ia *ia) -{ - struct rpcrdma_xprt *r_xprt = - container_of(ia, struct rpcrdma_xprt, rx_ia); - struct rpcrdma_buffer *buf = &r_xprt->rx_buf; - struct list_head *pos; - struct rpcrdma_mw *r; - LIST_HEAD(l); - int rc; - - list_for_each(pos, &buf->rb_all) { - r = list_entry(pos, struct rpcrdma_mw, mw_all); - - INIT_LIST_HEAD(&l); - list_add(&r->r.fmr->list, &l); - rc = ib_unmap_fmr(&l); - if (rc) - dprintk("RPC: %s: ib_unmap_fmr failed %i\n", - __func__, rc); - } -} - -/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in - * an unusable state. Find FRMRs in this state and dereg / reg - * each. FRMRs that are VALID and attached to an rpcrdma_req are - * also torn down. - * - * This gives all in-use FRMRs a fresh rkey and leaves them INVALID. - * - * This is invoked only in the transport connect worker in order - * to serialize with rpcrdma_register_frmr_external(). - */ -static void -rpcrdma_reset_frmrs(struct rpcrdma_ia *ia) -{ - struct rpcrdma_xprt *r_xprt = - container_of(ia, struct rpcrdma_xprt, rx_ia); - struct rpcrdma_buffer *buf = &r_xprt->rx_buf; - struct list_head *pos; - struct rpcrdma_mw *r; - int rc; - - list_for_each(pos, &buf->rb_all) { - r = list_entry(pos, struct rpcrdma_mw, mw_all); - - if (r->r.frmr.fr_state == FRMR_IS_INVALID) - continue; - - rc = ib_dereg_mr(r->r.frmr.fr_mr); - if (rc) - dprintk("RPC: %s: ib_dereg_mr failed %i\n", - __func__, rc); - ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); - - r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd, - ia->ri_max_frmr_depth); - if (IS_ERR(r->r.frmr.fr_mr)) { - rc = PTR_ERR(r->r.frmr.fr_mr); - dprintk("RPC: %s: ib_alloc_fast_reg_mr" - " failed %i\n", __func__, rc); - continue; - } - r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list( - ia->ri_id->device, - ia->ri_max_frmr_depth); - if (IS_ERR(r->r.frmr.fr_pgl)) { - rc = PTR_ERR(r->r.frmr.fr_pgl); - dprintk("RPC: %s: " - "ib_alloc_fast_reg_page_list " - "failed %i\n", __func__, rc); - - ib_dereg_mr(r->r.frmr.fr_mr); - continue; - } - r->r.frmr.fr_state = FRMR_IS_INVALID; - } -} - /* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving * some req segments uninitialized. */ @@ -1509,7 +1216,7 @@ rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) } } -/* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external(). +/* rpcrdma_unmap_one() was already done during deregistration. * Redo only the ib_post_send(). 
*/ static void @@ -1729,6 +1436,14 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) * Wrappers for internal-use kmalloc memory registration, used by buffer code. */ +void +rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg) +{ + dprintk("RPC: map_one: offset %p iova %llx len %zu\n", + seg->mr_offset, + (unsigned long long)seg->mr_dma, seg->mr_dmalen); +} + static int rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, struct ib_mr **mrp, struct ib_sge *iov) @@ -1854,287 +1569,6 @@ rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) } /* - * Wrappers for chunk registration, shared by read/write chunk code. - */ - -static void -rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing) -{ - seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - seg->mr_dmalen = seg->mr_len; - if (seg->mr_page) - seg->mr_dma = ib_dma_map_page(ia->ri_id->device, - seg->mr_page, offset_in_page(seg->mr_offset), - seg->mr_dmalen, seg->mr_dir); - else - seg->mr_dma = ib_dma_map_single(ia->ri_id->device, - seg->mr_offset, - seg->mr_dmalen, seg->mr_dir); - if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) { - dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n", - __func__, - (unsigned long long)seg->mr_dma, - seg->mr_offset, seg->mr_dmalen); - } -} - -static void -rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg) -{ - if (seg->mr_page) - ib_dma_unmap_page(ia->ri_id->device, - seg->mr_dma, seg->mr_dmalen, seg->mr_dir); - else - ib_dma_unmap_single(ia->ri_id->device, - seg->mr_dma, seg->mr_dmalen, seg->mr_dir); -} - -static int -rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg, - int *nsegs, int writing, struct rpcrdma_ia *ia, - struct rpcrdma_xprt *r_xprt) -{ - struct rpcrdma_mr_seg *seg1 = seg; - struct rpcrdma_mw *mw = seg1->rl_mw; - struct rpcrdma_frmr *frmr = &mw->r.frmr; - struct ib_mr *mr = frmr->fr_mr; - struct ib_send_wr fastreg_wr, *bad_wr; - u8 key; - int len, pageoff; - int i, rc; - int seg_len; - u64 pa; - int page_no; - - pageoff = offset_in_page(seg1->mr_offset); - seg1->mr_offset -= pageoff; /* start of page */ - seg1->mr_len += pageoff; - len = -pageoff; - if (*nsegs > ia->ri_max_frmr_depth) - *nsegs = ia->ri_max_frmr_depth; - for (page_no = i = 0; i < *nsegs;) { - rpcrdma_map_one(ia, seg, writing); - pa = seg->mr_dma; - for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) { - frmr->fr_pgl->page_list[page_no++] = pa; - pa += PAGE_SIZE; - } - len += seg->mr_len; - ++seg; - ++i; - /* Check for holes */ - if ((i < *nsegs && offset_in_page(seg->mr_offset)) || - offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) - break; - } - dprintk("RPC: %s: Using frmr %p to map %d segments\n", - __func__, mw, i); - - frmr->fr_state = FRMR_IS_VALID; - - memset(&fastreg_wr, 0, sizeof(fastreg_wr)); - fastreg_wr.wr_id = (unsigned long)(void *)mw; - fastreg_wr.opcode = IB_WR_FAST_REG_MR; - fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma; - fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl; - fastreg_wr.wr.fast_reg.page_list_len = page_no; - fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; - fastreg_wr.wr.fast_reg.length = page_no << PAGE_SHIFT; - if (fastreg_wr.wr.fast_reg.length < len) { - rc = -EIO; - goto out_err; - } - - /* Bump the key */ - key = (u8)(mr->rkey & 0x000000FF); - ib_update_fast_reg_key(mr, ++key); - - fastreg_wr.wr.fast_reg.access_flags = (writing ? 
- IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : - IB_ACCESS_REMOTE_READ); - fastreg_wr.wr.fast_reg.rkey = mr->rkey; - DECR_CQCOUNT(&r_xprt->rx_ep); - - rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); - if (rc) { - dprintk("RPC: %s: failed ib_post_send for register," - " status %i\n", __func__, rc); - ib_update_fast_reg_key(mr, --key); - goto out_err; - } else { - seg1->mr_rkey = mr->rkey; - seg1->mr_base = seg1->mr_dma + pageoff; - seg1->mr_nsegs = i; - seg1->mr_len = len; - } - *nsegs = i; - return 0; -out_err: - frmr->fr_state = FRMR_IS_INVALID; - while (i--) - rpcrdma_unmap_one(ia, --seg); - return rc; -} - -static int -rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg, - struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt) -{ - struct rpcrdma_mr_seg *seg1 = seg; - struct ib_send_wr invalidate_wr, *bad_wr; - int rc; - - seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; - - memset(&invalidate_wr, 0, sizeof invalidate_wr); - invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw; - invalidate_wr.opcode = IB_WR_LOCAL_INV; - invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey; - DECR_CQCOUNT(&r_xprt->rx_ep); - - read_lock(&ia->ri_qplock); - while (seg1->mr_nsegs--) - rpcrdma_unmap_one(ia, seg++); - rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); - read_unlock(&ia->ri_qplock); - if (rc) { - /* Force rpcrdma_buffer_get() to retry */ - seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE; - dprintk("RPC: %s: failed ib_post_send for invalidate," - " status %i\n", __func__, rc); - } - return rc; -} - -static int -rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg, - int *nsegs, int writing, struct rpcrdma_ia *ia) -{ - struct rpcrdma_mr_seg *seg1 = seg; - u64 physaddrs[RPCRDMA_MAX_DATA_SEGS]; - int len, pageoff, i, rc; - - pageoff = offset_in_page(seg1->mr_offset); - seg1->mr_offset -= pageoff; /* start of page */ - seg1->mr_len += pageoff; - len = -pageoff; - if (*nsegs > RPCRDMA_MAX_DATA_SEGS) - *nsegs = RPCRDMA_MAX_DATA_SEGS; - for (i = 0; i < *nsegs;) { - rpcrdma_map_one(ia, seg, writing); - physaddrs[i] = seg->mr_dma; - len += seg->mr_len; - ++seg; - ++i; - /* Check for holes */ - if ((i < *nsegs && offset_in_page(seg->mr_offset)) || - offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) - break; - } - rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma); - if (rc) { - dprintk("RPC: %s: failed ib_map_phys_fmr " - "%u@0x%llx+%i (%d)... 
status %i\n", __func__, - len, (unsigned long long)seg1->mr_dma, - pageoff, i, rc); - while (i--) - rpcrdma_unmap_one(ia, --seg); - } else { - seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey; - seg1->mr_base = seg1->mr_dma + pageoff; - seg1->mr_nsegs = i; - seg1->mr_len = len; - } - *nsegs = i; - return rc; -} - -static int -rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg, - struct rpcrdma_ia *ia) -{ - struct rpcrdma_mr_seg *seg1 = seg; - LIST_HEAD(l); - int rc; - - list_add(&seg1->rl_mw->r.fmr->list, &l); - rc = ib_unmap_fmr(&l); - read_lock(&ia->ri_qplock); - while (seg1->mr_nsegs--) - rpcrdma_unmap_one(ia, seg++); - read_unlock(&ia->ri_qplock); - if (rc) - dprintk("RPC: %s: failed ib_unmap_fmr," - " status %i\n", __func__, rc); - return rc; -} - -int -rpcrdma_register_external(struct rpcrdma_mr_seg *seg, - int nsegs, int writing, struct rpcrdma_xprt *r_xprt) -{ - struct rpcrdma_ia *ia = &r_xprt->rx_ia; - int rc = 0; - - switch (ia->ri_memreg_strategy) { - - case RPCRDMA_ALLPHYSICAL: - rpcrdma_map_one(ia, seg, writing); - seg->mr_rkey = ia->ri_bind_mem->rkey; - seg->mr_base = seg->mr_dma; - seg->mr_nsegs = 1; - nsegs = 1; - break; - - /* Registration using frmr registration */ - case RPCRDMA_FRMR: - rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt); - break; - - /* Registration using fmr memory registration */ - case RPCRDMA_MTHCAFMR: - rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia); - break; - - default: - return -EIO; - } - if (rc) - return rc; - - return nsegs; -} - -int -rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg, - struct rpcrdma_xprt *r_xprt) -{ - struct rpcrdma_ia *ia = &r_xprt->rx_ia; - int nsegs = seg->mr_nsegs, rc; - - switch (ia->ri_memreg_strategy) { - - case RPCRDMA_ALLPHYSICAL: - read_lock(&ia->ri_qplock); - rpcrdma_unmap_one(ia, seg); - read_unlock(&ia->ri_qplock); - break; - - case RPCRDMA_FRMR: - rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt); - break; - - case RPCRDMA_MTHCAFMR: - rc = rpcrdma_deregister_fmr_external(seg, ia); - break; - - default: - break; - } - return nsegs; -} - -/* * Prepost any receive buffer, then post send. * * Receive buffer is donated to hardware, reclaimed upon recv completion. @@ -2156,7 +1590,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia, } send_wr.next = NULL; - send_wr.wr_id = 0ULL; /* no send cookie */ + send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION; send_wr.sg_list = req->rl_send_iov; send_wr.num_sge = req->rl_niovs; send_wr.opcode = IB_WR_SEND; @@ -2215,43 +1649,24 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia, return rc; } -/* Physical mapping means one Read/Write list entry per-page. - * All list entries must fit within an inline buffer - * - * NB: The server must return a Write list for NFS READ, - * which has the same constraint. Factor in the inline - * rsize as well. +/* How many chunk list items fit within our inline buffers? 
*/ -static size_t -rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt) +unsigned int +rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt) { struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; - unsigned int inline_size, pages; + int bytes, segments; - inline_size = min_t(unsigned int, - cdata->inline_wsize, cdata->inline_rsize); - inline_size -= RPCRDMA_HDRLEN_MIN; - pages = inline_size / sizeof(struct rpcrdma_segment); - return pages << PAGE_SHIFT; -} - -static size_t -rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt) -{ - return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT; -} - -size_t -rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt) -{ - size_t result; - - switch (r_xprt->rx_ia.ri_memreg_strategy) { - case RPCRDMA_ALLPHYSICAL: - result = rpcrdma_physical_max_payload(r_xprt); - break; - default: - result = rpcrdma_mr_max_payload(r_xprt); + bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize); + bytes -= RPCRDMA_HDRLEN_MIN; + if (bytes < sizeof(struct rpcrdma_segment) * 2) { + pr_warn("RPC: %s: inline threshold too small\n", + __func__); + return 0; } - return result; + + segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1); + dprintk("RPC: %s: max chunk list size = %d segments\n", + __func__, segments); + return segments; } diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 0a16fb6f0885..78e0b8beaa36 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -60,6 +60,7 @@ * Interface Adapter -- one per transport instance */ struct rpcrdma_ia { + const struct rpcrdma_memreg_ops *ri_ops; rwlock_t ri_qplock; struct rdma_cm_id *ri_id; struct ib_pd *ri_pd; @@ -105,6 +106,10 @@ struct rpcrdma_ep { #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) +/* Force completion handler to ignore the signal + */ +#define RPCRDMA_IGNORE_COMPLETION (0ULL) + /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV * * The below structure appears at the front of a large region of kmalloc'd @@ -143,14 +148,6 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb) return (struct rpcrdma_msg *)rb->rg_base; } -enum rpcrdma_chunktype { - rpcrdma_noch = 0, - rpcrdma_readch, - rpcrdma_areadch, - rpcrdma_writech, - rpcrdma_replych -}; - /* * struct rpcrdma_rep -- this structure encapsulates state required to recv * and complete a reply, asychronously. 
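Editorial note: rpcrdma_max_segments() above converts the smaller of the two inline thresholds into a chunk-list segment count and rounds it down to a power of two with fls(). A hedged userspace sketch of that rounding (the header and segment sizes are made-up stand-ins):

#include <stdio.h>

/* Largest power of two that is <= n (for n > 0); this is what
 * 1 << (fls(n) - 1) computes in the kernel code. */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
        unsigned int p = 1;

        while (p <= n / 2)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int inline_size = 1024;        /* min(wsize, rsize), illustrative */
        unsigned int hdrlen = 28;               /* stand-in for RPCRDMA_HDRLEN_MIN */
        unsigned int seg_size = 16;             /* stand-in for sizeof(struct rpcrdma_segment) */
        unsigned int bytes = inline_size - hdrlen;
        unsigned int segments = rounddown_pow_of_two(bytes / seg_size);

        printf("%u\n", segments);               /* (1024 - 28) / 16 = 62 -> 32 */
        return 0;
}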
It needs several pieces of @@ -213,6 +210,7 @@ struct rpcrdma_mw { struct ib_fmr *fmr; struct rpcrdma_frmr frmr; } r; + void (*mw_sendcompletion)(struct ib_wc *); struct list_head mw_list; struct list_head mw_all; }; @@ -258,7 +256,6 @@ struct rpcrdma_req { unsigned int rl_niovs; /* 0, 2 or 4 */ unsigned int rl_nchunks; /* non-zero if chunks */ unsigned int rl_connect_cookie; /* retry detection */ - enum rpcrdma_chunktype rl_rtype, rl_wtype; struct rpcrdma_buffer *rl_buffer; /* home base for this structure */ struct rpcrdma_rep *rl_reply;/* holder for reply buffer */ struct ib_sge rl_send_iov[4]; /* for active requests */ @@ -340,6 +337,29 @@ struct rpcrdma_stats { }; /* + * Per-registration mode operations + */ +struct rpcrdma_xprt; +struct rpcrdma_memreg_ops { + int (*ro_map)(struct rpcrdma_xprt *, + struct rpcrdma_mr_seg *, int, bool); + int (*ro_unmap)(struct rpcrdma_xprt *, + struct rpcrdma_mr_seg *); + int (*ro_open)(struct rpcrdma_ia *, + struct rpcrdma_ep *, + struct rpcrdma_create_data_internal *); + size_t (*ro_maxpages)(struct rpcrdma_xprt *); + int (*ro_init)(struct rpcrdma_xprt *); + void (*ro_reset)(struct rpcrdma_xprt *); + void (*ro_destroy)(struct rpcrdma_buffer *); + const char *ro_displayname; +}; + +extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops; +extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops; +extern const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops; + +/* * RPCRDMA transport -- encapsulates the structures above for * integration with RPC. * @@ -398,16 +418,56 @@ void rpcrdma_buffer_put(struct rpcrdma_req *); void rpcrdma_recv_buffer_get(struct rpcrdma_req *); void rpcrdma_recv_buffer_put(struct rpcrdma_rep *); -int rpcrdma_register_external(struct rpcrdma_mr_seg *, - int, int, struct rpcrdma_xprt *); -int rpcrdma_deregister_external(struct rpcrdma_mr_seg *, - struct rpcrdma_xprt *); - struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *, size_t, gfp_t); void rpcrdma_free_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *); +unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *); + +/* + * Wrappers for chunk registration, shared by read/write chunk code. + */ + +void rpcrdma_mapping_error(struct rpcrdma_mr_seg *); + +static inline enum dma_data_direction +rpcrdma_data_dir(bool writing) +{ + return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; +} + +static inline void +rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg, + enum dma_data_direction direction) +{ + seg->mr_dir = direction; + seg->mr_dmalen = seg->mr_len; + + if (seg->mr_page) + seg->mr_dma = ib_dma_map_page(device, + seg->mr_page, offset_in_page(seg->mr_offset), + seg->mr_dmalen, seg->mr_dir); + else + seg->mr_dma = ib_dma_map_single(device, + seg->mr_offset, + seg->mr_dmalen, seg->mr_dir); + + if (ib_dma_mapping_error(device, seg->mr_dma)) + rpcrdma_mapping_error(seg); +} + +static inline void +rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg) +{ + if (seg->mr_page) + ib_dma_unmap_page(device, + seg->mr_dma, seg->mr_dmalen, seg->mr_dir); + else + ib_dma_unmap_single(device, + seg->mr_dma, seg->mr_dmalen, seg->mr_dir); +} + /* * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c */ @@ -418,9 +478,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *); /* * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c */ -ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t); int rpcrdma_marshal_req(struct rpc_rqst *); -size_t rpcrdma_max_payload(struct rpcrdma_xprt *); /* Temporary NFS request map cache. 
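Editorial note: the struct rpcrdma_memreg_ops table above, together with the ri_ops pointer that rpcrdma_ia_open() now selects, replaces the per-strategy switch statements scattered through verbs.c with function-pointer dispatch chosen once at setup time. A minimal illustration of that ops-table pattern in plain C (the two toy strategies are invented):

#include <stdio.h>

/* Per-strategy operations table, in the style of rpcrdma_memreg_ops. */
struct memreg_ops {
        int (*ro_map)(int nsegs);
        const char *ro_displayname;
};

static int fast_map(int nsegs)
{
        return nsegs;                   /* pretend everything maps at once */
}

static int slow_map(int nsegs)
{
        return nsegs ? 1 : 0;           /* pretend one segment per call */
}

static const struct memreg_ops fast_ops = {
        .ro_map         = fast_map,
        .ro_displayname = "fast",
};

static const struct memreg_ops slow_ops = {
        .ro_map         = slow_map,
        .ro_displayname = "slow",
};

int main(void)
{
        /* The strategy is chosen once; callers only ever see the ops table. */
        const struct memreg_ops *ops = &slow_ops;

        printf("strategy '%s' mapped %d segment(s)\n",
               ops->ro_displayname, ops->ro_map(8));
        return 0;
}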
Created in svc_rdma.c */ extern struct kmem_cache *svc_rdma_map_cachep; diff --git a/net/tipc/link.c b/net/tipc/link.c index a4cf364316de..14f09b3cb87c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr) /* Clean up all queues, except inputq: */ __skb_queue_purge(&l_ptr->outqueue); __skb_queue_purge(&l_ptr->deferred_queue); - skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq); - if (!skb_queue_empty(&l_ptr->inputq)) + if (!owner->inputq) + owner->inputq = &l_ptr->inputq; + skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); + if (!skb_queue_empty(owner->inputq)) owner->action_flags |= TIPC_MSG_EVT; - owner->inputq = &l_ptr->inputq; l_ptr->next_out = NULL; l_ptr->unacked_window = 0; l_ptr->checkpoint = 1; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index be2501538011..b6f84f6a2a09 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms)) return -EINVAL; + /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT + * as userspace might just pass through the capabilities from the IEs + * directly, rather than enforcing this restriction and returning an + * error in this case. + */ + if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { + params.ht_capa = NULL; + params.vht_capa = NULL; + } + /* When you run into this, adjust the code below for the new flag */ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cee479bc655c..638af0655aaf 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, * have the xfrm_state's. We need to wait for KM to * negotiate new SA's or bail out with error.*/ if (net->xfrm.sysctl_larval_drop) { - dst_release(dst); - xfrm_pols_put(pols, drop_pols); XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); - - return ERR_PTR(-EREMOTE); + err = -EREMOTE; + goto error; } err = -EAGAIN; @@ -2324,7 +2322,8 @@ nopol: error: dst_release(dst); dropdst: - dst_release(dst_orig); + if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) + dst_release(dst_orig); xfrm_pols_put(pols, drop_pols); return ERR_PTR(err); } @@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, struct sock *sk, int flags) { struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, - flags | XFRM_LOOKUP_QUEUE); + flags | XFRM_LOOKUP_QUEUE | + XFRM_LOOKUP_KEEP_DST_REF); if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) return make_blackhole(net, dst_orig->ops->family, dst_orig); diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 1684bcc78b34..5fde34326dcf 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, goto out; /* No partial writes. 
*/ - length = EINVAL; + length = -EINVAL; if (*ppos != 0) goto out; diff --git a/sound/core/control.c b/sound/core/control.c index 35324a8e83c8..eeb691d1911f 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, if (info->count < 1) return -EINVAL; + if (!*info->id.name) + return -EINVAL; + if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name)) + return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| diff --git a/sound/firewire/dice/dice-interface.h b/sound/firewire/dice/dice-interface.h index de7602bd69b5..27b044f84c81 100644 --- a/sound/firewire/dice/dice-interface.h +++ b/sound/firewire/dice/dice-interface.h @@ -299,23 +299,23 @@ #define RX_ISOCHRONOUS 0x008 /* + * Index of first quadlet to be interpreted; read/write. If > 0, that many + * quadlets at the beginning of each data block will be ignored, and all the + * audio and MIDI quadlets will follow. + */ +#define RX_SEQ_START 0x00c + +/* * The number of audio channels; read-only. There will be one quadlet per * channel. */ -#define RX_NUMBER_AUDIO 0x00c +#define RX_NUMBER_AUDIO 0x010 /* * The number of MIDI ports, 0-8; read-only. If > 0, there will be one * additional quadlet in each data block, following the audio quadlets. */ -#define RX_NUMBER_MIDI 0x010 - -/* - * Index of first quadlet to be interpreted; read/write. If > 0, that many - * quadlets at the beginning of each data block will be ignored, and all the - * audio and MIDI quadlets will follow. - */ -#define RX_SEQ_START 0x014 +#define RX_NUMBER_MIDI 0x014 /* * Names of all audio channels; read-only. Quadlets are byte-swapped. 
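Illustrative sketch, not part of the patch above: the snd_ctl_elem_add() hunk in sound/core/control.c rejects user-defined element names that are empty or that lack a terminating NUL inside their fixed-size buffer. The same bounded check, reduced to a standalone program with a simplified stand-in struct (the buffer size here is only for the example):

#include <stdio.h>
#include <string.h>

struct elem_id {
	char name[44];	/* fixed-size; must stay NUL-terminated */
};

static int validate_name(const struct elem_id *id)
{
	if (!*id->name)		/* empty name */
		return -1;
	if (strnlen(id->name, sizeof(id->name)) >= sizeof(id->name))
		return -1;	/* no terminating NUL within the buffer */
	return 0;
}

int main(void)
{
	struct elem_id id = { .name = "Master Playback Switch" };

	printf("%d\n", validate_name(&id));		/* 0: accepted */
	memset(id.name, 'A', sizeof(id.name));		/* unterminated */
	printf("%d\n", validate_name(&id));		/* -1: rejected */
	return 0;
}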
Names diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c index ecfe20fd4de5..f5c1d1bced59 100644 --- a/sound/firewire/dice/dice-proc.c +++ b/sound/firewire/dice/dice-proc.c @@ -99,9 +99,9 @@ static void dice_proc_read(struct snd_info_entry *entry, } tx; struct { u32 iso; + u32 seq_start; u32 number_audio; u32 number_midi; - u32 seq_start; char names[RX_NAMES_SIZE]; u32 ac3_caps; u32 ac3_enable; @@ -204,10 +204,10 @@ static void dice_proc_read(struct snd_info_entry *entry, break; snd_iprintf(buffer, "rx %u:\n", stream); snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso); + snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); snd_iprintf(buffer, " audio channels: %u\n", buf.rx.number_audio); snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi); - snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); if (quadlets >= 68) { dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE); snd_iprintf(buffer, " names: %s\n", buf.rx.names); diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c index 5f17b77ee152..f0e4d502d604 100644 --- a/sound/firewire/iso-resources.c +++ b/sound/firewire/iso-resources.c @@ -26,7 +26,7 @@ int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) { r->channels_mask = ~0uLL; - r->unit = fw_unit_get(unit); + r->unit = unit; mutex_init(&r->mutex); r->allocated = false; @@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r) { WARN_ON(r->allocated); mutex_destroy(&r->mutex); - fw_unit_put(r->unit); } EXPORT_SYMBOL(fw_iso_resources_destroy); diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index a2ce773bdc62..17c2637d842c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1164,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, } } - if (!bus->no_response_fallback) + if (bus->no_response_fallback) return -1; if (!chip->polling_mode && chip->poll_count < 2) { diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index b680b4ec6331..8ec5289f8e05 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -687,12 +687,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, return val; } +/* is this a stereo widget or a stereo-to-mono mix? 
*/ +static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir) +{ + unsigned int wcaps = get_wcaps(codec, nid); + hda_nid_t conn; + + if (wcaps & AC_WCAP_STEREO) + return true; + if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX) + return false; + if (snd_hda_get_num_conns(codec, nid) != 1) + return false; + if (snd_hda_get_connections(codec, nid, &conn, 1) < 0) + return false; + return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO); +} + /* initialize the amp value (only at the first time) */ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) { unsigned int caps = query_amp_caps(codec, nid, dir); int val = get_amp_val_to_activate(codec, nid, dir, caps, false); - snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); + + if (is_stereo_amps(codec, nid, dir)) + snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); + else + snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); +} + +/* update the amp, doing in stereo or mono depending on NID */ +static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, + unsigned int mask, unsigned int val) +{ + if (is_stereo_amps(codec, nid, dir)) + return snd_hda_codec_amp_stereo(codec, nid, dir, idx, + mask, val); + else + return snd_hda_codec_amp_update(codec, nid, 0, dir, idx, + mask, val); } /* calculate amp value mask we can modify; @@ -732,7 +765,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir, return; val &= mask; - snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val); + update_amp(codec, nid, dir, idx, mask, val); } static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, @@ -4424,13 +4457,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) has_amp = nid_has_mute(codec, mix, HDA_INPUT); for (i = 0; i < nums; i++) { if (has_amp) - snd_hda_codec_amp_stereo(codec, mix, - HDA_INPUT, i, - 0xff, HDA_AMP_MUTE); + update_amp(codec, mix, HDA_INPUT, i, + 0xff, HDA_AMP_MUTE); else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) - snd_hda_codec_amp_stereo(codec, conn[i], - HDA_OUTPUT, 0, - 0xff, HDA_AMP_MUTE); + update_amp(codec, conn[i], HDA_OUTPUT, 0, + 0xff, HDA_AMP_MUTE); } } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4ca3d5d02436..a8a1e14272a1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Sunrise Point */ { PCI_DEVICE(0x8086, 0xa170), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, /* Sunrise Point-LP */ { PCI_DEVICE(0x8086, 0x9d70), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c index ce5a6da83419..05e19f78b4cb 100644 --- a/sound/pci/hda/hda_proc.c +++ b/sound/pci/hda/hda_proc.c @@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer, (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); } +/* is this a stereo widget or a stereo-to-mono mix? 
*/ +static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, + int dir, unsigned int wcaps, int indices) +{ + hda_nid_t conn; + + if (wcaps & AC_WCAP_STEREO) + return true; + /* check for a stereo-to-mono mix; it must be: + * only a single connection, only for input, and only a mixer widget + */ + if (indices != 1 || dir != HDA_INPUT || + get_wcaps_type(wcaps) != AC_WID_AUD_MIX) + return false; + + if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0) + return false; + /* the connection source is a stereo? */ + wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP); + return !!(wcaps & AC_WCAP_STEREO); +} + static void print_amp_vals(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid, - int dir, int stereo, int indices) + int dir, unsigned int wcaps, int indices) { unsigned int val; + bool stereo; int i; + stereo = is_stereo_amps(codec, nid, dir, wcaps, indices); + dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; for (i = 0; i < indices; i++) { snd_iprintf(buffer, " ["); @@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry, (codec->single_adc_amp && wid_type == AC_WID_AUD_IN)) print_amp_vals(buffer, codec, nid, HDA_INPUT, - wid_caps & AC_WCAP_STEREO, - 1); + wid_caps, 1); else print_amp_vals(buffer, codec, nid, HDA_INPUT, - wid_caps & AC_WCAP_STEREO, - conn_len); + wid_caps, conn_len); } if (wid_caps & AC_WCAP_OUT_AMP) { snd_iprintf(buffer, " Amp-Out caps: "); @@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry, if (wid_type == AC_WID_PIN && codec->pin_amp_workaround) print_amp_vals(buffer, codec, nid, HDA_OUTPUT, - wid_caps & AC_WCAP_STEREO, - conn_len); + wid_caps, conn_len); else print_amp_vals(buffer, codec, nid, HDA_OUTPUT, - wid_caps & AC_WCAP_STEREO, 1); + wid_caps, 1); } switch (wid_type) { diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 1589c9bcce3e..dd2b3d92071f 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), + SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81), SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), {} /* terminator */ @@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec) return -ENOMEM; spec->gen.automute_hook = cs_automute; + codec->single_adc_amp = 1; snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, cs420x_fixups); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fd3ed18670e9..da67ea8645a6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -223,6 +223,7 @@ enum { CXT_PINCFG_LENOVO_TP410, CXT_PINCFG_LEMOTE_A1004, CXT_PINCFG_LEMOTE_A1205, + CXT_PINCFG_COMPAQ_CQ60, CXT_FIXUP_STEREO_DMIC, CXT_FIXUP_INC_MIC_BOOST, CXT_FIXUP_HEADPHONE_MIC_PIN, @@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_PINS, .v.pins = cxt_pincfg_lemote, }, + [CXT_PINCFG_COMPAQ_CQ60] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + /* 0x17 was falsely set up as a mic, it should 0x1d */ + { 0x17, 0x400001f0 }, + { 0x1d, 0x97a70120 }, + { } + } + }, [CXT_FIXUP_STEREO_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_stereo_dmic, @@ 
-769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = { }; static const struct snd_pci_quirk cxt5051_fixups[] = { + SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), {} }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 526398a4a442..74382137b9f5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) { /* We currently only handle front, HP */ static hda_nid_t pins[] = { - 0x0f, 0x10, 0x14, 0x15, 0 + 0x0f, 0x10, 0x14, 0x15, 0x17, 0 }; hda_nid_t *p; for (p = pins; *p; p++) @@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c index b67480f1b1aa..4373ada95648 100644 --- a/sound/soc/codecs/adav80x.c +++ b/sound/soc/codecs/adav80x.c @@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); - unsigned int deemph = ucontrol->value.enumerated.item[0]; + unsigned int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; @@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = adav80x->deemph; + ucontrol->value.integer.value[0] = adav80x->deemph; return 0; }; diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c index 70861c7b1631..81b54a270bd8 100644 --- a/sound/soc/codecs/ak4641.c +++ b/sound/soc/codecs/ak4641.c @@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; @@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = ak4641->deemph; + ucontrol->value.integer.value[0] = ak4641->deemph; return 0; }; diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c index 632e89f793a7..2a58b1dccd2f 100644 --- a/sound/soc/codecs/ak4671.c +++ b/sound/soc/codecs/ak4671.c @@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = { }; static const struct snd_soc_dapm_route ak4671_intercon[] = { - {"DAC Left", "NULL", "PMPLL"}, - {"DAC Right", "NULL", "PMPLL"}, - {"ADC Left", "NULL", "PMPLL"}, - {"ADC Right", "NULL", "PMPLL"}, + {"DAC Left", NULL, "PMPLL"}, + {"DAC Right", NULL, "PMPLL"}, + {"ADC Left", 
NULL, "PMPLL"}, + {"ADC Right", NULL, "PMPLL"}, /* Outputs */ - {"LOUT1", "NULL", "LOUT1 Mixer"}, - {"ROUT1", "NULL", "ROUT1 Mixer"}, - {"LOUT2", "NULL", "LOUT2 Mix Amp"}, - {"ROUT2", "NULL", "ROUT2 Mix Amp"}, - {"LOUT3", "NULL", "LOUT3 Mixer"}, - {"ROUT3", "NULL", "ROUT3 Mixer"}, + {"LOUT1", NULL, "LOUT1 Mixer"}, + {"ROUT1", NULL, "ROUT1 Mixer"}, + {"LOUT2", NULL, "LOUT2 Mix Amp"}, + {"ROUT2", NULL, "ROUT2 Mix Amp"}, + {"LOUT3", NULL, "LOUT3 Mixer"}, + {"ROUT3", NULL, "ROUT3 Mixer"}, {"LOUT1 Mixer", "DACL", "DAC Left"}, {"ROUT1 Mixer", "DACR", "DAC Right"}, {"LOUT2 Mixer", "DACHL", "DAC Left"}, {"ROUT2 Mixer", "DACHR", "DAC Right"}, - {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"}, - {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"}, + {"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"}, + {"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"}, {"LOUT3 Mixer", "DACSL", "DAC Left"}, {"ROUT3 Mixer", "DACSR", "DAC Right"}, @@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = { {"LIN2", NULL, "Mic Bias"}, {"RIN2", NULL, "Mic Bias"}, - {"ADC Left", "NULL", "LIN MUX"}, - {"ADC Right", "NULL", "RIN MUX"}, + {"ADC Left", NULL, "LIN MUX"}, + {"ADC Right", NULL, "RIN MUX"}, /* Analog Loops */ - {"LIN1 Mixing Circuit", "NULL", "LIN1"}, - {"RIN1 Mixing Circuit", "NULL", "RIN1"}, - {"LIN2 Mixing Circuit", "NULL", "LIN2"}, - {"RIN2 Mixing Circuit", "NULL", "RIN2"}, - {"LIN3 Mixing Circuit", "NULL", "LIN3"}, - {"RIN3 Mixing Circuit", "NULL", "RIN3"}, - {"LIN4 Mixing Circuit", "NULL", "LIN4"}, - {"RIN4 Mixing Circuit", "NULL", "RIN4"}, + {"LIN1 Mixing Circuit", NULL, "LIN1"}, + {"RIN1 Mixing Circuit", NULL, "RIN1"}, + {"LIN2 Mixing Circuit", NULL, "LIN2"}, + {"RIN2 Mixing Circuit", NULL, "RIN2"}, + {"LIN3 Mixing Circuit", NULL, "LIN3"}, + {"RIN3 Mixing Circuit", NULL, "RIN3"}, + {"LIN4 Mixing Circuit", NULL, "LIN4"}, + {"RIN4 Mixing Circuit", NULL, "RIN4"}, {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index 79a4efcb894c..7d3a6accaf9a 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c @@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = cs4271->deemph; + ucontrol->value.integer.value[0] = cs4271->deemph; return 0; } @@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); - cs4271->deemph = ucontrol->value.enumerated.item[0]; + cs4271->deemph = ucontrol->value.integer.value[0]; return cs4271_set_deemph(codec); } diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c index ffe96175a8a5..911c26c705fc 100644 --- a/sound/soc/codecs/da732x.c +++ b/sound/soc/codecs/da732x.c @@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = { static const struct snd_soc_dapm_route da732x_dapm_routes[] = { /* Inputs */ - {"AUX1L PGA", "NULL", "AUX1L"}, - {"AUX1R PGA", "NULL", "AUX1R"}, + {"AUX1L PGA", NULL, "AUX1L"}, + {"AUX1R PGA", NULL, "AUX1R"}, {"MIC1 PGA", NULL, "MIC1"}, - {"MIC2 PGA", "NULL", "MIC2"}, - {"MIC3 PGA", "NULL", "MIC3"}, + {"MIC2 PGA", NULL, "MIC2"}, + {"MIC3 PGA", NULL, "MIC3"}, /* Capture Path */ {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, diff --git a/sound/soc/codecs/es8328.c 
b/sound/soc/codecs/es8328.c index f27325155ace..c5f35a07e8e4 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = es8328->deemph; + ucontrol->value.integer.value[0] = es8328->deemph; return 0; } @@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; int ret; if (deemph > 1) diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c index a722a023c262..477e13d30971 100644 --- a/sound/soc/codecs/pcm1681.c +++ b/sound/soc/codecs/pcm1681.c @@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = priv->deemph; + ucontrol->value.integer.value[0] = priv->deemph; return 0; } @@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); - priv->deemph = ucontrol->value.enumerated.item[0]; + priv->deemph = ucontrol->value.integer.value[0]; return pcm1681_set_deemph(codec); } diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index f374840a5a7c..9b541e52da8c 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -1198,7 +1198,7 @@ static struct dmi_system_id dmi_dell_dino[] = { .ident = "Dell Dino", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_BOARD_NAME, "0144P8") + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343") } }, { } diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index e182e6569bbd..3593a1496056 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1151,13 +1151,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec) /* Enable VDDC charge pump */ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; } else if (vddio >= 3100 && vdda >= 3100) { - /* - * if vddio and vddd > 3.1v, - * charge pump should be clean before set ana_pwr - */ - snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, - SGTL5000_VDDC_CHRGPMP_POWERUP, 0); - + ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP; /* VDDC use VDDIO rail */ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c index 47b257e41809..82095d6cd070 100644 --- a/sound/soc/codecs/sn95031.c +++ b/sound/soc/codecs/sn95031.c @@ -538,8 +538,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = { /* speaker map */ { "IHFOUTL", NULL, "Speaker Rail"}, { "IHFOUTR", NULL, "Speaker Rail"}, - { "IHFOUTL", "NULL", "Speaker Left Playback"}, - { "IHFOUTR", "NULL", "Speaker Right Playback"}, + { "IHFOUTL", NULL, "Speaker Left Playback"}, + { "IHFOUTR", NULL, "Speaker Right Playback"}, { "Speaker Left Playback", NULL, "Speaker Left Filter"}, { "Speaker Right Playback", NULL, "Speaker Right Filter"}, { "Speaker Left Filter", NULL, "IHFDAC Left"}, diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c index 
249ef5c4c762..32942bed34b1 100644 --- a/sound/soc/codecs/tas5086.c +++ b/sound/soc/codecs/tas5086.c @@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = priv->deemph; + ucontrol->value.integer.value[0] = priv->deemph; return 0; } @@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); - priv->deemph = ucontrol->value.enumerated.item[0]; + priv->deemph = ucontrol->value.integer.value[0]; return tas5086_set_deemph(codec); } diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index 8d9de49a5052..21d5402e343f 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - ucontrol->value.enumerated.item[0] = wm2000->anc_active; + ucontrol->value.integer.value[0] = wm2000->anc_active; return 0; } @@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - int anc_active = ucontrol->value.enumerated.item[0]; + int anc_active = ucontrol->value.integer.value[0]; int ret; if (anc_active > 1) @@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - ucontrol->value.enumerated.item[0] = wm2000->spk_ena; + ucontrol->value.integer.value[0] = wm2000->spk_ena; return 0; } @@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - int val = ucontrol->value.enumerated.item[0]; + int val = ucontrol->value.integer.value[0]; int ret; if (val > 1) diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index 098c143f44d6..c6d10533e2bd 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c @@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8731->deemph; + ucontrol->value.integer.value[0] = wm8731->deemph; return 0; } @@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; int ret = 0; if (deemph > 1) diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index dde462c082be..04b04f8e147c 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8903->deemph; + 
ucontrol->value.integer.value[0] = wm8903->deemph; return 0; } @@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; int ret = 0; if (deemph > 1) diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index d3b3f57668cc..215e93c1ddf0 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8904->deemph; + ucontrol->value.integer.value[0] = wm8904->deemph; return 0; } @@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c index 1ab2d462afad..00bec915d652 100644 --- a/sound/soc/codecs/wm8955.c +++ b/sound/soc/codecs/wm8955.c @@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8955->deemph; + ucontrol->value.integer.value[0] = wm8955->deemph; return 0; } @@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index cf8fecf97f2c..3035d9856415 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -184,7 +184,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8960->deemph; + ucontrol->value.integer.value[0] = wm8960->deemph; return 0; } @@ -193,7 +193,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 9517571e820d..98c9525bd751 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); - unsigned int val = ucontrol->value.enumerated.item[0]; + unsigned int val = ucontrol->value.integer.value[0]; struct soc_mixer_control *mc = (struct soc_mixer_control 
*)kcontrol->private_value; unsigned int mixer, mask, shift, old; @@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, mutex_lock(&wm9712->lock); old = wm9712->hp_mixer[mixer]; - if (ucontrol->value.enumerated.item[0]) + if (ucontrol->value.integer.value[0]) wm9712->hp_mixer[mixer] |= mask; else wm9712->hp_mixer[mixer] &= ~mask; @@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol, mixer = mc->shift >> 8; shift = mc->shift & 0xff; - ucontrol->value.enumerated.item[0] = + ucontrol->value.integer.value[0] = (wm9712->hp_mixer[mixer] >> shift) & 1; return 0; diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 68222917b396..79552953e1bd 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c @@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); - unsigned int val = ucontrol->value.enumerated.item[0]; + unsigned int val = ucontrol->value.integer.value[0]; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int mixer, mask, shift, old; @@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, mutex_lock(&wm9713->lock); old = wm9713->hp_mixer[mixer]; - if (ucontrol->value.enumerated.item[0]) + if (ucontrol->value.integer.value[0]) wm9713->hp_mixer[mixer] |= mask; else wm9713->hp_mixer[mixer] &= ~mask; @@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol, mixer = mc->shift >> 8; shift = mc->shift & 0xff; - ucontrol->value.enumerated.item[0] = + ucontrol->value.integer.value[0] = (wm9713->hp_mixer[mixer] >> shift) & 1; return 0; diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 75870c0ea2c9..91eb3aef7f02 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c @@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv, enum spdif_txrate index, bool round) { const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; - bool is_sysclk = clk == spdif_priv->sysclk; + bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk); u64 rate_ideal, rate_actual, sub; u32 sysclk_dfmin, sysclk_dfmax; u32 txclk_df, sysclk_df, arate; @@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv, spdif_priv->txclk_src[index], rate[index]); dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", spdif_priv->txclk_df[index], rate[index]); - if (spdif_priv->txclk[index] == spdif_priv->sysclk) + if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk)) dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", spdif_priv->sysclk_df[index], rate[index]); dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index b9fabbf69db6..6b0c8f717ec2 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -603,7 +603,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, factor = (div2 + 1) * (7 * psr + 1) * 2; for (i = 0; i < 255; i++) { - tmprate = freq * factor * (i + 2); + tmprate = freq * factor * (i + 1); if (baudclk_is_used) clkrate = clk_get_rate(ssi_private->baudclk); @@ -1227,7 +1227,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev, ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + 
CCSR_SSI_STX0; ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; - ret = !of_property_read_u32_array(np, "dmas", dmas, 4); + ret = of_property_read_u32_array(np, "dmas", dmas, 4); if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { ssi_private->use_dual_fifo = true; /* When using dual fifo mode, we need to keep watermark diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c index c42ffae5fe9f..402b728c0a06 100644 --- a/sound/soc/intel/sst-haswell-dsp.c +++ b/sound/soc/intel/sst-haswell-dsp.c @@ -207,9 +207,6 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw) module = (void *)module + sizeof(*module) + module->mod_size; } - /* allocate scratch mem regions */ - sst_block_alloc_scratch(dsp); - return 0; } diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 394af5684c05..863a9ca34b8e 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c @@ -1732,6 +1732,7 @@ static void sst_hsw_drop_all(struct sst_hsw *hsw) int sst_hsw_dsp_load(struct sst_hsw *hsw) { struct sst_dsp *dsp = hsw->dsp; + struct sst_fw *sst_fw, *t; int ret; dev_dbg(hsw->dev, "loading audio DSP...."); @@ -1748,12 +1749,17 @@ int sst_hsw_dsp_load(struct sst_hsw *hsw) return ret; } - ret = sst_fw_reload(hsw->sst_fw); - if (ret < 0) { - dev_err(hsw->dev, "error: SST FW reload failed\n"); - sst_dsp_dma_put_channel(dsp); - return -ENOMEM; + list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) { + ret = sst_fw_reload(sst_fw); + if (ret < 0) { + dev_err(hsw->dev, "error: SST FW reload failed\n"); + sst_dsp_dma_put_channel(dsp); + return -ENOMEM; + } } + ret = sst_block_alloc_scratch(hsw->dsp); + if (ret < 0) + return -EINVAL; sst_dsp_dma_put_channel(dsp); return 0; @@ -1809,12 +1815,17 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw) int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) { - sst_fw_unload(hsw->sst_fw); - sst_block_free_scratch(hsw->dsp); + struct sst_fw *sst_fw, *t; + struct sst_dsp *dsp = hsw->dsp; + + list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) { + sst_fw_unload(sst_fw); + } + sst_block_free_scratch(dsp); hsw->boot_complete = false; - sst_dsp_sleep(hsw->dsp); + sst_dsp_sleep(dsp); return 0; } @@ -1943,6 +1954,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata) goto fw_err; } + /* allocate scratch mem regions */ + ret = sst_block_alloc_scratch(hsw->dsp); + if (ret < 0) + goto boot_err; + /* wait for DSP boot completion */ sst_dsp_boot(hsw->dsp); ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index def7d8260c4e..d19483081f9b 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c @@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev) if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) return -EPROBE_DEFER; } else { - if (priv->extclk == priv->clk) { + if (clk_is_match(priv->extclk, priv->clk)) { devm_clk_put(&pdev->dev, priv->extclk); priv->extclk = ERR_PTR(-EINVAL); } else { diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 30579ca5bacb..e5c990889dcc 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -347,6 +347,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(codec, &codec_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", 
codec->component.name); @@ -358,6 +360,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, } } + mutex_unlock(&client_mutex); + if (ret >= 0) ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); @@ -382,6 +386,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(component, &component_list, list) { list_for_each_entry(dai, &component->dai_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", @@ -395,6 +401,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, } } + mutex_unlock(&client_mutex); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); @@ -418,6 +426,8 @@ static ssize_t platform_list_read_file(struct file *file, if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(platform, &platform_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", platform->component.name); @@ -429,6 +439,8 @@ static ssize_t platform_list_read_file(struct file *file, } } + mutex_unlock(&client_mutex); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); @@ -836,6 +848,8 @@ static struct snd_soc_component *soc_find_component( { struct snd_soc_component *component; + lockdep_assert_held(&client_mutex); + list_for_each_entry(component, &component_list, list) { if (of_node) { if (component->dev->of_node == of_node) @@ -854,6 +868,8 @@ static struct snd_soc_dai *snd_soc_find_dai( struct snd_soc_component *component; struct snd_soc_dai *dai; + lockdep_assert_held(&client_mutex); + /* Find CPU DAI from registered DAIs*/ list_for_each_entry(component, &component_list, list) { if (dlc->of_node && component->dev->of_node != dlc->of_node) @@ -1508,6 +1524,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) struct snd_soc_codec *codec; int ret, i, order; + mutex_lock(&client_mutex); mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); /* bind DAIs */ @@ -1662,6 +1679,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) card->instantiated = 1; snd_soc_dapm_sync(&card->dapm); mutex_unlock(&card->mutex); + mutex_unlock(&client_mutex); return 0; @@ -1680,6 +1698,7 @@ card_probe_error: base_error: mutex_unlock(&card->mutex); + mutex_unlock(&client_mutex); return ret; } @@ -2713,13 +2732,6 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component) list_del(&component->list); } -static void snd_soc_component_del(struct snd_soc_component *component) -{ - mutex_lock(&client_mutex); - snd_soc_component_del_unlocked(component); - mutex_unlock(&client_mutex); -} - int snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, @@ -2767,14 +2779,17 @@ void snd_soc_unregister_component(struct device *dev) { struct snd_soc_component *cmpnt; + mutex_lock(&client_mutex); list_for_each_entry(cmpnt, &component_list, list) { if (dev == cmpnt->dev && cmpnt->registered_as_component) goto found; } + mutex_unlock(&client_mutex); return; found: - snd_soc_component_del(cmpnt); + snd_soc_component_del_unlocked(cmpnt); + mutex_unlock(&client_mutex); snd_soc_component_cleanup(cmpnt); kfree(cmpnt); } @@ -2882,10 +2897,14 @@ struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev) { struct snd_soc_platform *platform; + mutex_lock(&client_mutex); list_for_each_entry(platform, &platform_list, list) { - if (dev == platform->dev) + if (dev == 
platform->dev) { + mutex_unlock(&client_mutex); return platform; + } } + mutex_unlock(&client_mutex); return NULL; } @@ -3090,15 +3109,15 @@ void snd_soc_unregister_codec(struct device *dev) { struct snd_soc_codec *codec; + mutex_lock(&client_mutex); list_for_each_entry(codec, &codec_list, list) { if (dev == codec->dev) goto found; } + mutex_unlock(&client_mutex); return; found: - - mutex_lock(&client_mutex); list_del(&codec->list); snd_soc_component_del_unlocked(&codec->component); mutex_unlock(&client_mutex); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 67d476548dcf..07f984d5f516 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + USB_DEVICE(0x0582, 0x0159), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + /* .vendor_name = "Roland", */ + /* .product_name = "UA-22", */ + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 2, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = & (const struct snd_usb_midi_endpoint_info) { + .out_cables = 0x0001, + .in_cables = 0x0001 + } + }, + { + .ifnum = -1 + } + } + } +}, /* this catches most recent vendor-specific Roland devices */ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 61bf9128e1f2..9d9db3b296dd 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -30,6 +30,8 @@ static int disasm_line__parse(char *line, char **namep, char **rawp); static void ins__delete(struct ins_operands *ops) { + if (ops == NULL) + return; zfree(&ops->source.raw); zfree(&ops->source.name); zfree(&ops->target.raw); diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 3ed7c0476d48..2e2ba2efa0d9 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) $(ECHO) " CC " $@ - $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@ + $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@ $(QUIET) $(STRIPCMD) $@ $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 4e511221a0c1..0db571340edb 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -22,6 +22,14 @@ TARGETS += vm TARGETS_HOTPLUG = cpu-hotplug TARGETS_HOTPLUG += memory-hotplug +# Clear LDFLAGS and MAKEFLAGS if called from main +# Makefile to avoid test build failures when test +# Makefile doesn't have explicit build rules. 
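Illustrative sketch, not part of the patch above: the long run of codec control fixes earlier in this diff (adav80x through wm9713) all make the same correction, boolean and integer kcontrols must be accessed through value.integer.value[], not value.enumerated.item[], because the two are different members of a union with different element types, which breaks on 64-bit and big-endian systems. A simplified stand-in for that union (not the real struct snd_ctl_elem_value layout) shows why the two views can disagree:

#include <stdio.h>

struct elem_value {
	union {
		struct { long value[4]; } integer;		/* boolean/integer controls */
		struct { unsigned int item[4]; } enumerated;	/* enumerated controls */
	} value;
};

int main(void)
{
	struct elem_value v = { 0 };

	/* userspace writes a boolean control through the integer member */
	v.value.integer.value[0] = 1;
	v.value.integer.value[1] = 1;

	/* reading it back through the enumerated member indexes ints over
	 * an array of longs: on LP64 item[1] lands in the upper half of
	 * value[0], and the picture changes again on big-endian machines */
	printf("integer view:    %ld %ld\n",
	       v.value.integer.value[0], v.value.integer.value[1]);
	printf("enumerated view: %u %u\n",
	       v.value.enumerated.item[0], v.value.enumerated.item[1]);
	return 0;
}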
+ifeq (1,$(MAKELEVEL)) +undefine LDFLAGS +override MAKEFLAGS = +endif + all: for TARGET in $(TARGETS); do \ make -C $$TARGET; \ diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index e238c9559caf..8d5d1d2ee7c1 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c @@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp, #ifdef __NR_execveat return syscall(__NR_execveat, fd, path, argv, envp, flags); #else - errno = -ENOSYS; + errno = ENOSYS; return -1; #endif } @@ -234,6 +234,14 @@ static int run_tests(void) int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); + /* Check if we have execveat at all, and bail early if not */ + errno = 0; + execveat_(-1, NULL, NULL, NULL, 0); + if (errno == ENOSYS) { + printf("[FAIL] ENOSYS calling execveat - no kernel support?\n"); + return 1; + } + /* Change file position to confirm it doesn't affect anything */ lseek(fd, 10, SEEK_SET); diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index a0a7b5d1a070..f9b9c7c51372 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c @@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, { if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); + else + vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); } static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) @@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; } +static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0; +} + static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) { u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; @@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = { .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, .get_elrsr = vgic_v2_get_elrsr, .get_eisr = vgic_v2_get_eisr, + .clear_eisr = vgic_v2_clear_eisr, .get_interrupt_status = vgic_v2_get_interrupt_status, .enable_underflow = vgic_v2_enable_underflow, .disable_underflow = vgic_v2_disable_underflow, diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 3a62d8a9a2c6..dff06021e748 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, { if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); + else + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); } static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) @@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; } +static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0; +} + static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) { u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; @@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = { .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, .get_elrsr = vgic_v3_get_elrsr, .get_eisr = vgic_v3_get_eisr, + .clear_eisr = vgic_v3_clear_eisr, .get_interrupt_status = vgic_v3_get_interrupt_status, .enable_underflow = vgic_v3_enable_underflow, .disable_underflow = vgic_v3_disable_underflow, diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 0cc6ab6005a0..c9f60f524588 100644 --- a/virt/kvm/arm/vgic.c +++ 
b/virt/kvm/arm/vgic.c @@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) return vgic_ops->get_eisr(vcpu); } +static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) +{ + vgic_ops->clear_eisr(vcpu); +} + static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) { return vgic_ops->get_interrupt_status(vcpu); @@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) vgic_set_lr(vcpu, lr_nr, vlr); clear_bit(lr_nr, vgic_cpu->lr_used); vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; + vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); } /* @@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vlr.state |= LR_STATE_PENDING; vgic_set_lr(vcpu, lr, vlr); + vgic_sync_lr_elrsr(vcpu, lr, vlr); return true; } } @@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vlr.state |= LR_EOI_INT; vgic_set_lr(vcpu, lr, vlr); + vgic_sync_lr_elrsr(vcpu, lr, vlr); return true; } @@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) if (status & INT_STATUS_UNDERFLOW) vgic_disable_underflow(vcpu); + /* + * In the next iterations of the vcpu loop, if we sync the vgic state + * after flushing it, but before entering the guest (this happens for + * pending signals and vmid rollovers), then make sure we don't pick + * up any old maintenance interrupts here. + */ + vgic_clear_eisr(vcpu); + return level_pending; } @@ -1583,8 +1599,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) * emulation. So check this here again. KVM_CREATE_DEVICE does * the proper checks already. */ - if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) - return -ENODEV; + if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) { + ret = -ENODEV; + goto out; + } /* * Any time a vcpu is run, vcpu_load is called which tries to grab the diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a1093700f3a4..cc6a25d95fbf 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type) BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); r = -ENOMEM; - kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); + kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots)); if (!kvm->memslots) goto out_err_no_srcu; @@ -522,7 +522,7 @@ out_err_no_srcu: out_err_no_disable: for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm->buses[i]); - kfree(kvm->memslots); + kvfree(kvm->memslots); kvm_arch_free_vm(kvm); return ERR_PTR(r); } @@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm) kvm_for_each_memslot(memslot, slots) kvm_free_physmem_slot(kvm, memslot, NULL); - kfree(kvm->memslots); + kvfree(kvm->memslots); } static void kvm_destroy_devices(struct kvm *kvm) @@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm, goto out_free; } - slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), - GFP_KERNEL); + slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); if (!slots) goto out_free; + memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { slot = id_to_memslot(slots, mem->slot); @@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm, kvm_arch_commit_memory_region(kvm, mem, &old, change); kvm_free_physmem_slot(kvm, &old, &new); - kfree(old_memslots); + kvfree(old_memslots); /* * IOMMU mapping: New slots need to be mapped. 
Old slots need to be @@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm, return 0; out_slots: - kfree(slots); + kvfree(slots); out_free: kvm_free_physmem_slot(kvm, &new, &old); out: @@ -2492,6 +2492,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) case KVM_CAP_SIGNAL_MSI: #endif #ifdef CONFIG_HAVE_KVM_IRQFD + case KVM_CAP_IRQFD: case KVM_CAP_IRQFD_RESAMPLE: #endif case KVM_CAP_CHECK_EXTENSION_VM: |
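Illustrative sketch, not part of the patch above: the kvm_main.c hunks switch kvm->memslots to kvm_kvzalloc()/kvfree(), which may fall back to vmalloc for large objects and therefore need the matching free helper, while keeping the existing approach of copying the current slots, mutating the private copy, publishing it, and only then retiring the old snapshot. The same copy-then-swap idea, reduced to plain malloc/free in userspace:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slots {
	int nr;
	int id[8];
};

int main(void)
{
	struct slots *cur = calloc(1, sizeof(*cur));
	struct slots *next = calloc(1, sizeof(*next));
	struct slots *old;

	if (!cur || !next)
		return 1;

	cur->nr = 1;
	cur->id[0] = 42;

	memcpy(next, cur, sizeof(*next));	/* work on a private copy */
	next->id[next->nr++] = 43;		/* mutate the copy only */

	old = cur;				/* publish the copy ... */
	cur = next;
	free(old);				/* ... then retire the old one */

	printf("%d slots, last id %d\n", cur->nr, cur->id[cur->nr - 1]);
	free(cur);
	return 0;
}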