Diffstat (limited to 'arch')
386 files changed, 11725 insertions, 8174 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index bf9e9d3b3792..f515a4dbf7a0 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -3,6 +3,7 @@ config ALPHA
 	default y
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_AOUT
 	select HAVE_IDE
 	select HAVE_OPROFILE
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 37b570d01202..fed9c6f44c19 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -16,6 +16,11 @@
 #define arch_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.lock == 0;
+}
+
 static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 36dc91ace83a..6cc08166ff00 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1138,6 +1138,7 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 {
 	struct rusage32 r;
 	cputime_t utime, stime;
+	unsigned long utime_jiffies, stime_jiffies;
 
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
 		return -EINVAL;
@@ -1146,14 +1147,18 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 	switch (who) {
 	case RUSAGE_SELF:
 		task_cputime(current, &utime, &stime);
-		jiffies_to_timeval32(utime, &r.ru_utime);
-		jiffies_to_timeval32(stime, &r.ru_stime);
+		utime_jiffies = cputime_to_jiffies(utime);
+		stime_jiffies = cputime_to_jiffies(stime);
+		jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
+		jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
 		r.ru_minflt = current->min_flt;
 		r.ru_majflt = current->maj_flt;
 		break;
 	case RUSAGE_CHILDREN:
-		jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
-		jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
+		utime_jiffies = cputime_to_jiffies(current->signal->cutime);
+		stime_jiffies = cputime_to_jiffies(current->signal->cstime);
+		jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
+		jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
 		r.ru_minflt = current->signal->cmin_flt;
 		r.ru_majflt = current->signal->cmaj_flt;
 		break;
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 82f738e5d54c..cded02c890aa 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -242,12 +242,7 @@ pci_restore_srm_config(void)
 void
 pcibios_fixup_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dev = bus->self;
-
-	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
-	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
-		pci_read_bridge_bases(bus);
-	}
+	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		pdev_save_srm_config(dev);
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 21fcc440fc1a..b76f9a2ce05d 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -210,7 +210,7 @@
 		};
 
 		uart0: serial@44e09000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart1";
 			clock-frequency = <48000000>;
 			reg = <0x44e09000 0x2000>;
@@ -221,7 +221,7 @@
 		};
 
 		uart1: serial@48022000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart2";
 			clock-frequency = <48000000>;
 			reg = <0x48022000 0x2000>;
@@ -232,7 +232,7 @@
 		};
 
 		uart2: serial@48024000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart3";
 			clock-frequency = <48000000>;
 			reg = <0x48024000 0x2000>;
@@
-243,7 +243,7 @@ }; uart3: serial@481a6000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart4"; clock-frequency = <48000000>; reg = <0x481a6000 0x2000>; @@ -252,7 +252,7 @@ }; uart4: serial@481a8000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart5"; clock-frequency = <48000000>; reg = <0x481a8000 0x2000>; @@ -261,7 +261,7 @@ }; uart5: serial@481aa000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart6"; clock-frequency = <48000000>; reg = <0x481aa000 0x2000>; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index a63bf78191ea..f9a4b317ed2f 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -693,3 +693,7 @@ }; }; }; + +&pcie1 { + gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; +}; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 1e29ccf77ea2..b058b3146874 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -211,7 +211,7 @@ #address-cells = <1>; ranges = <0x51000000 0x51000000 0x3000 0x0 0x20000000 0x10000000>; - pcie@51000000 { + pcie1: pcie@51000000 { compatible = "ti,dra7-pcie"; reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; @@ -397,7 +397,7 @@ }; uart1: serial@4806a000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x4806a000 0x100>; interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart1"; @@ -408,7 +408,7 @@ }; uart2: serial@4806c000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x4806c000 0x100>; interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart2"; @@ -419,7 +419,7 @@ }; uart3: serial@48020000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48020000 0x100>; interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart3"; @@ -430,7 +430,7 @@ }; uart4: serial@4806e000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x4806e000 0x100>; interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart4"; @@ -441,7 +441,7 @@ }; uart5: serial@48066000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48066000 0x100>; interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart5"; @@ -452,7 +452,7 @@ }; uart6: serial@48068000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48068000 0x100>; interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart6"; @@ -463,7 +463,7 @@ }; uart7: serial@48420000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48420000 0x100>; interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart7"; @@ -472,7 +472,7 @@ }; uart8: serial@48422000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48422000 0x100>; interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart8"; @@ -481,7 +481,7 @@ }; uart9: serial@48424000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48424000 0x100>; interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart9"; @@ -490,7 +490,7 @@ }; uart10: serial@4ae2b000 { - compatible = "ti,omap4-uart"; + compatible = 
"ti,dra742-uart", "ti,omap4-uart"; reg = <0x4ae2b000 0x100>; interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart10"; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index b57033e8c633..10d0b26c93f1 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -836,10 +836,31 @@ reg = <0x02100000 0x100000>; ranges; - caam@02100000 { - reg = <0x02100000 0x40000>; - interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>, - <0 106 IRQ_TYPE_LEVEL_HIGH>; + crypto: caam@2100000 { + compatible = "fsl,sec-v4.0"; + fsl,sec-era = <4>; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x2100000 0x10000>; + ranges = <0 0x2100000 0x10000>; + interrupt-parent = <&intc>; + clocks = <&clks IMX6QDL_CLK_CAAM_MEM>, + <&clks IMX6QDL_CLK_CAAM_ACLK>, + <&clks IMX6QDL_CLK_CAAM_IPG>, + <&clks IMX6QDL_CLK_EIM_SLOW>; + clock-names = "mem", "aclk", "ipg", "emi_slow"; + + sec_jr0: jr0@1000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; + }; + + sec_jr1: jr1@2000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; + }; }; aipstz@0217c000 { /* AIPSTZ2 */ diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 708175d59b9c..e6223d8e79af 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -738,6 +738,33 @@ reg = <0x02100000 0x100000>; ranges; + crypto: caam@2100000 { + compatible = "fsl,sec-v4.0"; + fsl,sec-era = <4>; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x2100000 0x10000>; + ranges = <0 0x2100000 0x10000>; + interrupt-parent = <&intc>; + clocks = <&clks IMX6SX_CLK_CAAM_MEM>, + <&clks IMX6SX_CLK_CAAM_ACLK>, + <&clks IMX6SX_CLK_CAAM_IPG>, + <&clks IMX6SX_CLK_EIM_SLOW>; + clock-names = "mem", "aclk", "ipg", "emi_slow"; + + sec_jr0: jr0@1000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; + }; + + sec_jr1: jr1@2000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + usbotg1: usb@02184000 { compatible = "fsl,imx6sx-usb", "fsl,imx27-usb"; reg = <0x02184000 0x200>; diff --git a/arch/arm/boot/dts/kirkwood-d2net.dts b/arch/arm/boot/dts/kirkwood-d2net.dts index 6b7856025001..e1c25c35e9ce 100644 --- a/arch/arm/boot/dts/kirkwood-d2net.dts +++ b/arch/arm/boot/dts/kirkwood-d2net.dts @@ -10,6 +10,7 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-netxbig.dtsi" / { @@ -28,6 +29,10 @@ label = "d2net_v2:blue:sata"; slow-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>; cmd-gpio = <&gpio0 30 GPIO_ACTIVE_HIGH>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; diff --git a/arch/arm/boot/dts/kirkwood-is2.dts b/arch/arm/boot/dts/kirkwood-is2.dts index da674bbd49a8..4121674abd1c 100644 --- a/arch/arm/boot/dts/kirkwood-is2.dts +++ b/arch/arm/boot/dts/kirkwood-is2.dts @@ -1,5 +1,6 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -27,6 +28,10 @@ label = "ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2.dts b/arch/arm/boot/dts/kirkwood-ns2.dts index 53368d1022cc..190189d235e6 100644 --- a/arch/arm/boot/dts/kirkwood-ns2.dts +++ b/arch/arm/boot/dts/kirkwood-ns2.dts @@ -1,5 +1,6 @@ 
/dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -27,6 +28,10 @@ label = "ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2max.dts b/arch/arm/boot/dts/kirkwood-ns2max.dts index 72c78d0b1116..55cc41d9c80c 100644 --- a/arch/arm/boot/dts/kirkwood-ns2max.dts +++ b/arch/arm/boot/dts/kirkwood-ns2max.dts @@ -1,5 +1,6 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -46,6 +47,10 @@ label = "ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2mini.dts b/arch/arm/boot/dts/kirkwood-ns2mini.dts index c441bf62c09f..9935f3ec29b4 100644 --- a/arch/arm/boot/dts/kirkwood-ns2mini.dts +++ b/arch/arm/boot/dts/kirkwood-ns2mini.dts @@ -1,5 +1,6 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -47,6 +48,10 @@ label = "ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts index bd35b0674ff6..9bc72a3356e4 100644 --- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts +++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts @@ -17,3 +17,13 @@ status = "ok"; }; }; + +&spmi_bus { + pm8941@0 { + coincell@2800 { + status = "ok"; + qcom,rset-ohms = <2100>; + qcom,vset-millivolts = <3000>; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi index aa774e685018..968f1043d4f5 100644 --- a/arch/arm/boot/dts/qcom-pm8941.dtsi +++ b/arch/arm/boot/dts/qcom-pm8941.dtsi @@ -125,6 +125,12 @@ interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>; qcom,external-resistor-micro-ohms = <10000>; }; + + coincell@2800 { + compatible = "qcom,pm8941-coincell"; + reg = <0x2800>; + status = "disabled"; + }; }; usid1: pm8941@1 { diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index b8f81fb418ce..c5fbde3afcf6 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -220,6 +220,13 @@ clocks { compatible = "stericsson,u8500-clks"; + /* + * Registers for the CLKRST block on peripheral + * groups 1, 2, 3, 5, 6, + */ + reg = <0x8012f000 0x1000>, <0x8011f000 0x1000>, + <0x8000f000 0x1000>, <0xa03ff000 0x1000>, + <0xa03cf000 0x1000>; prcmu_clk: prcmu-clock { #clock-cells = <1>; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 61c03d1fe530..adaa57b7a943 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -241,6 +241,7 @@ compatible = "allwinner,sun4i-a10-axi-gates-clk"; reg = <0x01c2005c 0x4>; clocks = <&axi>; + clock-indices = <0>; clock-output-names = "axi_dram"; }; @@ -257,17 +258,36 @@ compatible = "allwinner,sun4i-a10-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <3>, + <4>, <5>, <6>, + <7>, <8>, <9>, + <10>, <11>, <12>, + <13>, <14>, <16>, + <17>, <18>, <20>, + <21>, <22>, <23>, + <24>, <25>, <26>, + <32>, <33>, <34>, + <35>, <36>, <37>, + <40>, <41>, <43>, + <44>, <45>, + <46>, <47>, + <50>, <52>; clock-output-names = 
"ahb_usb0", "ahb_ehci0", - "ahb_ohci0", "ahb_ehci1", "ahb_ohci1", "ahb_ss", - "ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1", - "ahb_mmc2", "ahb_mmc3", "ahb_ms", "ahb_nand", - "ahb_sdram", "ahb_ace", "ahb_emac", "ahb_ts", - "ahb_spi0", "ahb_spi1", "ahb_spi2", "ahb_spi3", - "ahb_pata", "ahb_sata", "ahb_gps", "ahb_ve", - "ahb_tvd", "ahb_tve0", "ahb_tve1", "ahb_lcd0", - "ahb_lcd1", "ahb_csi0", "ahb_csi1", "ahb_hdmi", - "ahb_de_be0", "ahb_de_be1", "ahb_de_fe0", - "ahb_de_fe1", "ahb_mp", "ahb_mali400"; + "ahb_ohci0", "ahb_ehci1", + "ahb_ohci1", "ahb_ss", "ahb_dma", + "ahb_bist", "ahb_mmc0", "ahb_mmc1", + "ahb_mmc2", "ahb_mmc3", "ahb_ms", + "ahb_nand", "ahb_sdram", "ahb_ace", + "ahb_emac", "ahb_ts", "ahb_spi0", + "ahb_spi1", "ahb_spi2", "ahb_spi3", + "ahb_pata", "ahb_sata", "ahb_gps", + "ahb_ve", "ahb_tvd", "ahb_tve0", + "ahb_tve1", "ahb_lcd0", "ahb_lcd1", + "ahb_csi0", "ahb_csi1", "ahb_hdmi", + "ahb_de_be0", "ahb_de_be1", + "ahb_de_fe0", "ahb_de_fe1", + "ahb_mp", "ahb_mali400"; }; apb0: apb0@01c20054 { @@ -283,9 +303,14 @@ compatible = "allwinner,sun4i-a10-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <1>, + <2>, <3>, + <5>, <6>, + <7>, <10>; clock-output-names = "apb0_codec", "apb0_spdif", - "apb0_ac97", "apb0_iis", "apb0_pio", "apb0_ir0", - "apb0_ir1", "apb0_keypad"; + "apb0_ac97", "apb0_iis", + "apb0_pio", "apb0_ir0", + "apb0_ir1", "apb0_keypad"; }; apb1: clk@01c20058 { @@ -301,12 +326,22 @@ compatible = "allwinner,sun4i-a10-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <4>, + <5>, <6>, + <7>, <16>, + <17>, <18>, + <19>, <20>, + <21>, <22>, + <23>; clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_can", "apb1_scr", - "apb1_ps20", "apb1_ps21", "apb1_uart0", - "apb1_uart1", "apb1_uart2", "apb1_uart3", - "apb1_uart4", "apb1_uart5", "apb1_uart6", - "apb1_uart7"; + "apb1_i2c2", "apb1_can", + "apb1_scr", "apb1_ps20", + "apb1_ps21", "apb1_uart0", + "apb1_uart1", "apb1_uart2", + "apb1_uart3", "apb1_uart4", + "apb1_uart5", "apb1_uart6", + "apb1_uart7"; }; nand_clk: clk@01c20080 { @@ -643,6 +678,14 @@ status = "disabled"; }; + crypto: crypto-engine@01c15000 { + compatible = "allwinner,sun4i-a10-crypto"; + reg = <0x01c15000 0x1000>; + interrupts = <86>; + clocks = <&ahb_gates 5>, <&ss_clk>; + clock-names = "ahb", "mod"; + }; + spi2: spi@01c17000 { compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c17000 0x1000>; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index f11efb722bbb..a513b416a807 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -85,6 +85,17 @@ compatible = "allwinner,sun5i-a10s-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <5>, <6>, + <7>, <8>, <9>, + <10>, <13>, + <14>, <17>, <18>, + <20>, <21>, <22>, + <26>, <28>, <32>, + <34>, <36>, <40>, + <43>, <44>, + <46>, <51>, + <52>; clock-output-names = "ahb_usbotg", "ahb_ehci", "ahb_ohci", "ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1", @@ -103,6 +114,9 @@ compatible = "allwinner,sun5i-a10s-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <3>, + <5>, <6>, + <10>; clock-output-names = "apb0_codec", "apb0_iis", "apb0_pio", "apb0_ir", "apb0_keypad"; @@ -113,9 +127,14 @@ compatible = "allwinner,sun5i-a10s-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <16>, + <17>, <18>, + <19>; clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", 
"apb1_uart0", "apb1_uart1", - "apb1_uart2", "apb1_uart3"; + "apb1_i2c2", "apb1_uart0", + "apb1_uart1", "apb1_uart2", + "apb1_uart3"; }; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 976d4faa2179..f3631c9c6fa2 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -104,6 +104,16 @@ compatible = "allwinner,sun5i-a13-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <5>, <6>, + <7>, <8>, <9>, + <10>, <13>, + <14>, <20>, + <21>, <22>, + <28>, <32>, <36>, + <40>, <44>, + <46>, <51>, + <52>; clock-output-names = "ahb_usbotg", "ahb_ehci", "ahb_ohci", "ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1", @@ -121,6 +131,8 @@ compatible = "allwinner,sun5i-a13-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <5>, + <6>; clock-output-names = "apb0_codec", "apb0_pio", "apb0_ir"; }; @@ -130,8 +142,12 @@ compatible = "allwinner,sun5i-a13-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <17>, + <19>; clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_uart1", "apb1_uart3"; + "apb1_i2c2", "apb1_uart1", + "apb1_uart3"; }; }; }; diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index 54b097830434..427c0e7289fa 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -178,6 +178,7 @@ compatible = "allwinner,sun4i-a10-axi-gates-clk"; reg = <0x01c2005c 0x4>; clocks = <&axi>; + clock-indices = <0>; clock-output-names = "axi_dram"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 008047a018cf..e4d3484d97bd 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -252,6 +252,20 @@ compatible = "allwinner,sun6i-a31-ahb1-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb1>; + clock-indices = <1>, <5>, + <6>, <8>, <9>, + <10>, <11>, <12>, + <13>, <14>, + <17>, <18>, <19>, + <20>, <21>, <22>, + <23>, <24>, <26>, + <27>, <29>, + <30>, <31>, <32>, + <36>, <37>, <40>, + <43>, <44>, <45>, + <46>, <47>, <50>, + <52>, <55>, <56>, + <57>, <58>; clock-output-names = "ahb1_mipidsi", "ahb1_ss", "ahb1_dma", "ahb1_mmc0", "ahb1_mmc1", "ahb1_mmc2", "ahb1_mmc3", "ahb1_nand1", @@ -281,6 +295,9 @@ compatible = "allwinner,sun6i-a31-apb1-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb1>; + clock-indices = <0>, <4>, + <5>, <12>, + <13>; clock-output-names = "apb1_codec", "apb1_digital_mic", "apb1_pio", "apb1_daudio0", "apb1_daudio1"; @@ -299,6 +316,10 @@ compatible = "allwinner,sun6i-a31-apb2-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb2>; + clock-indices = <0>, <1>, + <2>, <3>, <16>, + <17>, <18>, <19>, + <20>, <21>; clock-output-names = "apb2_i2c0", "apb2_i2c1", "apb2_i2c2", "apb2_i2c3", "apb2_uart0", "apb2_uart1", @@ -346,6 +367,14 @@ "mmc3_sample"; }; + ss_clk: clk@01c2009c { + #clock-cells = <0>; + compatible = "allwinner,sun4i-a10-mod0-clk"; + reg = <0x01c2009c 0x4>; + clocks = <&osc24M>, <&pll6 0>; + clock-output-names = "ss"; + }; + spi0_clk: clk@01c200a0 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-mod0-clk"; @@ -384,6 +413,9 @@ compatible = "allwinner,sun6i-a31-usb-clk"; reg = <0x01c200cc 0x4>; clocks = <&osc24M>; + clock-indices = <8>, <9>, <10>, + <16>, <17>, + <18>; clock-output-names = "usb_phy0", "usb_phy1", "usb_phy2", "usb_ohci0", "usb_ohci1", "usb_ohci2"; @@ -870,6 +902,16 @@ #size-cells = <0>; }; + crypto: crypto-engine@01c15000 { + compatible = 
"allwinner,sun4i-a10-crypto"; + reg = <0x01c15000 0x1000>; + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&ahb1_gates 5>, <&ss_clk>; + clock-names = "ahb", "mod"; + resets = <&ahb1_rst 5>; + reset-names = "ahb"; + }; + timer@01c60000 { compatible = "allwinner,sun6i-a31-hstimer", "allwinner,sun7i-a20-hstimer"; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 6a63f30c9a69..d3b2f26417aa 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -267,6 +267,19 @@ compatible = "allwinner,sun7i-a20-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <5>, <6>, <7>, <8>, + <9>, <10>, <11>, <12>, + <13>, <14>, <16>, + <17>, <18>, <20>, <21>, + <22>, <23>, <25>, + <28>, <32>, <33>, <34>, + <35>, <36>, <37>, <40>, + <41>, <42>, <43>, + <44>, <45>, <46>, + <47>, <49>, <50>, + <52>; clock-output-names = "ahb_usb0", "ahb_ehci0", "ahb_ohci0", "ahb_ehci1", "ahb_ohci1", "ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0", @@ -295,6 +308,10 @@ compatible = "allwinner,sun7i-a20-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <5>, <6>, <7>, + <8>, <10>; clock-output-names = "apb0_codec", "apb0_spdif", "apb0_ac97", "apb0_iis0", "apb0_iis1", "apb0_pio", "apb0_ir0", "apb0_ir1", @@ -314,6 +331,12 @@ compatible = "allwinner,sun7i-a20-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <5>, <6>, <7>, + <15>, <16>, <17>, + <18>, <19>, <20>, + <21>, <22>, <23>; clock-output-names = "apb1_i2c0", "apb1_i2c1", "apb1_i2c2", "apb1_i2c3", "apb1_can", "apb1_scr", "apb1_ps20", "apb1_ps21", @@ -731,6 +754,14 @@ status = "disabled"; }; + crypto: crypto-engine@01c15000 { + compatible = "allwinner,sun4i-a10-crypto"; + reg = <0x01c15000 0x1000>; + interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&ahb_gates 5>, <&ss_clk>; + clock-names = "ahb", "mod"; + }; + spi2: spi@01c17000 { compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c17000 0x1000>; diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index 7abd0ae3143d..c318c770b6c1 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -180,6 +180,15 @@ compatible = "allwinner,sun8i-a23-ahb1-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb1>; + clock-indices = <1>, <6>, + <8>, <9>, <10>, + <13>, <14>, + <19>, <20>, + <21>, <24>, <26>, + <29>, <32>, <36>, + <40>, <44>, <46>, + <52>, <54>, + <57>; clock-output-names = "ahb1_mipidsi", "ahb1_dma", "ahb1_mmc0", "ahb1_mmc1", "ahb1_mmc2", "ahb1_nand", "ahb1_sdram", @@ -196,6 +205,8 @@ compatible = "allwinner,sun8i-a23-apb1-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb1>; + clock-indices = <0>, <5>, + <12>, <13>; clock-output-names = "apb1_codec", "apb1_pio", "apb1_daudio0", "apb1_daudio1"; }; @@ -213,6 +224,10 @@ compatible = "allwinner,sun8i-a23-apb2-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb2>; + clock-indices = <0>, <1>, + <2>, <16>, + <17>, <18>, + <19>, <20>; clock-output-names = "apb2_i2c0", "apb2_i2c1", "apb2_i2c2", "apb2_uart0", "apb2_uart1", "apb2_uart2", diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi index a43ad779ee2f..5908e3dcf965 100644 --- a/arch/arm/boot/dts/sun9i-a80.dtsi +++ b/arch/arm/boot/dts/sun9i-a80.dtsi @@ -277,9 +277,12 @@ compatible = "allwinner,sun9i-a80-ahb0-gates-clk"; reg = <0x06000580 0x4>; clocks = <&ahb0>; - clock-indices = <0>, <1>, <3>, <5>, 
<8>, <12>, <13>, - <14>, <15>, <16>, <18>, <20>, <21>, - <22>, <23>; + clock-indices = <0>, <1>, <3>, + <5>, <8>, <12>, + <13>, <14>, + <15>, <16>, <18>, + <20>, <21>, <22>, + <23>; clock-output-names = "ahb0_fd", "ahb0_ve", "ahb0_gpu", "ahb0_ss", "ahb0_sd", "ahb0_nand1", "ahb0_nand0", "ahb0_sdram", @@ -293,7 +296,10 @@ compatible = "allwinner,sun9i-a80-ahb1-gates-clk"; reg = <0x06000584 0x4>; clocks = <&ahb1>; - clock-indices = <0>, <1>, <17>, <21>, <22>, <23>, <24>; + clock-indices = <0>, <1>, + <17>, <21>, + <22>, <23>, + <24>; clock-output-names = "ahb1_usbotg", "ahb1_usbhci", "ahb1_gmac", "ahb1_msgbox", "ahb1_spinlock", "ahb1_hstimer", @@ -305,8 +311,9 @@ compatible = "allwinner,sun9i-a80-ahb2-gates-clk"; reg = <0x06000588 0x4>; clocks = <&ahb2>; - clock-indices = <0>, <1>, <2>, <4>, <5>, <7>, <8>, - <11>; + clock-indices = <0>, <1>, + <2>, <4>, <5>, + <7>, <8>, <11>; clock-output-names = "ahb2_lcd0", "ahb2_lcd1", "ahb2_edp", "ahb2_csi", "ahb2_hdmi", "ahb2_de", "ahb2_mp", "ahb2_mipi_dsi"; @@ -317,8 +324,10 @@ compatible = "allwinner,sun9i-a80-apb0-gates-clk"; reg = <0x06000590 0x4>; clocks = <&apb0>; - clock-indices = <1>, <5>, <11>, <12>, <13>, <15>, - <17>, <18>, <19>; + clock-indices = <1>, <5>, + <11>, <12>, <13>, + <15>, <17>, <18>, + <19>; clock-output-names = "apb0_spdif", "apb0_pio", "apb0_ac97", "apb0_i2s0", "apb0_i2s1", "apb0_lradc", "apb0_gpadc", "apb0_twd", @@ -330,8 +339,11 @@ compatible = "allwinner,sun9i-a80-apb1-gates-clk"; reg = <0x06000594 0x4>; clocks = <&apb1>; - clock-indices = <0>, <1>, <2>, <3>, <4>, - <16>, <17>, <18>, <19>, <20>, <21>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <16>, <17>, + <18>, <19>, + <20>, <21>; clock-output-names = "apb1_i2c0", "apb1_i2c1", "apb1_i2c2", "apb1_i2c3", "apb1_i2c4", "apb1_uart0", "apb1_uart1", diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index b47863d49ac6..7569b391704e 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -354,8 +354,7 @@ CONFIG_PROVE_LOCKING=y # CONFIG_FTRACE is not set # CONFIG_ARM_UNWIND is not set CONFIG_SECURITYFS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set -# CONFIG_CRYPTO_HW is not set +CONFIG_CRYPTO_DEV_FSL_CAAM=y CONFIG_CRC_CCITT=m CONFIG_CRC_T10DIF=y CONFIG_CRC7=m diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore index 6231d36b3635..31e1f538df7d 100644 --- a/arch/arm/crypto/.gitignore +++ b/arch/arm/crypto/.gitignore @@ -1 +1,3 @@ aesbs-core.S +sha256-core.S +sha512-core.S diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 28b9bb35949e..8857d2869a5f 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -19,9 +19,7 @@ struct pci_bus; struct device; struct hw_pci { -#ifdef CONFIG_PCI_MSI struct msi_controller *msi_ctrl; -#endif struct pci_ops *ops; int nr_controllers; void **private_data; @@ -42,9 +40,6 @@ struct hw_pci { * Per-controller structure */ struct pci_sys_data { -#ifdef CONFIG_PCI_MSI - struct msi_controller *msi_ctrl; -#endif struct list_head node; int busnr; /* primary bus number */ u64 mem_offset; /* bus->cpu memory mapping offset */ diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index c99e259469f7..12ebfcc1d539 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -10,7 +10,9 @@ * CPU. 
*/ #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) -#define finish_arch_switch(prev) dsb(ish) +#define __complete_pending_tlbi() dsb(ish) +#else +#define __complete_pending_tlbi() #endif /* @@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ + __complete_pending_tlbi(); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index fcbbbb1b9e95..874e1823f803 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -18,15 +18,6 @@ static int debug_pci; -#ifdef CONFIG_PCI_MSI -struct msi_controller *pcibios_msi_controller(struct pci_dev *dev) -{ - struct pci_sys_data *sysdata = dev->bus->sysdata; - - return sysdata->msi_ctrl; -} -#endif - /* * We can't use pci_get_device() here since we are * called from interrupt context. @@ -459,12 +450,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, for (nr = busnr = 0; nr < hw->nr_controllers; nr++) { sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL); - if (!sys) - panic("PCI: unable to allocate sys data!"); + if (WARN(!sys, "PCI: unable to allocate sys data!")) + break; -#ifdef CONFIG_PCI_MSI - sys->msi_ctrl = hw->msi_ctrl; -#endif sys->busnr = busnr; sys->swizzle = hw->swizzle; sys->map_irq = hw->map_irq; @@ -486,11 +474,14 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, if (hw->scan) sys->bus = hw->scan(nr, sys); else - sys->bus = pci_scan_root_bus(parent, sys->busnr, - hw->ops, sys, &sys->resources); + sys->bus = pci_scan_root_bus_msi(parent, + sys->busnr, hw->ops, sys, + &sys->resources, hw->msi_ctrl); - if (!sys->bus) - panic("PCI: unable to scan bus!"); + if (WARN(!sys->bus, "PCI: unable to scan bus!")) { + kfree(sys); + break; + } busnr = sys->bus->busn_res.end + 1; @@ -521,6 +512,8 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw) struct pci_bus *bus = sys->bus; if (!pci_has_flag(PCI_PROBE_ONLY)) { + struct pci_bus *child; + /* * Size the bridge windows. */ @@ -530,25 +523,15 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw) * Assign resources. */ pci_bus_assign_resources(bus); - } + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } /* * Tell drivers about devices found. */ pci_bus_add_devices(bus); } - - list_for_each_entry(sys, &head, node) { - struct pci_bus *bus = sys->bus; - - /* Configure PCI Express settings */ - if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { - struct pci_bus *child; - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - } - } } #ifndef CONFIG_PCI_HOST_ITE8152 diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index eaf58f88ef5d..685826c4a710 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -8,7 +8,6 @@ * Licensed under GPLv2 or later. 
*/ -#include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_platform.h> diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index ddfdd820e6f2..29e08aac8294 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -1010,11 +1010,13 @@ static struct davinci_spi_platform_data da8xx_spi_pdata[] = { .version = SPI_VERSION_2, .intr_line = 1, .dma_event_q = EVENTQ_0, + .prescaler_limit = 2, }, [1] = { .version = SPI_VERSION_2, .intr_line = 1, .dma_event_q = EVENTQ_0, + .prescaler_limit = 2, }, }; diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 9cbeda798584..567dc56fe8cd 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -411,6 +411,7 @@ static struct davinci_spi_platform_data dm355_spi0_pdata = { .num_chipselect = 2, .cshold_bug = true, .dma_event_q = EVENTQ_1, + .prescaler_limit = 1, }; static struct platform_device dm355_spi0_device = { .name = "spi_davinci", diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index e3a3c54b6832..6a890a8486d0 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -646,6 +646,7 @@ static struct davinci_spi_platform_data dm365_spi0_pdata = { .version = SPI_VERSION_1, .num_chipselect = 2, .dma_event_q = EVENTQ_3, + .prescaler_limit = 1, }; static struct resource dm365_spi0_resources[] = { diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c index c6bd7c7bd4aa..8cc62150116a 100644 --- a/arch/arm/mach-hisi/hisilicon.c +++ b/arch/arm/mach-hisi/hisilicon.c @@ -11,7 +11,6 @@ * published by the Free Software Foundation. */ -#include <linux/clk-provider.h> #include <linux/clocksource.h> #include <linux/irqchip.h> diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index edea697e8253..e283939a216f 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c @@ -16,7 +16,6 @@ #include <linux/pm_runtime.h> #include <linux/pm_clock.h> #include <linux/platform_device.h> -#include <linux/clk-provider.h> #include <linux/of.h> static struct dev_pm_domain keystone_pm_domain = { diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c index afee9083ad92..9f739f3cad4c 100644 --- a/arch/arm/mach-mvebu/board-v7.c +++ b/arch/arm/mach-mvebu/board-v7.c @@ -14,7 +14,6 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/clk-provider.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 903c85be2897..7892c7d3b6f4 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -12,8 +12,7 @@ obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o timer.o pm.o \ hwmod-common = omap_hwmod.o omap_hwmod_reset.o \ omap_hwmod_common_data.o -clock-common = clock.o clock_common_data.o \ - clkt_dpll.o clkt_clksel.o +clock-common = clock.o secure-common = omap-smc.o omap-secure.o obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) @@ -182,24 +181,17 @@ obj-$(CONFIG_SOC_DRA7XX) += $(clockdomain-common) obj-$(CONFIG_SOC_DRA7XX) += clockdomains7xx_data.o # Clock framework -obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o +obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpllcore.o obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_virt_prcm_set.o -obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o clkt_iclk.o 
-obj-$(CONFIG_SOC_OMAP2430) += clock2430.o -obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o -obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o -obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o -obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o -obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o +obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o +obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) +obj-$(CONFIG_ARCH_OMAP3) += clkt34xx_dpll3m2.o obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) -obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o -obj-$(CONFIG_SOC_AM33XX) += $(clock-common) dpll3xxx.o +obj-$(CONFIG_SOC_AM33XX) += $(clock-common) obj-$(CONFIG_SOC_OMAP5) += $(clock-common) -obj-$(CONFIG_SOC_OMAP5) += dpll3xxx.o dpll44xx.o obj-$(CONFIG_SOC_DRA7XX) += $(clock-common) -obj-$(CONFIG_SOC_DRA7XX) += dpll3xxx.o dpll44xx.o -obj-$(CONFIG_SOC_AM43XX) += $(clock-common) dpll3xxx.o +obj-$(CONFIG_SOC_AM43XX) += $(clock-common) # OMAP2 clock rate set data (old "OPP" data) obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c index eb69acf21014..3f6521313c93 100644 --- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c +++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c @@ -23,12 +23,13 @@ #include "clock.h" #include "clock3xxx.h" -#include "clock34xx.h" #include "sdrc.h" #include "sram.h" #define CYCLES_PER_MHZ 1000000 +struct clk *sdrc_ick_p, *arm_fck_p; + /* * CORE DPLL (DPLL3) M2 divider rate programming functions * @@ -60,12 +61,14 @@ int omap3_core_dpll_m2_set_rate(struct clk_hw *hw, unsigned long rate, if (!clk || !rate) return -EINVAL; - validrate = omap2_clksel_round_rate_div(clk, rate, &new_div); + new_div = DIV_ROUND_UP(parent_rate, rate); + validrate = parent_rate / new_div; + if (validrate != rate) return -EINVAL; - sdrcrate = __clk_get_rate(sdrc_ick_p); - clkrate = __clk_get_rate(hw->clk); + sdrcrate = clk_get_rate(sdrc_ick_p); + clkrate = clk_hw_get_rate(hw); if (rate > clkrate) sdrcrate <<= ((rate / clkrate) >> 1); else @@ -83,7 +86,7 @@ int omap3_core_dpll_m2_set_rate(struct clk_hw *hw, unsigned long rate, /* * XXX This only needs to be done when the CPU frequency changes */ - _mpurate = __clk_get_rate(arm_fck_p) / CYCLES_PER_MHZ; + _mpurate = clk_get_rate(arm_fck_p) / CYCLES_PER_MHZ; c = (_mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT; c += 1; /* for safety */ c *= SDRC_MPURATE_LOOPS; diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c deleted file mode 100644 index 7ee26108ac0d..000000000000 --- a/arch/arm/mach-omap2/clkt_clksel.c +++ /dev/null @@ -1,466 +0,0 @@ -/* - * clkt_clksel.c - OMAP2/3/4 clksel clock functions - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * - * clksel clocks are clocks that do not have a fixed parent, or that - * can divide their parent's rate, or possibly both at the same time, based - * on the contents of a hardware register bitfield. - * - * All of the various mux and divider settings can be encoded into - * struct clksel* data structures, and then these can be autogenerated - * from some hardware database for each new chip generation. 
This - * should avoid the need to write, review, and validate a lot of new - * clock code for each new chip, since it can be exported from the SoC - * design flow. This is now done on OMAP4. - * - * The fusion of mux and divider clocks is a software creation. In - * hardware reality, the multiplexer (parent selection) and the - * divider exist separately. XXX At some point these clksel clocks - * should be split into "divider" clocks and "mux" clocks to better - * match the hardware. - * - * (The name "clksel" comes from the name of the corresponding - * register field in the OMAP2/3 family of SoCs.) - * - * XXX Currently these clocks are only used in the OMAP2/3/4 code, but - * many of the OMAP1 clocks should be convertible to use this - * mechanism. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk-provider.h> -#include <linux/io.h> -#include <linux/bug.h> - -#include "clock.h" - -/* Private functions */ - -/** - * _get_clksel_by_parent() - return clksel struct for a given clk & parent - * @clk: OMAP struct clk ptr to inspect - * @src_clk: OMAP struct clk ptr of the parent clk to search for - * - * Scan the struct clksel array associated with the clock to find - * the element associated with the supplied parent clock address. - * Returns a pointer to the struct clksel on success or NULL on error. - */ -static const struct clksel *_get_clksel_by_parent(struct clk_hw_omap *clk, - struct clk *src_clk) -{ - const struct clksel *clks; - - if (!src_clk) - return NULL; - - for (clks = clk->clksel; clks->parent; clks++) - if (clks->parent == src_clk) - break; /* Found the requested parent */ - - if (!clks->parent) { - /* This indicates a data problem */ - WARN(1, "clock: %s: could not find parent clock %s in clksel array\n", - __clk_get_name(clk->hw.clk), __clk_get_name(src_clk)); - return NULL; - } - - return clks; -} - -/** - * _write_clksel_reg() - program a clock's clksel register in hardware - * @clk: struct clk * to program - * @v: clksel bitfield value to program (with LSB at bit 0) - * - * Shift the clksel register bitfield value @v to its appropriate - * location in the clksel register and write it in. This function - * will ensure that the write to the clksel_reg reaches its - * destination before returning -- important since PRM and CM register - * accesses can be quite slow compared to ARM cycles -- but does not - * take into account any time the hardware might take to switch the - * clock source. - */ -static void _write_clksel_reg(struct clk_hw_omap *clk, u32 field_val) -{ - u32 v; - - v = omap2_clk_readl(clk, clk->clksel_reg); - v &= ~clk->clksel_mask; - v |= field_val << __ffs(clk->clksel_mask); - omap2_clk_writel(v, clk, clk->clksel_reg); - - v = omap2_clk_readl(clk, clk->clksel_reg); /* OCP barrier */ -} - -/** - * _clksel_to_divisor() - turn clksel field value into integer divider - * @clk: OMAP struct clk to use - * @field_val: register field value to find - * - * Given a struct clk of a rate-selectable clksel clock, and a register field - * value to search for, find the corresponding clock divisor. The register - * field value should be pre-masked and shifted down so the LSB is at bit 0 - * before calling. Returns 0 on error or returns the actual integer divisor - * upon success. 
- */ -static u32 _clksel_to_divisor(struct clk_hw_omap *clk, u32 field_val) -{ - const struct clksel *clks; - const struct clksel_rate *clkr; - struct clk *parent; - - parent = __clk_get_parent(clk->hw.clk); - - clks = _get_clksel_by_parent(clk, parent); - if (!clks) - return 0; - - for (clkr = clks->rates; clkr->div; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - if (clkr->val == field_val) - break; - } - - if (!clkr->div) { - /* This indicates a data error */ - WARN(1, "clock: %s: could not find fieldval %d for parent %s\n", - __clk_get_name(clk->hw.clk), field_val, - __clk_get_name(parent)); - return 0; - } - - return clkr->div; -} - -/** - * _divisor_to_clksel() - turn clksel integer divisor into a field value - * @clk: OMAP struct clk to use - * @div: integer divisor to search for - * - * Given a struct clk of a rate-selectable clksel clock, and a clock - * divisor, find the corresponding register field value. Returns the - * register field value _before_ left-shifting (i.e., LSB is at bit - * 0); or returns 0xFFFFFFFF (~0) upon error. - */ -static u32 _divisor_to_clksel(struct clk_hw_omap *clk, u32 div) -{ - const struct clksel *clks; - const struct clksel_rate *clkr; - struct clk *parent; - - /* should never happen */ - WARN_ON(div == 0); - - parent = __clk_get_parent(clk->hw.clk); - clks = _get_clksel_by_parent(clk, parent); - if (!clks) - return ~0; - - for (clkr = clks->rates; clkr->div; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - if (clkr->div == div) - break; - } - - if (!clkr->div) { - pr_err("clock: %s: could not find divisor %d for parent %s\n", - __clk_get_name(clk->hw.clk), div, - __clk_get_name(parent)); - return ~0; - } - - return clkr->val; -} - -/** - * _read_divisor() - get current divisor applied to parent clock (from hdwr) - * @clk: OMAP struct clk to use. - * - * Read the current divisor register value for @clk that is programmed - * into the hardware, convert it into the actual divisor value, and - * return it; or return 0 on error. - */ -static u32 _read_divisor(struct clk_hw_omap *clk) -{ - u32 v; - - if (!clk->clksel || !clk->clksel_mask) - return 0; - - v = omap2_clk_readl(clk, clk->clksel_reg); - v &= clk->clksel_mask; - v >>= __ffs(clk->clksel_mask); - - return _clksel_to_divisor(clk, v); -} - -/* Public functions */ - -/** - * omap2_clksel_round_rate_div() - find divisor for the given clock and rate - * @clk: OMAP struct clk to use - * @target_rate: desired clock rate - * @new_div: ptr to where we should store the divisor - * - * Finds 'best' divider value in an array based on the source and target - * rates. The divider array must be sorted with smallest divider first. - * This function is also used by the DPLL3 M2 divider code. - * - * Returns the rounded clock rate or returns 0xffffffff on error. 
- */ -u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk, - unsigned long target_rate, - u32 *new_div) -{ - unsigned long test_rate; - const struct clksel *clks; - const struct clksel_rate *clkr; - u32 last_div = 0; - struct clk *parent; - unsigned long parent_rate; - const char *clk_name; - - parent = __clk_get_parent(clk->hw.clk); - clk_name = __clk_get_name(clk->hw.clk); - parent_rate = __clk_get_rate(parent); - - if (!clk->clksel || !clk->clksel_mask) - return ~0; - - pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n", - clk_name, target_rate); - - *new_div = 1; - - clks = _get_clksel_by_parent(clk, parent); - if (!clks) - return ~0; - - for (clkr = clks->rates; clkr->div; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - /* Sanity check */ - if (clkr->div <= last_div) - pr_err("clock: %s: clksel_rate table not sorted\n", - clk_name); - - last_div = clkr->div; - - test_rate = parent_rate / clkr->div; - - if (test_rate <= target_rate) - break; /* found it */ - } - - if (!clkr->div) { - pr_err("clock: %s: could not find divisor for target rate %ld for parent %s\n", - clk_name, target_rate, __clk_get_name(parent)); - return ~0; - } - - *new_div = clkr->div; - - pr_debug("clock: new_div = %d, new_rate = %ld\n", *new_div, - (parent_rate / clkr->div)); - - return parent_rate / clkr->div; -} - -/* - * Clocktype interface functions to the OMAP clock code - * (i.e., those used in struct clk field function pointers, etc.) - */ - -/** - * omap2_clksel_find_parent_index() - return the array index of the current - * hardware parent of @hw - * @hw: struct clk_hw * to find the current hardware parent of - * - * Given a struct clk_hw pointer @hw to the 'hw' member of a struct - * clk_hw_omap record representing a source-selectable hardware clock, - * read the hardware register and determine what its parent is - * currently set to. Intended to be called only by the common clock - * framework struct clk_hw_ops.get_parent function pointer. Return - * the array index of this parent clock upon success -- there is no - * way to return an error, so if we encounter an error, just WARN() - * and pretend that we know that we're doing. - */ -u8 omap2_clksel_find_parent_index(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - const struct clksel *clks; - const struct clksel_rate *clkr; - u32 r, found = 0; - struct clk *parent; - const char *clk_name; - int ret = 0, f = 0; - - parent = __clk_get_parent(hw->clk); - clk_name = __clk_get_name(hw->clk); - - /* XXX should be able to return an error */ - WARN((!clk->clksel || !clk->clksel_mask), - "clock: %s: attempt to call on a non-clksel clock", clk_name); - - r = omap2_clk_readl(clk, clk->clksel_reg) & clk->clksel_mask; - r >>= __ffs(clk->clksel_mask); - - for (clks = clk->clksel; clks->parent && !found; clks++) { - for (clkr = clks->rates; clkr->div && !found; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - if (clkr->val == r) { - found = 1; - ret = f; - } - } - f++; - } - - /* This indicates a data error */ - WARN(!found, "clock: %s: init parent: could not find regval %0x\n", - clk_name, r); - - return ret; -} - - -/** - * omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field - * @clk: struct clk * - * - * This function is intended to be called only by the clock framework. - * Each clksel clock should have its struct clk .recalc field set to this - * function. Returns the clock's current rate, based on its parent's rate - * and its current divisor setting in the hardware. 
- */ -unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate) -{ - unsigned long rate; - u32 div = 0; - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - if (!parent_rate) - return 0; - - div = _read_divisor(clk); - if (!div) - rate = parent_rate; - else - rate = parent_rate / div; - - pr_debug("%s: recalc'd %s's rate to %lu (div %d)\n", __func__, - __clk_get_name(hw->clk), rate, div); - - return rate; -} - -/** - * omap2_clksel_round_rate() - find rounded rate for the given clock and rate - * @clk: OMAP struct clk to use - * @target_rate: desired clock rate - * - * This function is intended to be called only by the clock framework. - * Finds best target rate based on the source clock and possible dividers. - * rates. The divider array must be sorted with smallest divider first. - * - * Returns the rounded clock rate or returns 0xffffffff on error. - */ -long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate, - unsigned long *parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 new_div; - - return omap2_clksel_round_rate_div(clk, target_rate, &new_div); -} - -/** - * omap2_clksel_set_rate() - program clock rate in hardware - * @clk: struct clk * to program rate - * @rate: target rate to program - * - * This function is intended to be called only by the clock framework. - * Program @clk's rate to @rate in the hardware. The clock can be - * either enabled or disabled when this happens, although if the clock - * is enabled, some downstream devices may glitch or behave - * unpredictably when the clock rate is changed - this depends on the - * hardware. This function does not currently check the usecount of - * the clock, so if multiple drivers are using the clock, and the rate - * is changed, they will all be affected without any notification. - * Returns -EINVAL upon error, or 0 upon success. - */ -int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 field_val, validrate, new_div = 0; - - if (!clk->clksel || !clk->clksel_mask) - return -EINVAL; - - validrate = omap2_clksel_round_rate_div(clk, rate, &new_div); - if (validrate != rate) - return -EINVAL; - - field_val = _divisor_to_clksel(clk, new_div); - if (field_val == ~0) - return -EINVAL; - - _write_clksel_reg(clk, field_val); - - pr_debug("clock: %s: set rate to %ld\n", __clk_get_name(hw->clk), - __clk_get_rate(hw->clk)); - - return 0; -} - -/* - * Clksel parent setting function - not passed in struct clk function - * pointer - instead, the OMAP clock code currently assumes that any - * parent-setting clock is a clksel clock, and calls - * omap2_clksel_set_parent() by default - */ - -/** - * omap2_clksel_set_parent() - change a clock's parent clock - * @clk: struct clk * of the child clock - * @new_parent: struct clk * of the new parent clock - * - * This function is intended to be called only by the clock framework. - * Change the parent clock of clock @clk to @new_parent. This is - * intended to be used while @clk is disabled. This function does not - * currently check the usecount of the clock, so if multiple drivers - * are using the clock, and the parent is changed, they will all be - * affected without any notification. Returns -EINVAL upon error, or - * 0 upon success. 
- */ -int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - if (!clk->clksel || !clk->clksel_mask) - return -EINVAL; - - _write_clksel_reg(clk, field_val); - return 0; -} diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c deleted file mode 100644 index f251a14cbf16..000000000000 --- a/arch/arm/mach-omap2/clkt_dpll.c +++ /dev/null @@ -1,370 +0,0 @@ -/* - * OMAP2/3/4 DPLL clock functions - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk-provider.h> -#include <linux/io.h> - -#include <asm/div64.h> - -#include "clock.h" - -/* DPLL rate rounding: minimum DPLL multiplier, divider values */ -#define DPLL_MIN_MULTIPLIER 2 -#define DPLL_MIN_DIVIDER 1 - -/* Possible error results from _dpll_test_mult */ -#define DPLL_MULT_UNDERFLOW -1 - -/* - * Scale factor to mitigate roundoff errors in DPLL rate rounding. - * The higher the scale factor, the greater the risk of arithmetic overflow, - * but the closer the rounded rate to the target rate. DPLL_SCALE_FACTOR - * must be a power of DPLL_SCALE_BASE. - */ -#define DPLL_SCALE_FACTOR 64 -#define DPLL_SCALE_BASE 2 -#define DPLL_ROUNDING_VAL ((DPLL_SCALE_BASE / 2) * \ - (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE)) - -/* - * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx. - * From device data manual section 4.3 "DPLL and DLL Specifications". - */ -#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN 500000 -#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX 2500000 - -/* _dpll_test_fint() return codes */ -#define DPLL_FINT_UNDERFLOW -1 -#define DPLL_FINT_INVALID -2 - -/* Private functions */ - -/* - * _dpll_test_fint - test whether an Fint value is valid for the DPLL - * @clk: DPLL struct clk to test - * @n: divider value (N) to test - * - * Tests whether a particular divider @n will result in a valid DPLL - * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter - * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate - * (assuming that it is counting N upwards), or -2 if the enclosing loop - * should skip to the next iteration (again assuming N is increasing). 
- */ -static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n) -{ - struct dpll_data *dd; - long fint, fint_min, fint_max; - int ret = 0; - - dd = clk->dpll_data; - - /* DPLL divider must result in a valid jitter correction val */ - fint = __clk_get_rate(__clk_get_parent(clk->hw.clk)) / n; - - if (dd->flags & DPLL_J_TYPE) { - fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN; - fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX; - } else { - fint_min = ti_clk_features.fint_min; - fint_max = ti_clk_features.fint_max; - } - - if (!fint_min || !fint_max) { - WARN(1, "No fint limits available!\n"); - return DPLL_FINT_INVALID; - } - - if (fint < ti_clk_features.fint_min) { - pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n", - n); - dd->max_divider = n; - ret = DPLL_FINT_UNDERFLOW; - } else if (fint > ti_clk_features.fint_max) { - pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n", - n); - dd->min_divider = n; - ret = DPLL_FINT_INVALID; - } else if (fint > ti_clk_features.fint_band1_max && - fint < ti_clk_features.fint_band2_min) { - pr_debug("rejecting n=%d due to Fint failure\n", n); - ret = DPLL_FINT_INVALID; - } - - return ret; -} - -static unsigned long _dpll_compute_new_rate(unsigned long parent_rate, - unsigned int m, unsigned int n) -{ - unsigned long long num; - - num = (unsigned long long)parent_rate * m; - do_div(num, n); - return num; -} - -/* - * _dpll_test_mult - test a DPLL multiplier value - * @m: pointer to the DPLL m (multiplier) value under test - * @n: current DPLL n (divider) value under test - * @new_rate: pointer to storage for the resulting rounded rate - * @target_rate: the desired DPLL rate - * @parent_rate: the DPLL's parent clock rate - * - * This code tests a DPLL multiplier value, ensuring that the - * resulting rate will not be higher than the target_rate, and that - * the multiplier value itself is valid for the DPLL. Initially, the - * integer pointed to by the m argument should be prescaled by - * multiplying by DPLL_SCALE_FACTOR. The code will replace this with - * a non-scaled m upon return. This non-scaled m will result in a - * new_rate as close as possible to target_rate (but not greater than - * target_rate) given the current (parent_rate, n, prescaled m) - * triple. Returns DPLL_MULT_UNDERFLOW in the event that the - * non-scaled m attempted to underflow, which can allow the calling - * function to bail out early; or 0 upon success. - */ -static int _dpll_test_mult(int *m, int n, unsigned long *new_rate, - unsigned long target_rate, - unsigned long parent_rate) -{ - int r = 0, carry = 0; - - /* Unscale m and round if necessary */ - if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL) - carry = 1; - *m = (*m / DPLL_SCALE_FACTOR) + carry; - - /* - * The new rate must be <= the target rate to avoid programming - * a rate that is impossible for the hardware to handle - */ - *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); - if (*new_rate > target_rate) { - (*m)--; - *new_rate = 0; - } - - /* Guard against m underflow */ - if (*m < DPLL_MIN_MULTIPLIER) { - *m = DPLL_MIN_MULTIPLIER; - *new_rate = 0; - r = DPLL_MULT_UNDERFLOW; - } - - if (*new_rate == 0) - *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); - - return r; -} - -/** - * _omap2_dpll_is_in_bypass - check if DPLL is in bypass mode or not - * @v: bitfield value of the DPLL enable - * - * Checks given DPLL enable bitfield to see whether the DPLL is in bypass - * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise. 
- */ -static int _omap2_dpll_is_in_bypass(u32 v) -{ - u8 mask, val; - - mask = ti_clk_features.dpll_bypass_vals; - - /* - * Each set bit in the mask corresponds to a bypass value equal - * to the bitshift. Go through each set-bit in the mask and - * compare against the given register value. - */ - while (mask) { - val = __ffs(mask); - mask ^= (1 << val); - if (v == val) - return 1; - } - - return 0; -} - -/* Public functions */ -u8 omap2_init_dpll_parent(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 v; - struct dpll_data *dd; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - v = omap2_clk_readl(clk, dd->control_reg); - v &= dd->enable_mask; - v >>= __ffs(dd->enable_mask); - - /* Reparent the struct clk in case the dpll is in bypass */ - if (_omap2_dpll_is_in_bypass(v)) - return 1; - - return 0; -} - -/** - * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate - * @clk: struct clk * of a DPLL - * - * DPLLs can be locked or bypassed - basically, enabled or disabled. - * When locked, the DPLL output depends on the M and N values. When - * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock - * or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and - * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively - * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk. - * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is - * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0 - * if the clock @clk is not a DPLL. - */ -unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) -{ - long long dpll_clk; - u32 dpll_mult, dpll_div, v; - struct dpll_data *dd; - - dd = clk->dpll_data; - if (!dd) - return 0; - - /* Return bypass rate if DPLL is bypassed */ - v = omap2_clk_readl(clk, dd->control_reg); - v &= dd->enable_mask; - v >>= __ffs(dd->enable_mask); - - if (_omap2_dpll_is_in_bypass(v)) - return __clk_get_rate(dd->clk_bypass); - - v = omap2_clk_readl(clk, dd->mult_div1_reg); - dpll_mult = v & dd->mult_mask; - dpll_mult >>= __ffs(dd->mult_mask); - dpll_div = v & dd->div1_mask; - dpll_div >>= __ffs(dd->div1_mask); - - dpll_clk = (long long) __clk_get_rate(dd->clk_ref) * dpll_mult; - do_div(dpll_clk, dpll_div + 1); - - return dpll_clk; -} - -/* DPLL rate rounding code */ - -/** - * omap2_dpll_round_rate - round a target rate for an OMAP DPLL - * @clk: struct clk * for a DPLL - * @target_rate: desired DPLL clock rate - * - * Given a DPLL and a desired target rate, round the target rate to a - * possible, programmable rate for this DPLL. Attempts to select the - * minimum possible n. Stores the computed (m, n) in the DPLL's - * dpll_data structure so set_rate() will not need to call this - * (expensive) function again. Returns ~0 if the target rate cannot - * be rounded, or the rounded rate upon success. 
- */ -long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, - unsigned long *parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - int m, n, r, scaled_max_m; - int min_delta_m = INT_MAX, min_delta_n = INT_MAX; - unsigned long scaled_rt_rp; - unsigned long new_rate = 0; - struct dpll_data *dd; - unsigned long ref_rate; - long delta; - long prev_min_delta = LONG_MAX; - const char *clk_name; - - if (!clk || !clk->dpll_data) - return ~0; - - dd = clk->dpll_data; - - ref_rate = __clk_get_rate(dd->clk_ref); - clk_name = __clk_get_name(hw->clk); - pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n", - clk_name, target_rate); - - scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR); - scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR; - - dd->last_rounded_rate = 0; - - for (n = dd->min_divider; n <= dd->max_divider; n++) { - - /* Is the (input clk, divider) pair valid for the DPLL? */ - r = _dpll_test_fint(clk, n); - if (r == DPLL_FINT_UNDERFLOW) - break; - else if (r == DPLL_FINT_INVALID) - continue; - - /* Compute the scaled DPLL multiplier, based on the divider */ - m = scaled_rt_rp * n; - - /* - * Since we're counting n up, a m overflow means we - * can bail out completely (since as n increases in - * the next iteration, there's no way that m can - * increase beyond the current m) - */ - if (m > scaled_max_m) - break; - - r = _dpll_test_mult(&m, n, &new_rate, target_rate, - ref_rate); - - /* m can't be set low enough for this n - try with a larger n */ - if (r == DPLL_MULT_UNDERFLOW) - continue; - - /* skip rates above our target rate */ - delta = target_rate - new_rate; - if (delta < 0) - continue; - - if (delta < prev_min_delta) { - prev_min_delta = delta; - min_delta_m = m; - min_delta_n = n; - } - - pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n", - clk_name, m, n, new_rate); - - if (delta == 0) - break; - } - - if (prev_min_delta == LONG_MAX) { - pr_debug("clock: %s: cannot round to rate %lu\n", - clk_name, target_rate); - return ~0; - } - - dd->last_rounded_m = min_delta_m; - dd->last_rounded_n = min_delta_n; - dd->last_rounded_rate = target_rate - prev_min_delta; - - return dd->last_rounded_rate; -} - diff --git a/arch/arm/mach-omap2/clkt_iclk.c b/arch/arm/mach-omap2/clkt_iclk.c deleted file mode 100644 index 55eb579aeae1..000000000000 --- a/arch/arm/mach-omap2/clkt_iclk.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * OMAP2/3 interface clock control - * - * Copyright (C) 2011 Nokia Corporation - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk-provider.h> -#include <linux/io.h> - -#include "clock.h" - -/* Register offsets */ -#define CM_AUTOIDLE 0x30 -#define CM_ICLKEN 0x10 - -/* Private functions */ - -/* XXX */ -void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk) -{ - u32 v; - void __iomem *r; - - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); - - v = omap2_clk_readl(clk, r); - v |= (1 << clk->enable_bit); - omap2_clk_writel(v, clk, r); -} - -/* XXX */ -void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk) -{ - u32 v; - void __iomem *r; - - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); - - v = omap2_clk_readl(clk, r); - v &= ~(1 << clk->enable_bit); - omap2_clk_writel(v, clk, r); -} - -/* Public data */ - -const struct clk_hw_omap_ops clkhwops_iclk = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, -}; - -const struct clk_hw_omap_ops clkhwops_iclk_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap2_clk_dflt_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - - - diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index a699d7169307..acb60ed17273 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -20,12 +20,11 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/delay.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/bitops.h> -#include <linux/regmap.h> #include <linux/of_address.h> -#include <linux/bootmem.h> #include <asm/cpu.h> #include <trace/events/power.h> @@ -40,19 +39,8 @@ #include "cm-regbits-34xx.h" #include "common.h" -/* - * MAX_MODULE_ENABLE_WAIT: maximum of number of microseconds to wait - * for a module to indicate that it is no longer in idle - */ -#define MAX_MODULE_ENABLE_WAIT 100000 - u16 cpu_mask; -/* - * Clock features setup. Used instead of CPU type checks. - */ -struct ti_clk_features ti_clk_features; - /* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */ #define OMAP3430_DPLL_FINT_BAND1_MIN 750000 #define OMAP3430_DPLL_FINT_BAND1_MAX 2100000 @@ -66,119 +54,24 @@ struct ti_clk_features ti_clk_features; #define OMAP3PLUS_DPLL_FINT_MIN 32000 #define OMAP3PLUS_DPLL_FINT_MAX 52000000 -/* - * clkdm_control: if true, then when a clock is enabled in the - * hardware, its clockdomain will first be enabled; and when a clock - * is disabled in the hardware, its clockdomain will be disabled - * afterwards. 
- */ -static bool clkdm_control = true; - -static LIST_HEAD(clk_hw_omap_clocks); - -struct clk_iomap { - struct regmap *regmap; - void __iomem *mem; -}; - -static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS]; - -static void clk_memmap_writel(u32 val, void __iomem *reg) -{ - struct clk_omap_reg *r = (struct clk_omap_reg *)&reg; - struct clk_iomap *io = clk_memmaps[r->index]; - - if (io->regmap) - regmap_write(io->regmap, r->offset, val); - else - writel_relaxed(val, io->mem + r->offset); -} - -static u32 clk_memmap_readl(void __iomem *reg) -{ - u32 val; - struct clk_omap_reg *r = (struct clk_omap_reg *)&reg; - struct clk_iomap *io = clk_memmaps[r->index]; - - if (io->regmap) - regmap_read(io->regmap, r->offset, &val); - else - val = readl_relaxed(io->mem + r->offset); - - return val; -} - -void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg) -{ - if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING))) - writel_relaxed(val, reg); - else - clk_memmap_writel(val, reg); -} - -u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg) -{ - if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING))) - return readl_relaxed(reg); - else - return clk_memmap_readl(reg); -} - static struct ti_clk_ll_ops omap_clk_ll_ops = { - .clk_readl = clk_memmap_readl, - .clk_writel = clk_memmap_writel, + .clkdm_clk_enable = clkdm_clk_enable, + .clkdm_clk_disable = clkdm_clk_disable, + .cm_wait_module_ready = omap_cm_wait_module_ready, + .cm_split_idlest_reg = cm_split_idlest_reg, }; /** - * omap2_clk_provider_init - initialize a clock provider - * @match_table: DT device table to match for devices to init - * @np: device node pointer for the this clock provider - * @index: index for the clock provider - + @syscon: syscon regmap pointer - * @mem: iomem pointer for the clock provider memory area, only used if - * syscon is not provided + * omap2_clk_setup_ll_ops - setup clock driver low-level ops * - * Initializes a clock provider module (CM/PRM etc.), registering - * the memory mapping at specified index and initializing the - * low level driver infrastructure. Returns 0 in success. + * Sets up clock driver low-level platform ops. These are needed + * for register accesses and various other misc platform operations. + * Returns 0 on success, -EBUSY if low level ops have been registered + * already. */ -int __init omap2_clk_provider_init(struct device_node *np, int index, - struct regmap *syscon, void __iomem *mem) +int __init omap2_clk_setup_ll_ops(void) { - struct clk_iomap *io; - - ti_clk_ll_ops = &omap_clk_ll_ops; - - io = kzalloc(sizeof(*io), GFP_KERNEL); - - io->regmap = syscon; - io->mem = mem; - - clk_memmaps[index] = io; - - ti_dt_clk_init_provider(np, index); - - return 0; -} - -/** - * omap2_clk_legacy_provider_init - initialize a legacy clock provider - * @index: index for the clock provider - * @mem: iomem pointer for the clock provider memory area - * - * Initializes a legacy clock provider memory mapping. 
- */ -void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) -{ - struct clk_iomap *io; - - ti_clk_ll_ops = &omap_clk_ll_ops; - - io = memblock_virt_alloc(sizeof(*io), 0); - - io->mem = mem; - - clk_memmaps[index] = io; + return ti_clk_setup_ll_ops(&omap_clk_ll_ops); } /* @@ -187,77 +80,6 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) /* Private functions */ - -/** - * _wait_idlest_generic - wait for a module to leave the idle state - * @clk: module clock to wait for (needed for register offsets) - * @reg: virtual address of module IDLEST register - * @mask: value to mask against to determine if the module is active - * @idlest: idle state indicator (0 or 1) for the clock - * @name: name of the clock (for printk) - * - * Wait for a module to leave idle, where its idle-status register is - * not inside the CM module. Returns 1 if the module left idle - * promptly, or 0 if the module did not leave idle before the timeout - * elapsed. XXX Deprecated - should be moved into drivers for the - * individual IP block that the IDLEST register exists in. - */ -static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg, - u32 mask, u8 idlest, const char *name) -{ - int i = 0, ena = 0; - - ena = (idlest) ? 0 : mask; - - omap_test_timeout(((omap2_clk_readl(clk, reg) & mask) == ena), - MAX_MODULE_ENABLE_WAIT, i); - - if (i < MAX_MODULE_ENABLE_WAIT) - pr_debug("omap clock: module associated with clock %s ready after %d loops\n", - name, i); - else - pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n", - name, MAX_MODULE_ENABLE_WAIT); - - return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0; -}; - -/** - * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE - * @clk: struct clk * belonging to the module - * - * If the necessary clocks for the OMAP hardware IP block that - * corresponds to clock @clk are enabled, then wait for the module to - * indicate readiness (i.e., to leave IDLE). This code does not - * belong in the clock code and will be moved in the medium term to - * module-dependent code. No return value. - */ -static void _omap2_module_wait_ready(struct clk_hw_omap *clk) -{ - void __iomem *companion_reg, *idlest_reg; - u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; - s16 prcm_mod; - int r; - - /* Not all modules have multiple clocks that their IDLEST depends on */ - if (clk->ops->find_companion) { - clk->ops->find_companion(clk, &companion_reg, &other_bit); - if (!(omap2_clk_readl(clk, companion_reg) & (1 << other_bit))) - return; - } - - clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); - r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id); - if (r) { - /* IDLEST register not in the CM module */ - _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit), - idlest_val, __clk_get_name(clk->hw.clk)); - } else { - omap_cm_wait_module_ready(0, prcm_mod, idlest_reg_id, - idlest_bit); - }; -} - /* Public functions */ /** @@ -290,279 +112,6 @@ void omap2_init_clk_clkdm(struct clk_hw *hw) } } -/** - * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable - * - * Prevent the OMAP clock code from calling into the clockdomain code - * when a hardware clock in that clockdomain is enabled or disabled. - * Intended to be called at init time from omap*_clk_init(). No - * return value. 
- */ -void __init omap2_clk_disable_clkdm_control(void) -{ - clkdm_control = false; -} - -/** - * omap2_clk_dflt_find_companion - find companion clock to @clk - * @clk: struct clk * to find the companion clock of - * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in - * @other_bit: u8 ** to return the companion clock bit shift in - * - * Note: We don't need special code here for INVERT_ENABLE for the - * time being since INVERT_ENABLE only applies to clocks enabled by - * CM_CLKEN_PLL - * - * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's - * just a matter of XORing the bits. - * - * Some clocks don't have companion clocks. For example, modules with - * only an interface clock (such as MAILBOXES) don't have a companion - * clock. Right now, this code relies on the hardware exporting a bit - * in the correct companion register that indicates that the - * nonexistent 'companion clock' is active. Future patches will - * associate this type of code with per-module data structures to - * avoid this issue, and remove the casts. No return value. - */ -void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, u8 *other_bit) -{ - u32 r; - - /* - * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes - * it's just a matter of XORing the bits. - */ - r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN)); - - *other_reg = (__force void __iomem *)r; - *other_bit = clk->enable_bit; -} - -/** - * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk - * @clk: struct clk * to find IDLEST info for - * @idlest_reg: void __iomem ** to return the CM_IDLEST va in - * @idlest_bit: u8 * to return the CM_IDLEST bit shift in - * @idlest_val: u8 * to return the idle status indicator - * - * Return the CM_IDLEST register address and bit shift corresponding - * to the module that "owns" this clock. This default code assumes - * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that - * the IDLEST register address ID corresponds to the CM_*CLKEN - * register address ID (e.g., that CM_FCLKEN2 corresponds to - * CM_IDLEST2). This is not true for all modules. No return value. - */ -void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = clk->enable_bit; - - /* - * 24xx uses 0 to indicate not ready, and 1 to indicate ready. - * 34xx reverses this, just to keep us on our toes - * AM35xx uses both, depending on the module. - */ - *idlest_val = ti_clk_features.cm_idlest_val; -} - -/** - * omap2_dflt_clk_enable - enable a clock in the hardware - * @hw: struct clk_hw * of the clock to enable - * - * Enable the clock @hw in the hardware. We first call into the OMAP - * clockdomain code to "enable" the corresponding clockdomain if this - * is the first enabled user of the clockdomain. Then program the - * hardware to enable the clock. Then wait for the IP block that uses - * this clock to leave idle (if applicable). Returns the error value - * from clkdm_clk_enable() if it terminated with an error, or -EINVAL - * if @hw has a null clock enable_reg, or zero upon success. 
- */ -int omap2_dflt_clk_enable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - u32 v; - int ret = 0; - - clk = to_clk_hw_omap(hw); - - if (clkdm_control && clk->clkdm) { - ret = clkdm_clk_enable(clk->clkdm, hw->clk); - if (ret) { - WARN(1, "%s: could not enable %s's clockdomain %s: %d\n", - __func__, __clk_get_name(hw->clk), - clk->clkdm->name, ret); - return ret; - } - } - - if (unlikely(clk->enable_reg == NULL)) { - pr_err("%s: %s missing enable_reg\n", __func__, - __clk_get_name(hw->clk)); - ret = -EINVAL; - goto err; - } - - /* FIXME should not have INVERT_ENABLE bit here */ - v = omap2_clk_readl(clk, clk->enable_reg); - if (clk->flags & INVERT_ENABLE) - v &= ~(1 << clk->enable_bit); - else - v |= (1 << clk->enable_bit); - omap2_clk_writel(v, clk, clk->enable_reg); - v = omap2_clk_readl(clk, clk->enable_reg); /* OCP barrier */ - - if (clk->ops && clk->ops->find_idlest) - _omap2_module_wait_ready(clk); - - return 0; - -err: - if (clkdm_control && clk->clkdm) - clkdm_clk_disable(clk->clkdm, hw->clk); - return ret; -} - -/** - * omap2_dflt_clk_disable - disable a clock in the hardware - * @hw: struct clk_hw * of the clock to disable - * - * Disable the clock @hw in the hardware, and call into the OMAP - * clockdomain code to "disable" the corresponding clockdomain if all - * clocks/hwmods in that clockdomain are now disabled. No return - * value. - */ -void omap2_dflt_clk_disable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - u32 v; - - clk = to_clk_hw_omap(hw); - if (!clk->enable_reg) { - /* - * 'independent' here refers to a clock which is not - * controlled by its parent. - */ - pr_err("%s: independent clock %s has no enable_reg\n", - __func__, __clk_get_name(hw->clk)); - return; - } - - v = omap2_clk_readl(clk, clk->enable_reg); - if (clk->flags & INVERT_ENABLE) - v |= (1 << clk->enable_bit); - else - v &= ~(1 << clk->enable_bit); - omap2_clk_writel(v, clk, clk->enable_reg); - /* No OCP barrier needed here since it is a disable operation */ - - if (clkdm_control && clk->clkdm) - clkdm_clk_disable(clk->clkdm, hw->clk); -} - -/** - * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw - * @hw: struct clk_hw * of the clock being enabled - * - * Increment the usecount of the clockdomain of the clock pointed to - * by @hw; if the usecount is 1, the clockdomain will be "enabled." - * Only needed for clocks that don't use omap2_dflt_clk_enable() as - * their enable function pointer. Passes along the return value of - * clkdm_clk_enable(), -EINVAL if @hw is not associated with a - * clockdomain, or 0 if clock framework-based clockdomain control is - * not implemented. 
- */ -int omap2_clkops_enable_clkdm(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - int ret = 0; - - clk = to_clk_hw_omap(hw); - - if (unlikely(!clk->clkdm)) { - pr_err("%s: %s: no clkdm set ?!\n", __func__, - __clk_get_name(hw->clk)); - return -EINVAL; - } - - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__, - __clk_get_name(hw->clk)); - - if (!clkdm_control) { - pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", - __func__, __clk_get_name(hw->clk)); - return 0; - } - - ret = clkdm_clk_enable(clk->clkdm, hw->clk); - WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n", - __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret); - - return ret; -} - -/** - * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw - * @hw: struct clk_hw * of the clock being disabled - * - * Decrement the usecount of the clockdomain of the clock pointed to - * by @hw; if the usecount is 0, the clockdomain will be "disabled." - * Only needed for clocks that don't use omap2_dflt_clk_disable() as their - * disable function pointer. No return value. - */ -void omap2_clkops_disable_clkdm(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - - clk = to_clk_hw_omap(hw); - - if (unlikely(!clk->clkdm)) { - pr_err("%s: %s: no clkdm set ?!\n", __func__, - __clk_get_name(hw->clk)); - return; - } - - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__, - __clk_get_name(hw->clk)); - - if (!clkdm_control) { - pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", - __func__, __clk_get_name(hw->clk)); - return; - } - - clkdm_clk_disable(clk->clkdm, hw->clk); -} - -/** - * omap2_dflt_clk_is_enabled - is clock enabled in the hardware? - * @hw: struct clk_hw * to check - * - * Return 1 if the clock represented by @hw is enabled in the - * hardware, or 0 otherwise. Intended for use in the struct - * clk_ops.is_enabled function pointer. - */ -int omap2_dflt_clk_is_enabled(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 v; - - v = omap2_clk_readl(clk, clk->enable_reg); - - if (clk->flags & INVERT_ENABLE) - v ^= BIT(clk->enable_bit); - - v &= BIT(clk->enable_bit); - - return v ? 1 : 0; -} - static int __initdata mpurate; /* @@ -584,178 +133,6 @@ static int __init omap_clk_setup(char *str) __setup("mpurate=", omap_clk_setup); /** - * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock - * @clk: struct clk * to initialize - * - * Add an OMAP clock @clk to the internal list of OMAP clocks. Used - * temporarily for autoidle handling, until this support can be - * integrated into the common clock framework code in some way. No - * return value. - */ -void omap2_init_clk_hw_omap_clocks(struct clk *clk) -{ - struct clk_hw_omap *c; - - if (__clk_get_flags(clk) & CLK_IS_BASIC) - return; - - c = to_clk_hw_omap(__clk_get_hw(clk)); - list_add(&c->node, &clk_hw_omap_clocks); -} - -/** - * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that - * support it - * - * Enable clock autoidle on all OMAP clocks that have allow_idle - * function pointers associated with them. This function is intended - * to be temporary until support for this is added to the common clock - * code. Returns 0. 
- */ -int omap2_clk_enable_autoidle_all(void) -{ - struct clk_hw_omap *c; - - list_for_each_entry(c, &clk_hw_omap_clocks, node) - if (c->ops && c->ops->allow_idle) - c->ops->allow_idle(c); - - of_ti_clk_allow_autoidle_all(); - - return 0; -} - -/** - * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that - * support it - * - * Disable clock autoidle on all OMAP clocks that have allow_idle - * function pointers associated with them. This function is intended - * to be temporary until support for this is added to the common clock - * code. Returns 0. - */ -int omap2_clk_disable_autoidle_all(void) -{ - struct clk_hw_omap *c; - - list_for_each_entry(c, &clk_hw_omap_clocks, node) - if (c->ops && c->ops->deny_idle) - c->ops->deny_idle(c); - - of_ti_clk_deny_autoidle_all(); - - return 0; -} - -/** - * omap2_clk_deny_idle - disable autoidle on an OMAP clock - * @clk: struct clk * to disable autoidle for - * - * Disable autoidle on an OMAP clock. - */ -int omap2_clk_deny_idle(struct clk *clk) -{ - struct clk_hw_omap *c; - - if (__clk_get_flags(clk) & CLK_IS_BASIC) - return -EINVAL; - - c = to_clk_hw_omap(__clk_get_hw(clk)); - if (c->ops && c->ops->deny_idle) - c->ops->deny_idle(c); - return 0; -} - -/** - * omap2_clk_allow_idle - enable autoidle on an OMAP clock - * @clk: struct clk * to enable autoidle for - * - * Enable autoidle on an OMAP clock. - */ -int omap2_clk_allow_idle(struct clk *clk) -{ - struct clk_hw_omap *c; - - if (__clk_get_flags(clk) & CLK_IS_BASIC) - return -EINVAL; - - c = to_clk_hw_omap(__clk_get_hw(clk)); - if (c->ops && c->ops->allow_idle) - c->ops->allow_idle(c); - return 0; -} - -/** - * omap2_clk_enable_init_clocks - prepare & enable a list of clocks - * @clk_names: ptr to an array of strings of clock names to enable - * @num_clocks: number of clock names in @clk_names - * - * Prepare and enable a list of clocks, named by @clk_names. No - * return value. XXX Deprecated; only needed until these clocks are - * properly claimed and enabled by the drivers or core code that uses - * them. XXX What code disables & calls clk_put on these clocks? - */ -void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks) -{ - struct clk *init_clk; - int i; - - for (i = 0; i < num_clocks; i++) { - init_clk = clk_get(NULL, clk_names[i]); - if (WARN(IS_ERR(init_clk), "could not find init clock %s\n", - clk_names[i])) - continue; - clk_prepare_enable(init_clk); - } -} - -const struct clk_hw_omap_ops clkhwops_wait = { - .find_idlest = omap2_clk_dflt_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -/** - * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument - * @mpurate_ck_name: clk name of the clock to change rate - * - * Change the ARM MPU clock rate to the rate specified on the command - * line, if one was specified. @mpurate_ck_name should be - * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx. - * XXX Does not handle voltage scaling - on OMAP2xxx this is currently - * handled by the virt_prcm_set clock, but this should be handled by - * the OPP layer. XXX This is intended to be handled by the OPP layer - * code in the near future and should be removed from the clock code. - * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects - * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name - * cannot be found, or 0 upon success. 
- */ -int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name) -{ - struct clk *mpurate_ck; - int r; - - if (!mpurate) - return -EINVAL; - - mpurate_ck = clk_get(NULL, mpurate_ck_name); - if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name)) - return -ENOENT; - - r = clk_set_rate(mpurate_ck, mpurate); - if (r < 0) { - WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", - mpurate_ck_name, mpurate, r); - clk_put(mpurate_ck); - return -EINVAL; - } - - calibrate_delay(); - clk_put(mpurate_ck); - - return 0; -} - -/** * omap2_clk_print_new_rates - print summary of current clock tree rates * @hfclkin_ck_name: clk name for the off-chip HF oscillator * @core_ck_name: clk name for the on-chip CORE_CLK @@ -801,29 +178,30 @@ void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name, */ void __init ti_clk_init_features(void) { + struct ti_clk_features features = { 0 }; /* Fint setup for DPLLs */ if (cpu_is_omap3430()) { - ti_clk_features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN; - ti_clk_features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX; - ti_clk_features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX; - ti_clk_features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN; + features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN; + features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX; + features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX; + features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN; } else { - ti_clk_features.fint_min = OMAP3PLUS_DPLL_FINT_MIN; - ti_clk_features.fint_max = OMAP3PLUS_DPLL_FINT_MAX; + features.fint_min = OMAP3PLUS_DPLL_FINT_MIN; + features.fint_max = OMAP3PLUS_DPLL_FINT_MAX; } /* Bypass value setup for DPLLs */ if (cpu_is_omap24xx()) { - ti_clk_features.dpll_bypass_vals |= + features.dpll_bypass_vals |= (1 << OMAP2XXX_EN_DPLL_LPBYPASS) | (1 << OMAP2XXX_EN_DPLL_FRBYPASS); } else if (cpu_is_omap34xx()) { - ti_clk_features.dpll_bypass_vals |= + features.dpll_bypass_vals |= (1 << OMAP3XXX_EN_DPLL_LPBYPASS) | (1 << OMAP3XXX_EN_DPLL_FRBYPASS); } else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx() || soc_is_omap54xx() || soc_is_dra7xx()) { - ti_clk_features.dpll_bypass_vals |= + features.dpll_bypass_vals |= (1 << OMAP4XXX_EN_DPLL_LPBYPASS) | (1 << OMAP4XXX_EN_DPLL_FRBYPASS) | (1 << OMAP4XXX_EN_DPLL_MNBYPASS); @@ -831,7 +209,7 @@ void __init ti_clk_init_features(void) /* Jitter correction only available on OMAP343X */ if (cpu_is_omap343x()) - ti_clk_features.flags |= TI_CLK_DPLL_HAS_FREQSEL; + features.flags |= TI_CLK_DPLL_HAS_FREQSEL; /* Idlest value for interface clocks. * 24xx uses 0 to indicate not ready, and 1 to indicate ready. @@ -839,11 +217,13 @@ void __init ti_clk_init_features(void) * AM35xx uses both, depending on the module. 
*/ if (cpu_is_omap24xx()) - ti_clk_features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL; + features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL; else if (cpu_is_omap34xx()) - ti_clk_features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL; + features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL; /* On OMAP3430 ES1.0, DPLL4 can't be re-programmed */ if (omap_rev() == OMAP3430_REV_ES1_0) - ti_clk_features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM; + features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM; + + ti_clk_setup_features(&features); } diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index 652ed0ab86ec..67da640ba1c7 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -23,90 +23,6 @@ #include <linux/clk-provider.h> #include <linux/clk/ti.h> -struct omap_clk { - u16 cpu; - struct clk_lookup lk; -}; - -#define CLK(dev, con, ck) \ - { \ - .lk = { \ - .dev_id = dev, \ - .con_id = con, \ - .clk = ck, \ - }, \ - } - -struct clockdomain; - -#define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name) \ - static struct clk_core _name##_core = { \ - .name = #_name, \ - .hw = &_name##_hw.hw, \ - .parent_names = _parent_array_name, \ - .num_parents = ARRAY_SIZE(_parent_array_name), \ - .ops = &_clkops_name, \ - }; \ - static struct clk _name = { \ - .core = &_name##_core, \ - }; - -#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name, \ - _clkops_name, _flags) \ - static struct clk_core _name##_core = { \ - .name = #_name, \ - .hw = &_name##_hw.hw, \ - .parent_names = _parent_array_name, \ - .num_parents = ARRAY_SIZE(_parent_array_name), \ - .ops = &_clkops_name, \ - .flags = _flags, \ - }; \ - static struct clk _name = { \ - .core = &_name##_core, \ - }; - -#define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \ - static struct clk_hw_omap _name##_hw = { \ - .hw = { \ - .clk = &_name, \ - }, \ - .clkdm_name = _clkdm_name, \ - }; - -#define DEFINE_CLK_OMAP_MUX(_name, _clkdm_name, _clksel, \ - _clksel_reg, _clksel_mask, \ - _parent_names, _ops) \ - static struct clk _name; \ - static struct clk_hw_omap _name##_hw = { \ - .hw = { \ - .clk = &_name, \ - }, \ - .clksel = _clksel, \ - .clksel_reg = _clksel_reg, \ - .clksel_mask = _clksel_mask, \ - .clkdm_name = _clkdm_name, \ - }; \ - DEFINE_STRUCT_CLK(_name, _parent_names, _ops); - -#define DEFINE_CLK_OMAP_MUX_GATE(_name, _clkdm_name, _clksel, \ - _clksel_reg, _clksel_mask, \ - _enable_reg, _enable_bit, \ - _hwops, _parent_names, _ops) \ - static struct clk _name; \ - static struct clk_hw_omap _name##_hw = { \ - .hw = { \ - .clk = &_name, \ - }, \ - .ops = _hwops, \ - .enable_reg = _enable_reg, \ - .enable_bit = _enable_bit, \ - .clksel = _clksel, \ - .clksel_reg = _clksel_reg, \ - .clksel_mask = _clksel_mask, \ - .clkdm_name = _clkdm_name, \ - }; \ - DEFINE_STRUCT_CLK(_name, _parent_names, _ops); - /* struct clksel_rate.flags possibilities */ #define RATE_IN_242X (1 << 0) #define RATE_IN_243X (1 << 1) @@ -127,38 +43,6 @@ struct clockdomain; /* RATE_IN_3430ES2PLUS_36XX includes 34xx/35xx with ES >=2, and all 36xx/37xx */ #define RATE_IN_3430ES2PLUS_36XX (RATE_IN_3430ES2PLUS | RATE_IN_36XX) - -/** - * struct clksel_rate - register bitfield values corresponding to clk divisors - * @val: register bitfield value (shifted to bit 0) - * @div: clock divisor corresponding to @val - * @flags: (see "struct clksel_rate.flags possibilities" above) - * - * @val should match the value of a read from struct clk.clksel_reg - * AND'ed with struct clk.clksel_mask, shifted right to bit 0. 
- * - * @div is the divisor that should be applied to the parent clock's rate - * to produce the current clock's rate. - */ -struct clksel_rate { - u32 val; - u8 div; - u16 flags; -}; - -/** - * struct clksel - available parent clocks, and a pointer to their divisors - * @parent: struct clk * to a possible parent clock - * @rates: available divisors for this parent clock - * - * A struct clksel is always associated with one or more struct clks - * and one or more struct clksel_rates. - */ -struct clksel { - struct clk *parent; - const struct clksel_rate *rates; -}; - /* CM_CLKSEL2_PLL.CORE_CLK_SRC bits (2XXX) */ #define CORE_CLK_SRC_32K 0x0 #define CORE_CLK_SRC_DPLL 0x1 @@ -180,105 +64,18 @@ struct clksel { #define OMAP4XXX_EN_DPLL_FRBYPASS 0x6 #define OMAP4XXX_EN_DPLL_LOCKED 0x7 -u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk); -void omap3_dpll_allow_idle(struct clk_hw_omap *clk); -void omap3_dpll_deny_idle(struct clk_hw_omap *clk); -void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk); -void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk); - -void __init omap2_clk_disable_clkdm_control(void); - -/* clkt_clksel.c public functions */ -u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk, - unsigned long target_rate, - u32 *new_div); -u8 omap2_clksel_find_parent_index(struct clk_hw *hw); -unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate); -long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate, - unsigned long *parent_rate); -int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate); -int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val); - -/* clkt_iclk.c public functions */ -extern void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk); -extern void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk); - -unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk); - -void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, - u8 *other_bit); -void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, u8 *idlest_val); -int omap2_clk_enable_autoidle_all(void); -int omap2_clk_allow_idle(struct clk *clk); -int omap2_clk_deny_idle(struct clk *clk); -int omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name); void omap2_clk_print_new_rates(const char *hfclkin_ck_name, const char *core_ck_name, const char *mpu_ck_name); -u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg); -void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg); - extern u16 cpu_mask; -/* - * Clock features setup. Used instead of CPU type checks. 
- */ -struct ti_clk_features { - u32 flags; - long fint_min; - long fint_max; - long fint_band1_max; - long fint_band2_min; - u8 dpll_bypass_vals; - u8 cm_idlest_val; -}; - -#define TI_CLK_DPLL_HAS_FREQSEL (1 << 0) -#define TI_CLK_DPLL4_DENY_REPROGRAM (1 << 1) - -extern struct ti_clk_features ti_clk_features; - extern const struct clkops clkops_omap2_dflt_wait; extern const struct clkops clkops_omap2_dflt; extern struct clk_functions omap2_clk_functions; -extern const struct clksel_rate gpt_32k_rates[]; -extern const struct clksel_rate gpt_sys_rates[]; -extern const struct clksel_rate gfx_l3_rates[]; -extern const struct clksel_rate dsp_ick_rates[]; - -extern const struct clk_hw_omap_ops clkhwops_iclk_wait; -extern const struct clk_hw_omap_ops clkhwops_wait; -extern const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait; -extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait; -extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait; -extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait; -extern const struct clk_hw_omap_ops clkhwops_apll54; -extern const struct clk_hw_omap_ops clkhwops_apll96; - -/* clksel_rate blocks shared between OMAP44xx and AM33xx */ -extern const struct clksel_rate div_1_0_rates[]; -extern const struct clksel_rate div3_1to4_rates[]; -extern const struct clksel_rate div_1_1_rates[]; -extern const struct clksel_rate div_1_2_rates[]; -extern const struct clksel_rate div_1_3_rates[]; -extern const struct clksel_rate div_1_4_rates[]; -extern const struct clksel_rate div31_1to31_rates[]; - -extern int omap2_clkops_enable_clkdm(struct clk_hw *hw); -extern void omap2_clkops_disable_clkdm(struct clk_hw *hw); - -struct regmap; - -int __init omap2_clk_provider_init(struct device_node *np, int index, - struct regmap *syscon, void __iomem *mem); -void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem); +int __init omap2_clk_setup_ll_ops(void); void __init ti_clk_init_features(void); #endif diff --git a/arch/arm/mach-omap2/clock2430.c b/arch/arm/mach-omap2/clock2430.c deleted file mode 100644 index cef0c8d1de52..000000000000 --- a/arch/arm/mach-omap2/clock2430.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * clock2430.c - OMAP2430-specific clock integration code - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, - * Gordon McNutt and RidgeRun, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "soc.h" -#include "iomap.h" -#include "clock.h" -#include "clock2xxx.h" -#include "cm2xxx.h" -#include "cm-regbits-24xx.h" - -/** - * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the - * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function - * passes back the correct CM_IDLEST register address for I2CHS - * modules. No return value. 
- */ -static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - *idlest_reg = OMAP2430_CM_REGADDR(CORE_MOD, CM_IDLEST); - *idlest_bit = clk->enable_bit; - *idlest_val = OMAP24XX_CM_IDLEST_VAL; -} - -/* 2430 I2CHS has non-standard IDLEST register */ -const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = { - .find_idlest = omap2430_clk_i2chs_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; diff --git a/arch/arm/mach-omap2/clock2xxx.c b/arch/arm/mach-omap2/clock2xxx.c deleted file mode 100644 index b870f6a9e283..000000000000 --- a/arch/arm/mach-omap2/clock2xxx.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * clock2xxx.c - OMAP2xxx-specific clock integration code - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, - * Gordon McNutt and RidgeRun, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "soc.h" -#include "clock.h" -#include "clock2xxx.h" -#include "cm.h" -#include "cm-regbits-24xx.h" - -struct clk_hw *dclk_hw; -/* - * Omap24xx specific clock functions - */ - -/* - * Switch the MPU rate if specified on cmdline. We cannot do this - * early until cmdline is parsed. XXX This should be removed from the - * clock code and handled by the OPP layer code in the near future. - */ -static int __init omap2xxx_clk_arch_init(void) -{ - int ret; - - if (!cpu_is_omap24xx()) - return 0; - - ret = omap2_clk_switch_mpurate_at_boot("virt_prcm_set"); - if (!ret) - omap2_clk_print_new_rates("sys_ck", "dpll_ck", "mpu_ck"); - - return ret; -} - -omap_arch_initcall(omap2xxx_clk_arch_init); - - diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c deleted file mode 100644 index 4596468e50ab..000000000000 --- a/arch/arm/mach-omap2/clock34xx.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * OMAP3-specific clock framework functions - * - * Copyright (C) 2007-2008 Texas Instruments, Inc. - * Copyright (C) 2007-2011 Nokia Corporation - * - * Paul Walmsley - * Jouni Högander - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu, - * Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "clock.h" -#include "clock34xx.h" -#include "cm3xxx.h" -#include "cm-regbits-34xx.h" - -/** - * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift - * from the CM_{I,F}CLKEN bit. Pass back the correct info via - * @idlest_reg and @idlest_bit. No return value. 
- */ -static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} -const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait = { - .find_idlest = omap3430es2_clk_ssi_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap3430es2_clk_ssi_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -/** - * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * Some OMAP modules on OMAP3 ES2+ chips have both initiator and - * target IDLEST bits. For our purposes, we are concerned with the - * target IDLEST bits, which exist at a different bit position than - * the *CLKEN bit position for these modules (DSS and USBHOST) (The - * default find_idlest code assumes that they are at the same - * position.) No return value. - */ -static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - /* USBHOST_IDLE has same shift */ - *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} - -const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = { - .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -/** - * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different - * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via - * @idlest_reg and @idlest_bit. No return value. 
- */ -static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} - -const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap3430es2_clk_hsotgusb_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait = { - .find_idlest = omap3430es2_clk_hsotgusb_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h deleted file mode 100644 index 084ba71b2b31..000000000000 --- a/arch/arm/mach-omap2/clock34xx.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * OMAP34xx clock function prototypes and macros - * - * Copyright (C) 2007-2010 Texas Instruments, Inc. - * Copyright (C) 2007-2011 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H - -extern const struct clkops clkops_omap3430es2_ssi_wait; -extern const struct clkops clkops_omap3430es2_iclk_ssi_wait; -extern const struct clkops clkops_omap3430es2_hsotgusb_wait; -extern const struct clkops clkops_omap3430es2_iclk_hsotgusb_wait; -extern const struct clkops clkops_omap3430es2_dss_usbhost_wait; -extern const struct clkops clkops_omap3430es2_iclk_dss_usbhost_wait; - -#endif diff --git a/arch/arm/mach-omap2/clock3517.c b/arch/arm/mach-omap2/clock3517.c deleted file mode 100644 index 4d79ae2c0241..000000000000 --- a/arch/arm/mach-omap2/clock3517.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * OMAP3517/3505-specific clock framework functions - * - * Copyright (C) 2010 Texas Instruments, Inc. - * Copyright (C) 2011 Nokia Corporation - * - * Ranjith Lohithakshan - * Paul Walmsley - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu, - * Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "clock.h" -#include "clock3517.h" -#include "cm3xxx.h" -#include "cm-regbits-34xx.h" - -/* - * In AM35xx IPSS, the {ICK,FCK} enable bits for modules are exported - * in the same register at a bit offset of 0x8. The EN_ACK for ICK is - * at an offset of 4 from ICK enable bit. - */ -#define AM35XX_IPSS_ICK_MASK 0xF -#define AM35XX_IPSS_ICK_EN_ACK_OFFSET 0x4 -#define AM35XX_IPSS_ICK_FCK_OFFSET 0x8 -#define AM35XX_IPSS_CLK_IDLEST_VAL 0 - -/** - * am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The interface clocks on AM35xx IPSS reflects the clock idle status - * in the enable register itsel at a bit offset of 4 from the enable - * bit. A value of 1 indicates that clock is enabled. 
- */ -static void am35xx_clk_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - *idlest_reg = (__force void __iomem *)(clk->enable_reg); - *idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET; - *idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL; -} - -/** - * am35xx_clk_find_companion - find companion clock to @clk - * @clk: struct clk * to find the companion clock of - * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in - * @other_bit: u8 ** to return the companion clock bit shift in - * - * Some clocks don't have companion clocks. For example, modules with - * only an interface clock (such as HECC) don't have a companion - * clock. Right now, this code relies on the hardware exporting a bit - * in the correct companion register that indicates that the - * nonexistent 'companion clock' is active. Future patches will - * associate this type of code with per-module data structures to - * avoid this issue, and remove the casts. No return value. - */ -static void am35xx_clk_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, - u8 *other_bit) -{ - *other_reg = (__force void __iomem *)(clk->enable_reg); - if (clk->enable_bit & AM35XX_IPSS_ICK_MASK) - *other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET; - else - *other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET; -} -const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = { - .find_idlest = am35xx_clk_find_idlest, - .find_companion = am35xx_clk_find_companion, -}; - -/** - * am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The IPSS target CM_IDLEST bit is at a different shift from the - * CM_{I,F}CLKEN bit. Pass back the correct info via @idlest_reg - * and @idlest_bit. No return value. - */ -static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = AM35XX_ST_IPSS_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} - -const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = am35xx_clk_ipss_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; diff --git a/arch/arm/mach-omap2/clock3517.h b/arch/arm/mach-omap2/clock3517.h deleted file mode 100644 index ca5e5a64c2e2..000000000000 --- a/arch/arm/mach-omap2/clock3517.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * OMAP3517/3505 clock function prototypes and macros - * - * Copyright (C) 2010 Texas Instruments, Inc. - * Copyright (C) 2010 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK3517_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK3517_H - -extern const struct clkops clkops_am35xx_ipss_module_wait; -extern const struct clkops clkops_am35xx_ipss_wait; - -#endif diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c deleted file mode 100644 index 91ccb962e09e..000000000000 --- a/arch/arm/mach-omap2/clock36xx.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * OMAP36xx-specific clkops - * - * Copyright (C) 2010 Texas Instruments, Inc. 
- * Copyright (C) 2010 Nokia Corporation - * - * Mike Turquette - * Vijaykumar GN - * Paul Walmsley - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu, - * Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/clk-provider.h> -#include <linux/io.h> - -#include "clock.h" -#include "clock36xx.h" -#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) - -/** - * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering - * from HSDivider PWRDN problem Implements Errata ID: i556. - * @clk: DPLL output struct clk - * - * 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck, - * dpll4_m5_ck & dpll4_m6_ck dividers gets loaded with reset - * valueafter their respective PWRDN bits are set. Any dummy write - * (Any other value different from the Read value) to the - * corresponding CM_CLKSEL register will refresh the dividers. - */ -int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk) -{ - struct clk_divider *parent; - struct clk_hw *parent_hw; - u32 dummy_v, orig_v; - struct clk_hw_omap *omap_clk = to_clk_hw_omap(clk); - int ret; - - /* Clear PWRDN bit of HSDIVIDER */ - ret = omap2_dflt_clk_enable(clk); - - parent_hw = __clk_get_hw(__clk_get_parent(clk->clk)); - parent = to_clk_divider(parent_hw); - - /* Restore the dividers */ - if (!ret) { - orig_v = omap2_clk_readl(omap_clk, parent->reg); - dummy_v = orig_v; - - /* Write any other value different from the Read value */ - dummy_v ^= (1 << parent->shift); - omap2_clk_writel(dummy_v, omap_clk, parent->reg); - - /* Write the original divider */ - omap2_clk_writel(orig_v, omap_clk, parent->reg); - } - - return ret; -} diff --git a/arch/arm/mach-omap2/clock36xx.h b/arch/arm/mach-omap2/clock36xx.h deleted file mode 100644 index 945bb7f083e9..000000000000 --- a/arch/arm/mach-omap2/clock36xx.h +++ /dev/null @@ -1,13 +0,0 @@ -/* - * OMAP36xx clock function prototypes and macros - * - * Copyright (C) 2010 Texas Instruments, Inc. - * Copyright (C) 2010 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H - -extern int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *hw); - -#endif diff --git a/arch/arm/mach-omap2/clock3xxx.c b/arch/arm/mach-omap2/clock3xxx.c deleted file mode 100644 index a9e86db5daf9..000000000000 --- a/arch/arm/mach-omap2/clock3xxx.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * OMAP3-specific clock framework functions - * - * Copyright (C) 2007-2008 Texas Instruments, Inc. - * Copyright (C) 2007-2010 Nokia Corporation - * - * Paul Walmsley - * Jouni Högander - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "soc.h" -#include "clock.h" -#include "clock3xxx.h" -#include "prm2xxx_3xxx.h" -#include "prm-regbits-34xx.h" -#include "cm2xxx_3xxx.h" -#include "cm-regbits-34xx.h" - -/* - * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks - * that are sourced by DPLL5, and both of these require this clock - * to be at 120 MHz for proper operation. - */ -#define DPLL5_FREQ_FOR_USBHOST 120000000 - -/* needed by omap3_core_dpll_m2_set_rate() */ -struct clk *sdrc_ick_p, *arm_fck_p; - -/** - * omap3_dpll4_set_rate - set rate for omap3 per-dpll - * @hw: clock to change - * @rate: target rate for clock - * @parent_rate: rate of the parent clock - * - * Check if the current SoC supports the per-dpll reprogram operation - * or not, and then do the rate change if supported. Returns -EINVAL - * if not supported, 0 for success, and potential error codes from the - * clock rate change. - */ -int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - /* - * According to the 12-5 CDP code from TI, "Limitation 2.5" - * on 3430ES1 prevents us from changing DPLL multipliers or dividers - * on DPLL4. - */ - if (ti_clk_features.flags & TI_CLK_DPLL4_DENY_REPROGRAM) { - pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); - return -EINVAL; - } - - return omap3_noncore_dpll_set_rate(hw, rate, parent_rate); -} - -/** - * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll - * @hw: clock to change - * @rate: target rate for clock - * @parent_rate: rate of the parent clock - * @index: parent index, 0 - reference clock, 1 - bypass clock - * - * Check if the current SoC support the per-dpll reprogram operation - * or not, and then do the rate + parent change if supported. Returns - * -EINVAL if not supported, 0 for success, and potential error codes - * from the clock rate change. - */ -int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate, u8 index) -{ - if (ti_clk_features.flags & TI_CLK_DPLL4_DENY_REPROGRAM) { - pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); - return -EINVAL; - } - - return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate, - index); -} - -void __init omap3_clk_lock_dpll5(void) -{ - struct clk *dpll5_clk; - struct clk *dpll5_m2_clk; - - dpll5_clk = clk_get(NULL, "dpll5_ck"); - clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST); - clk_prepare_enable(dpll5_clk); - - /* Program dpll5_m2_clk divider for no division */ - dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck"); - clk_prepare_enable(dpll5_m2_clk); - clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST); - - clk_disable_unprepare(dpll5_m2_clk); - clk_disable_unprepare(dpll5_clk); - return; -} - -/* Common clock code */ - -/* - * Switch the MPU rate if specified on cmdline. We cannot do this - * early until cmdline is parsed. XXX This should be removed from the - * clock code and handled by the OPP layer code in the near future. 
- */ -static int __init omap3xxx_clk_arch_init(void) -{ - int ret; - - if (!cpu_is_omap34xx()) - return 0; - - ret = omap2_clk_switch_mpurate_at_boot("dpll1_ck"); - if (!ret) - omap2_clk_print_new_rates("osc_sys_ck", "core_ck", "arm_fck"); - - return ret; -} - -omap_arch_initcall(omap3xxx_clk_arch_init); - - diff --git a/arch/arm/mach-omap2/clock44xx.h b/arch/arm/mach-omap2/clock44xx.h deleted file mode 100644 index 287a46f78d97..000000000000 --- a/arch/arm/mach-omap2/clock44xx.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * OMAP4 clock function prototypes and macros - * - * Copyright (C) 2009 Texas Instruments, Inc. - * Copyright (C) 2010 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H - -/* - * OMAP4430_REGM4XEN_MULT: If the CM_CLKMODE_DPLL_ABE.DPLL_REGM4XEN bit is - * set, then the DPLL's lock frequency is multiplied by 4 (OMAP4430 TRM - * vV Section 3.6.3.3.1 "DPLLs Output Clocks Parameters") - */ -#define OMAP4430_REGM4XEN_MULT 4 - -int omap4xxx_clk_init(void); - -#endif diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c deleted file mode 100644 index 61b60dfb14ce..000000000000 --- a/arch/arm/mach-omap2/clock_common_data.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * linux/arch/arm/mach-omap2/clock_common_data.c - * - * Copyright (C) 2005-2009 Texas Instruments, Inc. - * Copyright (C) 2004-2009 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This file contains clock data that is common to both the OMAP2xxx and - * OMAP3xxx clock definition files. 
- */ - -#include "clock.h" - -/* clksel_rate data common to 24xx/343x */ -const struct clksel_rate gpt_32k_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 0 } -}; - -const struct clksel_rate gpt_sys_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 0 } -}; - -const struct clksel_rate gfx_l3_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_3XXX }, - { .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_3XXX }, - { .div = 0 } -}; - -const struct clksel_rate dsp_ick_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_24XX }, - { .div = 2, .val = 2, .flags = RATE_IN_24XX }, - { .div = 3, .val = 3, .flags = RATE_IN_243X }, - { .div = 0 }, -}; - - -/* clksel_rate blocks shared between OMAP44xx and AM33xx */ - -const struct clksel_rate div_1_0_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div3_1to4_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_4430 }, - { .div = 2, .val = 1, .flags = RATE_IN_4430 }, - { .div = 4, .val = 2, .flags = RATE_IN_4430 }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_1_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_2_rates[] = { - { .div = 1, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_3_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_4_rates[] = { - { .div = 1, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div31_1to31_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 2, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 3, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 4, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 5, .val = 5, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 6, .val = 6, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 7, .val = 7, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 8, .val = 8, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 9, .val = 9, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 10, .val = 10, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 11, .val = 11, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 12, .val = 12, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 13, .val = 13, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 14, .val = 14, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 15, .val = 15, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 16, .val = 16, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 17, .val = 17, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 18, .val = 18, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 19, .val = 19, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 20, .val = 20, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 21, .val = 21, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 22, .val = 22, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 23, .val = 23, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 24, .val = 24, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 25, .val = 25, .flags = RATE_IN_4430 | RATE_IN_AM33XX 
}, - { .div = 26, .val = 26, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 27, .val = 27, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 28, .val = 28, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 29, .val = 29, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 30, .val = 30, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c deleted file mode 100644 index 44e57ec225d4..000000000000 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ /dev/null @@ -1,818 +0,0 @@ -/* - * OMAP3/4 - specific DPLL control functions - * - * Copyright (C) 2009-2010 Texas Instruments, Inc. - * Copyright (C) 2009-2010 Nokia Corporation - * - * Written by Paul Walmsley - * Testing and integration fixes by Jouni Högander - * - * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth - * Menon - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/list.h> -#include <linux/errno.h> -#include <linux/delay.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/bitops.h> -#include <linux/clkdev.h> - -#include "clockdomain.h" -#include "clock.h" - -/* CM_AUTOIDLE_PLL*.AUTO_* bit values */ -#define DPLL_AUTOIDLE_DISABLE 0x0 -#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1 - -#define MAX_DPLL_WAIT_TRIES 1000000 - -/* Private functions */ - -/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */ -static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits) -{ - const struct dpll_data *dd; - u32 v; - - dd = clk->dpll_data; - - v = omap2_clk_readl(clk, dd->control_reg); - v &= ~dd->enable_mask; - v |= clken_bits << __ffs(dd->enable_mask); - omap2_clk_writel(v, clk, dd->control_reg); -} - -/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */ -static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state) -{ - const struct dpll_data *dd; - int i = 0; - int ret = -EINVAL; - const char *clk_name; - - dd = clk->dpll_data; - clk_name = __clk_get_name(clk->hw.clk); - - state <<= __ffs(dd->idlest_mask); - - while (((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) - != state) && i < MAX_DPLL_WAIT_TRIES) { - i++; - udelay(1); - } - - if (i == MAX_DPLL_WAIT_TRIES) { - printk(KERN_ERR "clock: %s failed transition to '%s'\n", - clk_name, (state) ? "locked" : "bypassed"); - } else { - pr_debug("clock: %s transition to '%s' in %d loops\n", - clk_name, (state) ? 
"locked" : "bypassed", i); - - ret = 0; - } - - return ret; -} - -/* From 3430 TRM ES2 4.7.6.2 */ -static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n) -{ - unsigned long fint; - u16 f = 0; - - fint = __clk_get_rate(clk->dpll_data->clk_ref) / n; - - pr_debug("clock: fint is %lu\n", fint); - - if (fint >= 750000 && fint <= 1000000) - f = 0x3; - else if (fint > 1000000 && fint <= 1250000) - f = 0x4; - else if (fint > 1250000 && fint <= 1500000) - f = 0x5; - else if (fint > 1500000 && fint <= 1750000) - f = 0x6; - else if (fint > 1750000 && fint <= 2100000) - f = 0x7; - else if (fint > 7500000 && fint <= 10000000) - f = 0xB; - else if (fint > 10000000 && fint <= 12500000) - f = 0xC; - else if (fint > 12500000 && fint <= 15000000) - f = 0xD; - else if (fint > 15000000 && fint <= 17500000) - f = 0xE; - else if (fint > 17500000 && fint <= 21000000) - f = 0xF; - else - pr_debug("clock: unknown freqsel setting for %d\n", n); - - return f; -} - -/* - * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report - * readiness before returning. Will save and restore the DPLL's - * autoidle state across the enable, per the CDP code. If the DPLL - * locked successfully, return 0; if the DPLL did not lock in the time - * allotted, or DPLL3 was passed in, return -EINVAL. - */ -static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u8 ai; - u8 state = 1; - int r = 0; - - pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk)); - - dd = clk->dpll_data; - state <<= __ffs(dd->idlest_mask); - - /* Check if already locked */ - if ((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) == state) - goto done; - - ai = omap3_dpll_autoidle_read(clk); - - if (ai) - omap3_dpll_deny_idle(clk); - - _omap3_dpll_write_clken(clk, DPLL_LOCKED); - - r = _omap3_wait_dpll_status(clk, 1); - - if (ai) - omap3_dpll_allow_idle(clk); - -done: - return r; -} - -/* - * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enter low-power bypass mode. In - * bypass mode, the DPLL's rate is set equal to its parent clock's - * rate. Waits for the DPLL to report readiness before returning. - * Will save and restore the DPLL's autoidle state across the enable, - * per the CDP code. If the DPLL entered bypass mode successfully, - * return 0; if the DPLL did not enter bypass in the time allotted, or - * DPLL3 was passed in, or the DPLL does not support low-power bypass, - * return -EINVAL. - */ -static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk) -{ - int r; - u8 ai; - - if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) - return -EINVAL; - - pr_debug("clock: configuring DPLL %s for low-power bypass\n", - __clk_get_name(clk->hw.clk)); - - ai = omap3_dpll_autoidle_read(clk); - - _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS); - - r = _omap3_wait_dpll_status(clk, 0); - - if (ai) - omap3_dpll_allow_idle(clk); - - return r; -} - -/* - * _omap3_noncore_dpll_stop - instruct a DPLL to stop - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enter low-power stop. Will save and - * restore the DPLL's autoidle state across the stop, per the CDP - * code. If DPLL3 was passed in, or the DPLL does not support - * low-power stop, return -EINVAL; otherwise, return 0. 
- */ -static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk) -{ - u8 ai; - - if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP))) - return -EINVAL; - - pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk)); - - ai = omap3_dpll_autoidle_read(clk); - - _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP); - - if (ai) - omap3_dpll_allow_idle(clk); - - return 0; -} - -/** - * _lookup_dco - Lookup DCO used by j-type DPLL - * @clk: pointer to a DPLL struct clk - * @dco: digital control oscillator selector - * @m: DPLL multiplier to set - * @n: DPLL divider to set - * - * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" - * - * XXX This code is not needed for 3430/AM35xx; can it be optimized - * out in non-multi-OMAP builds for those chips? - */ -static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n) -{ - unsigned long fint, clkinp; /* watch out for overflow */ - - clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk)); - fint = (clkinp / n) * m; - - if (fint < 1000000000) - *dco = 2; - else - *dco = 4; -} - -/** - * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL - * @clk: pointer to a DPLL struct clk - * @sd_div: target sigma-delta divider - * @m: DPLL multiplier to set - * @n: DPLL divider to set - * - * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" - * - * XXX This code is not needed for 3430/AM35xx; can it be optimized - * out in non-multi-OMAP builds for those chips? - */ -static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n) -{ - unsigned long clkinp, sd; /* watch out for overflow */ - int mod1, mod2; - - clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk)); - - /* - * target sigma-delta to near 250MHz - * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)] - */ - clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */ - mod1 = (clkinp * m) % (250 * n); - sd = (clkinp * m) / (250 * n); - mod2 = sd % 10; - sd /= 10; - - if (mod1 || mod2) - sd++; - *sd_div = sd; -} - -/* - * _omap3_noncore_dpll_program - set non-core DPLL M,N values directly - * @clk: struct clk * of DPLL to set - * @freqsel: FREQSEL value to set - * - * Program the DPLL with the last M, N values calculated, and wait for - * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success. - */ -static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) -{ - struct dpll_data *dd = clk->dpll_data; - u8 dco, sd_div; - u32 v; - - /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */ - _omap3_noncore_dpll_bypass(clk); - - /* - * Set jitter correction. Jitter correction applicable for OMAP343X - * only since freqsel field is no longer present on other devices. 
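The sigma-delta divider in the deleted _lookup_sddiv() is a ceiling division that aims the modulator near 250 MHz, done entirely in integer math: the input clock is pre-scaled to 100 kHz units so 38.4 MHz and 19.2 MHz stay exact, and the trailing factor of 10 is removed after the rounding checks. A standalone sketch of that arithmetic (lookup_sddiv() here is a hypothetical free function, not the removed static helper):

#include <stdint.h>

/*
 * sd = ceil((clkinp_MHz * m) / (250 * n)), computed without floating point.
 * clkinp is given in Hz; scaling to 100 kHz units introduces a factor of 10
 * that the final division removes.
 */
static uint8_t lookup_sddiv(unsigned long clkinp, uint16_t m, uint8_t n)
{
        unsigned long scaled = clkinp / 100000; /* 100 kHz units */
        unsigned long num = scaled * m;
        unsigned long den = 250UL * n;
        unsigned long sd = num / den;
        int round_up = (num % den) || (sd % 10);

        sd /= 10;
        if (round_up)
                sd++;

        return (uint8_t)sd;
}

/* Worked example: 38.4 MHz input, m = 443, n = 11
 * -> sd = ceil(38.4 * 443 / 2750) = ceil(6.186) = 7 */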
- */ - if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) { - v = omap2_clk_readl(clk, dd->control_reg); - v &= ~dd->freqsel_mask; - v |= freqsel << __ffs(dd->freqsel_mask); - omap2_clk_writel(v, clk, dd->control_reg); - } - - /* Set DPLL multiplier, divider */ - v = omap2_clk_readl(clk, dd->mult_div1_reg); - - /* Handle Duty Cycle Correction */ - if (dd->dcc_mask) { - if (dd->last_rounded_rate >= dd->dcc_rate) - v |= dd->dcc_mask; /* Enable DCC */ - else - v &= ~dd->dcc_mask; /* Disable DCC */ - } - - v &= ~(dd->mult_mask | dd->div1_mask); - v |= dd->last_rounded_m << __ffs(dd->mult_mask); - v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask); - - /* Configure dco and sd_div for dplls that have these fields */ - if (dd->dco_mask) { - _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n); - v &= ~(dd->dco_mask); - v |= dco << __ffs(dd->dco_mask); - } - if (dd->sddiv_mask) { - _lookup_sddiv(clk, &sd_div, dd->last_rounded_m, - dd->last_rounded_n); - v &= ~(dd->sddiv_mask); - v |= sd_div << __ffs(dd->sddiv_mask); - } - - omap2_clk_writel(v, clk, dd->mult_div1_reg); - - /* Set 4X multiplier and low-power mode */ - if (dd->m4xen_mask || dd->lpmode_mask) { - v = omap2_clk_readl(clk, dd->control_reg); - - if (dd->m4xen_mask) { - if (dd->last_rounded_m4xen) - v |= dd->m4xen_mask; - else - v &= ~dd->m4xen_mask; - } - - if (dd->lpmode_mask) { - if (dd->last_rounded_lpmode) - v |= dd->lpmode_mask; - else - v &= ~dd->lpmode_mask; - } - - omap2_clk_writel(v, clk, dd->control_reg); - } - - /* We let the clock framework set the other output dividers later */ - - /* REVISIT: Set ramp-up delay? */ - - _omap3_noncore_dpll_lock(clk); - - return 0; -} - -/* Public functions */ - -/** - * omap3_dpll_recalc - recalculate DPLL rate - * @clk: DPLL struct clk - * - * Recalculate and propagate the DPLL rate. - */ -unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - return omap2_get_dpll_rate(clk); -} - -/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */ - -/** - * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock. - * The choice of modes depends on the DPLL's programmed rate: if it is - * the same as the DPLL's parent clock, it will enter bypass; - * otherwise, it will enter lock. This code will wait for the DPLL to - * indicate readiness before returning, unless the DPLL takes too long - * to enter the target state. Intended to be used as the struct clk's - * enable function. If DPLL3 was passed in, or the DPLL does not - * support low-power stop, or if the DPLL took too long to enter - * bypass or lock, return -EINVAL; otherwise, return 0. 
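Every register update in the deleted omap3_noncore_dpll_program() follows one idiom: clear the field through its mask, then shift the new value up by the mask's lowest set bit, which is what the kernel's __ffs() returns. A self-contained sketch of that insert/extract pair, with a hypothetical ctz32() standing in for __ffs():

#include <stdint.h>

/* Count trailing zeros; stands in for the kernel's __ffs() on a non-zero mask. */
static unsigned int ctz32(uint32_t x)
{
        unsigned int n = 0;

        while (!(x & 1)) {
                x >>= 1;
                n++;
        }
        return n;
}

/* Replace the bits selected by mask with val (val given right-aligned). */
static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t val)
{
        return (reg & ~mask) | ((val << ctz32(mask)) & mask);
}

/* Extract the bits selected by mask, right-aligned. */
static uint32_t field_get(uint32_t reg, uint32_t mask)
{
        return (reg & mask) >> ctz32(mask);
}

/* e.g. programming the multiplier and divider would look like
 * v = field_set(v, mult_mask, m); v = field_set(v, div1_mask, n - 1); */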
- */ -int omap3_noncore_dpll_enable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - int r; - struct dpll_data *dd; - struct clk_hw *parent; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (clk->clkdm) { - r = clkdm_clk_enable(clk->clkdm, hw->clk); - if (r) { - WARN(1, - "%s: could not enable %s's clockdomain %s: %d\n", - __func__, __clk_get_name(hw->clk), - clk->clkdm->name, r); - return r; - } - } - - parent = __clk_get_hw(__clk_get_parent(hw->clk)); - - if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) { - WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); - r = _omap3_noncore_dpll_bypass(clk); - } else { - WARN_ON(parent != __clk_get_hw(dd->clk_ref)); - r = _omap3_noncore_dpll_lock(clk); - } - - return r; -} - -/** - * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enter low-power stop. This function is - * intended for use in struct clkops. No return value. - */ -void omap3_noncore_dpll_disable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - _omap3_noncore_dpll_stop(clk); - if (clk->clkdm) - clkdm_clk_disable(clk->clkdm, hw->clk); -} - - -/* Non-CORE DPLL rate set code */ - -/** - * omap3_noncore_dpll_determine_rate - determine rate for a DPLL - * @hw: pointer to the clock to determine rate for - * @rate: target rate for the DPLL - * @best_parent_rate: pointer for returning best parent rate - * @best_parent_clk: pointer for returning best parent clock - * - * Determines which DPLL mode to use for reaching a desired target rate. - * Checks whether the DPLL shall be in bypass or locked mode, and if - * locked, calculates the M,N values for the DPLL via round-rate. - * Returns a positive clock rate with success, negative error value - * in failure. - */ -long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - - if (!hw || !rate) - return -EINVAL; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (__clk_get_rate(dd->clk_bypass) == rate && - (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { - *best_parent_clk = __clk_get_hw(dd->clk_bypass); - } else { - rate = omap2_dpll_round_rate(hw, rate, best_parent_rate); - *best_parent_clk = __clk_get_hw(dd->clk_ref); - } - - *best_parent_rate = rate; - - return rate; -} - -/** - * omap3_noncore_dpll_set_parent - set parent for a DPLL clock - * @hw: pointer to the clock to set parent for - * @index: parent index to select - * - * Sets parent for a DPLL clock. This sets the DPLL into bypass or - * locked mode. Returns 0 with success, negative error value otherwise. - */ -int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - int ret; - - if (!hw) - return -EINVAL; - - if (index) - ret = _omap3_noncore_dpll_bypass(clk); - else - ret = _omap3_noncore_dpll_lock(clk); - - return ret; -} - -/** - * omap3_noncore_dpll_set_rate - set rate for a DPLL clock - * @hw: pointer to the clock to set parent for - * @rate: target rate for the clock - * @parent_rate: rate of the parent clock - * - * Sets rate for a DPLL clock. First checks if the clock parent is - * reference clock (in bypass mode, the rate of the clock can't be - * changed) and proceeds with the rate change operation. 
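The rate-determination path removed here makes a single decision: if the requested rate equals the bypass clock's rate and the DPLL supports low-power bypass, pick the bypass parent; otherwise round against the reference clock and lock. A reduced sketch of that choice, with a hypothetical dpll_cfg structure in place of struct dpll_data:

#include <stdbool.h>

/* Hypothetical stand-in for struct dpll_data; only what the decision needs. */
struct dpll_cfg {
        unsigned long bypass_rate;      /* rate of the bypass parent clock */
        bool          has_lp_bypass;    /* DPLL supports low-power bypass */
};

enum dpll_parent { DPLL_PARENT_REF, DPLL_PARENT_BYPASS };

/*
 * Decide which parent a requested rate implies and return the rate the
 * DPLL will actually provide: the bypass rate, or the rounded locked rate
 * computed by the separate round_rate step.
 */
static unsigned long dpll_determine_rate(const struct dpll_cfg *cfg,
                                         unsigned long rate,
                                         unsigned long rounded_lock_rate,
                                         enum dpll_parent *parent)
{
        if (rate == cfg->bypass_rate && cfg->has_lp_bypass) {
                *parent = DPLL_PARENT_BYPASS;
                return rate;
        }

        *parent = DPLL_PARENT_REF;
        return rounded_lock_rate;
}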
Returns 0 - * with success, negative error value otherwise. - */ -int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - u16 freqsel = 0; - int ret; - - if (!hw || !rate) - return -EINVAL; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (__clk_get_hw(__clk_get_parent(hw->clk)) != - __clk_get_hw(dd->clk_ref)) - return -EINVAL; - - if (dd->last_rounded_rate == 0) - return -EINVAL; - - /* Freqsel is available only on OMAP343X devices */ - if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) { - freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); - WARN_ON(!freqsel); - } - - pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, - __clk_get_name(hw->clk), rate); - - ret = omap3_noncore_dpll_program(clk, freqsel); - - return ret; -} - -/** - * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock - * @hw: pointer to the clock to set rate and parent for - * @rate: target rate for the DPLL - * @parent_rate: clock rate of the DPLL parent - * @index: new parent index for the DPLL, 0 - reference, 1 - bypass - * - * Sets rate and parent for a DPLL clock. If new parent is the bypass - * clock, only selects the parent. Otherwise proceeds with a rate - * change, as this will effectively also change the parent as the - * DPLL is put into locked mode. Returns 0 with success, negative error - * value otherwise. - */ -int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, - unsigned long rate, - unsigned long parent_rate, - u8 index) -{ - int ret; - - if (!hw || !rate) - return -EINVAL; - - /* - * clk-ref at index[0], in which case we only need to set rate, - * the parent will be changed automatically with the lock sequence. - * With clk-bypass case we only need to change parent. - */ - if (index) - ret = omap3_noncore_dpll_set_parent(hw, index); - else - ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate); - - return ret; -} - -/* DPLL autoidle read/set code */ - -/** - * omap3_dpll_autoidle_read - read a DPLL's autoidle bits - * @clk: struct clk * of the DPLL to read - * - * Return the DPLL's autoidle bits, shifted down to bit 0. Returns - * -EINVAL if passed a null pointer or if the struct clk does not - * appear to refer to a DPLL. - */ -u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u32 v; - - if (!clk || !clk->dpll_data) - return -EINVAL; - - dd = clk->dpll_data; - - if (!dd->autoidle_reg) - return -EINVAL; - - v = omap2_clk_readl(clk, dd->autoidle_reg); - v &= dd->autoidle_mask; - v >>= __ffs(dd->autoidle_mask); - - return v; -} - -/** - * omap3_dpll_allow_idle - enable DPLL autoidle bits - * @clk: struct clk * of the DPLL to operate on - * - * Enable DPLL automatic idle control. This automatic idle mode - * switching takes effect only when the DPLL is locked, at least on - * OMAP3430. The DPLL will enter low-power stop when its downstream - * clocks are gated. No return value. - */ -void omap3_dpll_allow_idle(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u32 v; - - if (!clk || !clk->dpll_data) - return; - - dd = clk->dpll_data; - - if (!dd->autoidle_reg) - return; - - /* - * REVISIT: CORE DPLL can optionally enter low-power bypass - * by writing 0x5 instead of 0x1. Add some mechanism to - * optionally enter this mode. 
- */ - v = omap2_clk_readl(clk, dd->autoidle_reg); - v &= ~dd->autoidle_mask; - v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask); - omap2_clk_writel(v, clk, dd->autoidle_reg); - -} - -/** - * omap3_dpll_deny_idle - prevent DPLL from automatically idling - * @clk: struct clk * of the DPLL to operate on - * - * Disable DPLL automatic idle control. No return value. - */ -void omap3_dpll_deny_idle(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u32 v; - - if (!clk || !clk->dpll_data) - return; - - dd = clk->dpll_data; - - if (!dd->autoidle_reg) - return; - - v = omap2_clk_readl(clk, dd->autoidle_reg); - v &= ~dd->autoidle_mask; - v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask); - omap2_clk_writel(v, clk, dd->autoidle_reg); - -} - -/* Clock control for DPLL outputs */ - -/* Find the parent DPLL for the given clkoutx2 clock */ -static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) -{ - struct clk_hw_omap *pclk = NULL; - struct clk *parent; - - /* Walk up the parents of clk, looking for a DPLL */ - do { - do { - parent = __clk_get_parent(hw->clk); - hw = __clk_get_hw(parent); - } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC)); - if (!hw) - break; - pclk = to_clk_hw_omap(hw); - } while (pclk && !pclk->dpll_data); - - /* clk does not have a DPLL as a parent? error in the clock data */ - if (!pclk) { - WARN_ON(1); - return NULL; - } - - return pclk; -} - -/** - * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate - * @clk: DPLL output struct clk - * - * Using parent clock DPLL data, look up DPLL state. If locked, set our - * rate to the dpll_clk * 2; otherwise, just use dpll_clk. - */ -unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, - unsigned long parent_rate) -{ - const struct dpll_data *dd; - unsigned long rate; - u32 v; - struct clk_hw_omap *pclk = NULL; - - if (!parent_rate) - return 0; - - pclk = omap3_find_clkoutx2_dpll(hw); - - if (!pclk) - return 0; - - dd = pclk->dpll_data; - - WARN_ON(!dd->enable_mask); - - v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask; - v >>= __ffs(dd->enable_mask); - if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE)) - rate = parent_rate; - else - rate = parent_rate * 2; - return rate; -} - -int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - return 0; -} - -long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - const struct dpll_data *dd; - u32 v; - struct clk_hw_omap *pclk = NULL; - - if (!*prate) - return 0; - - pclk = omap3_find_clkoutx2_dpll(hw); - - if (!pclk) - return 0; - - dd = pclk->dpll_data; - - /* TYPE J does not have a clkoutx2 */ - if (dd->flags & DPLL_J_TYPE) { - *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate); - return *prate; - } - - WARN_ON(!dd->enable_mask); - - v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask; - v >>= __ffs(dd->enable_mask); - - /* If in bypass, the rate is fixed to the bypass rate*/ - if (v != OMAP3XXX_EN_DPLL_LOCKED) - return *prate; - - if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { - unsigned long best_parent; - - best_parent = (rate / 2); - *prate = __clk_round_rate(__clk_get_parent(hw->clk), - best_parent); - } - - return *prate * 2; -} - -/* OMAP3/4 non-CORE DPLL clkops */ -const struct clk_hw_omap_ops clkhwops_omap3_dpll = { - .allow_idle = omap3_dpll_allow_idle, - .deny_idle = omap3_dpll_deny_idle, -}; diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c deleted 
file mode 100644 index f231be05b9a6..000000000000 --- a/arch/arm/mach-omap2/dpll44xx.c +++ /dev/null @@ -1,232 +0,0 @@ -/* - * OMAP4-specific DPLL control functions - * - * Copyright (C) 2011 Texas Instruments, Inc. - * Rajendra Nayak - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/bitops.h> - -#include "clock.h" - -/* - * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that - * can supported when using the DPLL low-power mode. Frequencies are - * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control, - * Status, and Low-Power Operation Mode". - */ -#define OMAP4_DPLL_LP_FINT_MAX 1000000 -#define OMAP4_DPLL_LP_FOUT_MAX 100000000 - -/* - * Bitfield declarations - */ -#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK (1 << 8) -#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK (1 << 10) -#define OMAP4430_DPLL_REGM4XEN_MASK (1 << 11) - -/* Static rate multiplier for OMAP4 REGM4XEN clocks */ -#define OMAP4430_REGM4XEN_MULT 4 - -void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk) -{ - u32 v; - u32 mask; - - if (!clk || !clk->clksel_reg) - return; - - mask = clk->flags & CLOCK_CLKOUTX2 ? - OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : - OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - - v = omap2_clk_readl(clk, clk->clksel_reg); - /* Clear the bit to allow gatectrl */ - v &= ~mask; - omap2_clk_writel(v, clk, clk->clksel_reg); -} - -void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk) -{ - u32 v; - u32 mask; - - if (!clk || !clk->clksel_reg) - return; - - mask = clk->flags & CLOCK_CLKOUTX2 ? - OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : - OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - - v = omap2_clk_readl(clk, clk->clksel_reg); - /* Set the bit to deny gatectrl */ - v |= mask; - omap2_clk_writel(v, clk, clk->clksel_reg); -} - -const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = { - .allow_idle = omap4_dpllmx_allow_gatectrl, - .deny_idle = omap4_dpllmx_deny_gatectrl, -}; - -/** - * omap4_dpll_lpmode_recalc - compute DPLL low-power setting - * @dd: pointer to the dpll data structure - * - * Calculates if low-power mode can be enabled based upon the last - * multiplier and divider values calculated. If low-power mode can be - * enabled, then the bit to enable low-power mode is stored in the - * last_rounded_lpmode variable. This implementation is based upon the - * criteria for enabling low-power mode as described in the OMAP4430/60 - * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power - * Operation Mode". - */ -static void omap4_dpll_lpmode_recalc(struct dpll_data *dd) -{ - long fint, fout; - - fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1); - fout = fint * dd->last_rounded_m; - - if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX)) - dd->last_rounded_lpmode = 1; - else - dd->last_rounded_lpmode = 0; -} - -/** - * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit - * @clk: struct clk * of the DPLL to compute the rate for - * - * Compute the output rate for the OMAP4 DPLL represented by @clk. - * Takes the REGM4XEN bit into consideration, which is needed for the - * OMAP4 ABE DPLL. Returns the DPLL's output rate (before M-dividers) - * upon success, or 0 upon error. 
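The low-power-mode test in the deleted omap4_dpll_lpmode_recalc() is two threshold comparisons against the OMAP4430/60 TRM limits. As a standalone helper (dpll_lpmode_ok() is a name invented for this sketch; rates are in Hz and mirror the removed file's fint and fout definitions):

#include <stdbool.h>

#define OMAP4_DPLL_LP_FINT_MAX  1000000UL       /* 1 MHz   */
#define OMAP4_DPLL_LP_FOUT_MAX  100000000UL     /* 100 MHz */

/*
 * A DPLL may use low-power mode when both its internal reference (fint)
 * and its output before the M dividers (fout) are below the TRM limits.
 */
static bool dpll_lpmode_ok(unsigned long ref_rate, unsigned int n, unsigned int m)
{
        unsigned long fint = ref_rate / (n + 1);
        unsigned long fout = fint * m;

        return fint < OMAP4_DPLL_LP_FINT_MAX && fout < OMAP4_DPLL_LP_FOUT_MAX;
}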
- */ -unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 v; - unsigned long rate; - struct dpll_data *dd; - - if (!clk || !clk->dpll_data) - return 0; - - dd = clk->dpll_data; - - rate = omap2_get_dpll_rate(clk); - - /* regm4xen adds a multiplier of 4 to DPLL calculations */ - v = omap2_clk_readl(clk, dd->control_reg); - if (v & OMAP4430_DPLL_REGM4XEN_MASK) - rate *= OMAP4430_REGM4XEN_MULT; - - return rate; -} - -/** - * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit - * @clk: struct clk * of the DPLL to round a rate for - * @target_rate: the desired rate of the DPLL - * - * Compute the rate that would be programmed into the DPLL hardware - * for @clk if set_rate() were to be provided with the rate - * @target_rate. Takes the REGM4XEN bit into consideration, which is - * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before - * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or - * ~0 if an error occurred in omap2_dpll_round_rate(). - */ -long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, - unsigned long target_rate, - unsigned long *parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - long r; - - if (!clk || !clk->dpll_data) - return -EINVAL; - - dd = clk->dpll_data; - - dd->last_rounded_m4xen = 0; - - /* - * First try to compute the DPLL configuration for - * target rate without using the 4X multiplier. - */ - r = omap2_dpll_round_rate(hw, target_rate, NULL); - if (r != ~0) - goto out; - - /* - * If we did not find a valid DPLL configuration, try again, but - * this time see if using the 4X multiplier can help. Enabling the - * 4X multiplier is equivalent to dividing the target rate by 4. - */ - r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT, - NULL); - if (r == ~0) - return r; - - dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT; - dd->last_rounded_m4xen = 1; - -out: - omap4_dpll_lpmode_recalc(dd); - - return dd->last_rounded_rate; -} - -/** - * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL - * @hw: pointer to the clock to determine rate for - * @rate: target rate for the DPLL - * @best_parent_rate: pointer for returning best parent rate - * @best_parent_clk: pointer for returning best parent clock - * - * Determines which DPLL mode to use for reaching a desired rate. - * Checks whether the DPLL shall be in bypass or locked mode, and if - * locked, calculates the M,N values for the DPLL via round-rate. - * Returns a positive clock rate with success, negative error value - * in failure. 
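The REGM4XEN rounding removed above is a two-pass search: try the target rate with the plain M,N dividers first, and only if that fails retry with a quarter of the target, remembering that the hardware 4x multiplier scales the result back up. A sketch of that control flow, where round_mn() is a hypothetical callback returning 0 on failure instead of the kernel's ~0:

#include <stdbool.h>

#define REGM4XEN_MULT 4

/* Hypothetical M,N rounding callback: returns the achievable rate, 0 on failure. */
typedef unsigned long (*round_mn_fn)(unsigned long target);

static unsigned long regm4xen_round_rate(round_mn_fn round_mn,
                                         unsigned long target,
                                         bool *use_m4xen)
{
        unsigned long r;

        *use_m4xen = false;

        /* First pass: can the M,N dividers reach the target on their own? */
        r = round_mn(target);
        if (r)
                return r;

        /* Second pass: enabling the 4x multiplier is equivalent to asking
         * the M,N stage for a quarter of the target, then scaling up. */
        r = round_mn(target / REGM4XEN_MULT);
        if (!r)
                return 0;

        *use_m4xen = true;
        return r * REGM4XEN_MULT;
}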
- */ -long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - - if (!hw || !rate) - return -EINVAL; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (__clk_get_rate(dd->clk_bypass) == rate && - (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { - *best_parent_clk = __clk_get_hw(dd->clk_bypass); - } else { - rate = omap4_dpll_regm4xen_round_rate(hw, rate, - best_parent_rate); - *best_parent_clk = __clk_get_hw(dd->clk_ref); - } - - *best_parent_rate = rate; - - return rate; -} diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 820dde8b5b04..a253aafbb9a2 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -37,7 +37,6 @@ #include "clock.h" #include "clock2xxx.h" #include "clock3xxx.h" -#include "clock44xx.h" #include "omap-pm.h" #include "sdrc.h" #include "control.h" @@ -723,6 +722,8 @@ int __init omap_clk_init(void) ti_clk_init_features(); + omap2_clk_setup_ll_ops(); + if (of_have_populated_dt()) { ret = omap_control_init(); if (ret) diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 486cc4ded190..6ef9e6341d96 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -130,6 +130,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/io.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/err.h> diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c index b1aad7e1426c..2a1a4180d5d0 100644 --- a/arch/arm/mach-omap2/pm24xx.c +++ b/arch/arm/mach-omap2/pm24xx.c @@ -25,6 +25,7 @@ #include <linux/sysfs.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/irq.h> #include <linux/time.h> diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c index 79f033b1ddff..d0871786dd8a 100644 --- a/arch/arm/mach-orion5x/board-dt.c +++ b/arch/arm/mach-orion5x/board-dt.c @@ -16,7 +16,6 @@ #include <linux/of_platform.h> #include <linux/cpu.h> #include <linux/mbus.h> -#include <linux/clk-provider.h> #include <linux/clocksource.h> #include <asm/system_misc.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index 16547f2641a3..25d6676f8d6e 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -21,7 +21,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/clk-provider.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/serial_core.h> diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 5d1a318f1302..0fa4c5f8b1be 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -8,6 +8,7 @@ menuconfig ARCH_TEGRA select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP select PINCTRL + select PM_OPP select ARCH_HAS_RESET_CONTROLLER select RESET_CONTROLLER select SOC_BUS diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index e31d3d61c998..b316e18a76aa 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c @@ -72,21 +72,12 @@ void __init ux500_init_irq(void) * Init clocks here so that they are available for system timer * initialization. 
*/ - if (cpu_is_u8500_family()) { - u8500_of_clk_init(U8500_CLKRST1_BASE, - U8500_CLKRST2_BASE, - U8500_CLKRST3_BASE, - U8500_CLKRST5_BASE, - U8500_CLKRST6_BASE); - } else if (cpu_is_u9540()) { - u9540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, - U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, - U8500_CLKRST6_BASE); - } else if (cpu_is_u8540()) { - u8540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, - U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, - U8500_CLKRST6_BASE); - } + if (cpu_is_u8500_family()) + u8500_clk_init(); + else if (cpu_is_u9540()) + u9540_clk_init(); + else if (cpu_is_u8540()) + u8540_clk_init(); } static const char * __init ux500_get_machine(void) diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 58093edeea2e..d831bc2ac204 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -490,7 +490,8 @@ 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */ - 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */ + 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xf0 0x00000000 0xf0 0x00000000 0x10 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -513,8 +514,9 @@ reg = < 0x00 0x1f2c0000 0x0 0x00010000 /* Controller registers */ 0xd0 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0xd1 0x80000000 0x00 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xd1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xd8 0x00000000 0xd8 0x00000000 0x08 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -537,8 +539,9 @@ reg = < 0x00 0x1f2d0000 0x0 0x00010000 /* Controller registers */ 0x90 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0x90 0x10000000 0x0 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0x91 0x80000000 0x0 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0x90 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0x91 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0x94 0x00000000 0x94 0x00000000 0x04 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -561,8 +564,9 @@ reg = < 0x00 0x1f500000 0x0 0x00010000 /* Controller registers */ 0xa0 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0xa0 0x10000000 0x0 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0xa1 0x80000000 0x0 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0xa0 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xa1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xb0 0x00000000 0xb0 0x00000000 0x10 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 
0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -585,8 +589,9 @@ reg = < 0x00 0x1f510000 0x0 0x00010000 /* Controller registers */ 0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0xc0 0x10000000 0x0 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0xc1 0x80000000 0x0 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0xc0 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xc1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xc8 0x00000000 0xc8 0x00000000 0x08 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 3303e8a7b837..f4bf2f2a014c 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -124,7 +124,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) ce_aes_ccm_auth_data(mac, (u8 *)<ag, ltag.len, &macp, ctx->key_enc, num_rounds(ctx)); - scatterwalk_start(&walk, req->assoc); + scatterwalk_start(&walk, req->src); do { u32 n = scatterwalk_clamp(&walk, len); @@ -151,6 +151,10 @@ static int ccm_encrypt(struct aead_request *req) struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); struct blkcipher_desc desc = { .info = req->iv }; struct blkcipher_walk walk; + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct scatterlist *src; + struct scatterlist *dst; u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen; @@ -168,7 +172,12 @@ static int ccm_encrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - blkcipher_walk_init(&walk, req->dst, req->src, len); + src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); + dst = src; + if (req->src != req->dst) + dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); + + blkcipher_walk_init(&walk, dst, src, len); err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, AES_BLOCK_SIZE); @@ -194,7 +203,7 @@ static int ccm_encrypt(struct aead_request *req) return err; /* copy authtag to end of dst */ - scatterwalk_map_and_copy(mac, req->dst, req->cryptlen, + scatterwalk_map_and_copy(mac, dst, req->cryptlen, crypto_aead_authsize(aead), 1); return 0; @@ -207,6 +216,10 @@ static int ccm_decrypt(struct aead_request *req) unsigned int authsize = crypto_aead_authsize(aead); struct blkcipher_desc desc = { .info = req->iv }; struct blkcipher_walk walk; + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct scatterlist *src; + struct scatterlist *dst; u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen - authsize; @@ -224,7 +237,12 @@ static int ccm_decrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - blkcipher_walk_init(&walk, req->dst, req->src, len); + src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); + dst = src; + if (req->src != req->dst) + dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); + + blkcipher_walk_init(&walk, dst, src, len); err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, AES_BLOCK_SIZE); @@ -250,44 +268,42 @@ static int ccm_decrypt(struct aead_request *req) return err; /* compare calculated auth tag with the stored one */ - scatterwalk_map_and_copy(buf, req->src, req->cryptlen - 
authsize, + scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize, authsize, 0); - if (memcmp(mac, buf, authsize)) + if (crypto_memneq(mac, buf, authsize)) return -EBADMSG; return 0; } -static struct crypto_alg ccm_aes_alg = { - .cra_name = "ccm(aes)", - .cra_driver_name = "ccm-aes-ce", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct crypto_aes_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setkey = ccm_setkey, - .setauthsize = ccm_setauthsize, - .encrypt = ccm_encrypt, - .decrypt = ccm_decrypt, - } +static struct aead_alg ccm_aes_alg = { + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-ce", + .cra_priority = 300, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct crypto_aes_ctx), + .cra_alignmask = 7, + .cra_module = THIS_MODULE, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm_setkey, + .setauthsize = ccm_setauthsize, + .encrypt = ccm_encrypt, + .decrypt = ccm_decrypt, }; static int __init aes_mod_init(void) { if (!(elf_hwcap & HWCAP_AES)) return -ENODEV; - return crypto_register_alg(&ccm_aes_alg); + return crypto_register_aead(&ccm_aes_alg); } static void __exit aes_mod_exit(void) { - crypto_unregister_alg(&ccm_aes_alg); + crypto_unregister_aead(&ccm_aes_alg); } module_init(aes_mod_init); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f3067d4d4e35..926ae8d9abc5 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -34,7 +34,6 @@ #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/root_dev.h> -#include <linux/clk-provider.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/smp.h> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index f02530e726f6..85c57158dcd9 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) { if (!(vcpu->arch.hcr_el2 & HCR_RW)) inject_abt32(vcpu, false, addr); - - inject_abt64(vcpu, false, addr); + else + inject_abt64(vcpu, false, addr); } /** @@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) { if (!(vcpu->arch.hcr_el2 & HCR_RW)) inject_abt32(vcpu, true, addr); - - inject_abt64(vcpu, true, addr); + else + inject_abt64(vcpu, true, addr); } /** @@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.hcr_el2 & HCR_RW)) inject_undef32(vcpu); - - inject_undef64(vcpu); + else + inject_undef64(vcpu); } diff --git a/arch/avr32/include/asm/switch_to.h b/arch/avr32/include/asm/switch_to.h index 9a8e9d5208d4..6f00581c3d4f 100644 --- a/arch/avr32/include/asm/switch_to.h +++ b/arch/avr32/include/asm/switch_to.h @@ -15,11 +15,13 @@ */ #ifdef CONFIG_OWNERSHIP_TRACE #include <asm/ocd.h> -#define finish_arch_switch(prev) \ +#define ocd_switch(prev, next) \ do { \ ocd_write(PID, prev->pid); \ - ocd_write(PID, current->pid); \ + ocd_write(PID, next->pid); \ } while(0) +#else +#define ocd_switch(prev, next) #endif /* @@ -38,6 +40,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct cpu_context *); #define switch_to(prev, next, last) \ do { \ + ocd_switch(prev, next); \ last = __switch_to(prev, &prev->thread.cpu_context + 1, \ &next->thread.cpu_context); \ } while (0) diff --git a/arch/frv/mb93090-mb00/pci-frv.c 
b/arch/frv/mb93090-mb00/pci-frv.c index 0635bd6c2af3..34bb4b13e079 100644 --- a/arch/frv/mb93090-mb00/pci-frv.c +++ b/arch/frv/mb93090-mb00/pci-frv.c @@ -175,14 +175,6 @@ static void __init pcibios_assign_resources(void) if (!r->start && r->end) pci_assign_resource(dev, idx); } - - if (pci_probe & PCI_ASSIGN_ROMS) { - r = &dev->resource[PCI_ROM_RESOURCE]; - r->end -= r->start; - r->start = 0; - if (r->end) - pci_assign_resource(dev, PCI_ROM_RESOURCE); - } } } diff --git a/arch/frv/mb93090-mb00/pci-frv.h b/arch/frv/mb93090-mb00/pci-frv.h index a7e487fe76ed..d51992ff5a61 100644 --- a/arch/frv/mb93090-mb00/pci-frv.h +++ b/arch/frv/mb93090-mb00/pci-frv.h @@ -14,14 +14,6 @@ #define DBG(x...) #endif -#define PCI_PROBE_BIOS 0x0001 -#define PCI_PROBE_CONF1 0x0002 -#define PCI_PROBE_CONF2 0x0004 -#define PCI_NO_CHECKS 0x0400 -#define PCI_ASSIGN_ROMS 0x1000 -#define PCI_BIOS_IRQ_SCAN 0x2000 -#define PCI_ASSIGN_ALL_BUSSES 0x4000 - extern unsigned int __nongpreldata pci_probe; /* pci-frv.c */ diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c index f211839e2cae..f9c86c475bbd 100644 --- a/arch/frv/mb93090-mb00/pci-vdk.c +++ b/arch/frv/mb93090-mb00/pci-vdk.c @@ -294,8 +294,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); #endif - pci_read_bridge_bases(bus); - if (bus->number == 0) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 7cc3be9fa7c6..d89b6013c941 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -533,10 +533,9 @@ void pcibios_fixup_bus(struct pci_bus *b) { struct pci_dev *dev; - if (b->self) { - pci_read_bridge_bases(b); + if (b->self) pcibios_fixup_bridge_resources(b->self); - } + list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); platform_pci_fixup_bus(b); diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 753a6237f99a..0b6b40d37b95 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -57,7 +57,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -91,6 +91,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -287,7 +288,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -320,7 +320,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -345,6 +344,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -355,6 +355,7 @@ CONFIG_ARIADNE=y # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -363,6 +364,7 @@ CONFIG_HYDRA=y CONFIG_APNE=y CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is 
not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -448,6 +450,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -536,6 +539,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -543,6 +547,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -571,14 +576,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 1f93dcaf02e5..eeb3a8991fc4 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -55,7 +55,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -89,6 +89,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -279,7 +280,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -302,7 +302,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -327,17 +326,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -406,6 +408,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -494,6 +497,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -501,6 +505,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -529,14 +534,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m 
+CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 831b8b8b92ad..3a7006654ce9 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -55,7 +55,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -89,6 +89,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -283,7 +284,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -311,7 +311,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -336,6 +335,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -343,11 +343,13 @@ CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -428,6 +430,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -516,6 +519,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -523,6 +527,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -551,14 +556,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 91fd187c16d5..0586b323a673 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -53,7 +53,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -87,6 +87,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m 
CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -277,7 +278,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -301,7 +301,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -326,17 +325,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -399,6 +401,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 9d4934f1d2c3..ad1dbce07aa4 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -55,7 +55,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -89,6 +89,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -279,7 +280,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -302,7 +302,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -327,6 +326,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -334,11 +334,13 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set 
# CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -408,6 +410,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -496,6 +499,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -503,6 +507,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -531,14 +536,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 72bc187ca995..b44acacaecf4 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -54,7 +54,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -88,6 +88,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -282,7 +283,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -311,7 +311,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -343,6 +342,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -351,12 +351,14 @@ CONFIG_MACMACE=y # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_MACSONIC=y CONFIG_MAC8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -430,6 +432,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -518,6 +521,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -525,6 +529,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -553,14 +558,15 @@ CONFIG_CRYPTO_TEA=m 
CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8fb65535597f..8afca3753db1 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -64,7 +64,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -98,6 +98,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -301,7 +302,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -344,7 +344,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -376,6 +375,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -391,6 +391,7 @@ CONFIG_MACMACE=y # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HP is not set CONFIG_BVME6000_NET=y CONFIG_MVME16x_NET=y @@ -403,6 +404,7 @@ CONFIG_NE2000=y CONFIG_APNE=y CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -510,6 +512,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -598,6 +601,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -605,6 +609,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -633,14 +638,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index f34491ec0126..ef00875994d9 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -52,7 +52,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m 
CONFIG_INET_IPCOMP=m @@ -86,6 +86,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -276,7 +277,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -300,7 +300,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -325,6 +324,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -332,11 +332,13 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -399,6 +401,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 3d3614d1b041..387c2bd90ff1 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -53,7 +53,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -87,6 +87,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -277,7 +278,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -301,7 +301,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -326,17 +325,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set # 
CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -399,6 +401,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 643e9c93bea7..35355c1bc714 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -53,7 +53,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -87,6 +87,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -280,7 +281,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -307,7 +307,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -332,6 +331,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -341,12 +341,14 @@ CONFIG_VETH=m # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -421,6 +423,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -509,6 +512,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -516,6 +520,7 
@@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -544,14 +549,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8fecc5aa166c..8442d267b877 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -50,7 +50,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -84,6 +84,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -274,7 +275,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -298,7 +298,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -323,17 +322,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_EZCHIP is not set CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -400,6 +402,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 9902c5bfbdc8..0e1b542e1555 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -50,7 +50,7 @@ 
CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -84,6 +84,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -274,7 +275,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -298,7 +298,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -323,6 +322,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -330,11 +330,13 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -400,6 +402,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -488,6 +491,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -495,6 +499,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -523,14 +528,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c index 7ee853e1432b..2a33a9645ad8 100644 --- a/arch/m68k/kernel/bootinfo_proc.c +++ b/arch/m68k/kernel/bootinfo_proc.c @@ -62,12 +62,10 @@ static int __init init_bootinfo_procfs(void) if (!bootinfo_size) return -EINVAL; - bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL); + bootinfo_copy = kmemdup(bootinfo_tmp, bootinfo_size, GFP_KERNEL); if (!bootinfo_copy) return -ENOMEM; - memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size); - pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL); if (!pde) { kfree(bootinfo_copy); diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index ae838ed5fcf2..6b8b75266801 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -863,14 +863,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus) void pcibios_fixup_bus(struct pci_bus *bus) { - /* When called from the generic PCI probe, read PCI<->PCI bridge - * bases. 
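
The m68k bootinfo_proc hunk above collapses an open-coded kmalloc()+memcpy() pair into a single kmemdup() call. A minimal sketch of the same refactor, with hypothetical buffer names (kmemdup() still returns NULL on allocation failure, so the existing error path is unchanged):

#include <linux/slab.h>
#include <linux/string.h>

/* Before: allocate, check, then copy by hand. */
static void *copy_blob_old(const void *src, size_t len)
{
	void *dst = kmalloc(len, GFP_KERNEL);

	if (!dst)
		return NULL;
	memcpy(dst, src, len);
	return dst;
}

/* After: kmemdup() does the allocation and the copy in one call. */
static void *copy_blob_new(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
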
This is -not- called when generating the PCI tree from - * the OF device-tree. - */ - if (bus->self != NULL) - pci_read_bridge_bases(bus); - - /* Now fixup the bus bus */ + /* Fixup the bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 6e46abe0dac6..bd34f4093cd9 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -35,6 +35,7 @@ #include <linux/init.h> #include <linux/io.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/slab.h> @@ -389,12 +390,11 @@ static long alchemy_calc_div(unsigned long rate, unsigned long prate, return div1; } -static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk, - int scale, int maxdiv) +static int alchemy_clk_fgcs_detr(struct clk_hw *hw, + struct clk_rate_request *req, + int scale, int maxdiv) { - struct clk *pc, *bpc, *free; + struct clk_hw *pc, *bpc, *free; long tdv, tpr, pr, nr, br, bpr, diff, lastdiff; int j; @@ -408,7 +408,7 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, * the one that gets closest to but not over the requested rate. */ for (j = 0; j < 7; j++) { - pc = clk_get_parent_by_index(hw->clk, j); + pc = clk_hw_get_parent_by_index(hw, j); if (!pc) break; @@ -416,20 +416,20 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, * XXX: we would actually want clk_has_active_children() * but this is a good-enough approximation for now. */ - if (!__clk_is_prepared(pc)) { + if (!clk_hw_is_prepared(pc)) { if (!free) free = pc; } - pr = clk_get_rate(pc); - if (pr < rate) + pr = clk_hw_get_rate(pc); + if (pr < req->rate) continue; /* what can hardware actually provide */ - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL); nr = pr / tdv; - diff = rate - nr; - if (nr > rate) + diff = req->rate - nr; + if (nr > req->rate) continue; if (diff < lastdiff) { @@ -448,15 +448,16 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, */ if (lastdiff && free) { for (j = (maxdiv == 4) ? 
1 : scale; j <= maxdiv; j += scale) { - tpr = rate * j; + tpr = req->rate * j; if (tpr < 0) break; - pr = clk_round_rate(free, tpr); + pr = clk_hw_round_rate(free, tpr); - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, + NULL); nr = pr / tdv; - diff = rate - nr; - if (nr > rate) + diff = req->rate - nr; + if (nr > req->rate) continue; if (diff < lastdiff) { lastdiff = diff; @@ -469,9 +470,14 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, } } - *best_parent_rate = bpr; - *best_parent_clk = __clk_get_hw(bpc); - return br; + if (br < 0) + return br; + + req->best_parent_rate = bpr; + req->best_parent_hw = bpc; + req->rate = br; + + return 0; } static int alchemy_clk_fgv1_en(struct clk_hw *hw) @@ -562,14 +568,10 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw, return parent_rate / v; } -static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_fgv1_detr(struct clk_hw *hw, + struct clk_rate_request *req) { - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, 2, 512); + return alchemy_clk_fgcs_detr(hw, req, 2, 512); } /* Au1000, Au1100, Au15x0, Au12x0 */ @@ -696,11 +698,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw, return t; } -static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_fgv2_detr(struct clk_hw *hw, + struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale, maxdiv; @@ -713,8 +712,7 @@ static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, maxdiv = 512; } - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, scale, maxdiv); + return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv); } /* Au1300 larger input mux, no separate disable bit, flexible divider */ @@ -917,17 +915,13 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate, return 0; } -static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_csrc_detr(struct clk_hw *hw, + struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */ - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, scale, 4); + return alchemy_clk_fgcs_detr(hw, req, scale, 4); } static struct clk_ops alchemy_clkops_csrc = { diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 7163cd7fdd69..9733cd0266e4 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -83,45 +83,43 @@ do { if (cpu_has_rw_llb) { \ } \ } while (0) +/* + * For newly created kernel threads switch_to() will return to + * ret_from_kernel_thread, newly created user threads to ret_from_fork. + * That is, everything following resume() will be skipped for new threads. + * So everything that matters to new threads should be placed before resume(). 
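
Before the MIPS comment above continues, note that the Alchemy clock hunks convert the driver's rate-determination callbacks from the old long-returning prototype to the struct clk_rate_request form, where the chosen rate and parent travel back inside the request. The following is only a hedged sketch of a callback in the new style; the divider limits and the "unused parent" policy of the real Alchemy code are deliberately omitted and all names are illustrative:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk-provider.h>

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_hw *parent, *best_parent = NULL;
	unsigned long prate, div, rate, best_rate = 0, best_prate = 0;
	unsigned int i;

	if (!req->rate)
		return -EINVAL;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			break;

		prate = clk_hw_get_rate(parent);
		/* largest achievable rate that does not exceed the request */
		div = DIV_ROUND_UP(prate, req->rate);
		rate = prate / div;
		if (rate > best_rate) {
			best_rate = rate;
			best_prate = prate;
			best_parent = parent;
		}
	}

	if (!best_parent)
		return -EINVAL;

	/* On success the result is reported through the request structure. */
	req->best_parent_rate = best_prate;
	req->best_parent_hw = best_parent;
	req->rate = best_rate;
	return 0;
}
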
+ */ #define switch_to(prev, next, last) \ do { \ - u32 __c0_stat; \ s32 __fpsave = FP_SAVE_NONE; \ __mips_mt_fpaff_switch_to(prev); \ - if (cpu_has_dsp) \ + if (cpu_has_dsp) { \ __save_dsp(prev); \ - if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \ - if (cop2_lazy_restore) \ - KSTK_STATUS(prev) &= ~ST0_CU2; \ - __c0_stat = read_c0_status(); \ - write_c0_status(__c0_stat | ST0_CU2); \ - cop2_save(prev); \ - write_c0_status(__c0_stat & ~ST0_CU2); \ + __restore_dsp(next); \ + } \ + if (cop2_present) { \ + set_c0_status(ST0_CU2); \ + if ((KSTK_STATUS(prev) & ST0_CU2)) { \ + if (cop2_lazy_restore) \ + KSTK_STATUS(prev) &= ~ST0_CU2; \ + cop2_save(prev); \ + } \ + if (KSTK_STATUS(next) & ST0_CU2 && \ + !cop2_lazy_restore) { \ + cop2_restore(next); \ + } \ + clear_c0_status(ST0_CU2); \ } \ __clear_software_ll_bit(); \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \ __fpsave = FP_SAVE_SCALAR; \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \ __fpsave = FP_SAVE_VECTOR; \ - (last) = resume(prev, next, task_thread_info(next), __fpsave); \ -} while (0) - -#define finish_arch_switch(prev) \ -do { \ - u32 __c0_stat; \ - if (cop2_present && !cop2_lazy_restore && \ - (KSTK_STATUS(current) & ST0_CU2)) { \ - __c0_stat = read_c0_status(); \ - write_c0_status(__c0_stat | ST0_CU2); \ - cop2_restore(current); \ - write_c0_status(__c0_stat & ~ST0_CU2); \ - } \ - if (cpu_has_dsp) \ - __restore_dsp(current); \ if (cpu_has_userlocal) \ - write_c0_userlocal(current_thread_info()->tp_value); \ + write_c0_userlocal(task_thread_info(next)->tp_value); \ __restore_watch(); \ disable_msa(); \ + (last) = resume(prev, next, task_thread_info(next), __fpsave); \ } while (0) #endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index b8a0bf5766f2..c6996cf67a5c 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -311,12 +311,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) void pcibios_fixup_bus(struct pci_bus *bus) { - struct pci_dev *dev = bus->self; - - if (pci_has_flag(PCI_PROBE_ONLY) && dev && - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_read_bridge_bases(bus); - } } EXPORT_SYMBOL(PCIBIOS_MIN_IO); diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c index b5b036f64275..b7ab8378964c 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.c +++ b/arch/mn10300/unit-asb2305/pci-asb2305.c @@ -183,18 +183,16 @@ static int __init pcibios_assign_resources(void) struct pci_dev *dev = NULL; struct resource *r; - if (!(pci_probe & PCI_ASSIGN_ROMS)) { - /* Try to use BIOS settings for ROMs, otherwise let - pci_assign_unassigned_resources() allocate the new - addresses. */ - for_each_pci_dev(dev) { - r = &dev->resource[PCI_ROM_RESOURCE]; - if (!r->flags || !r->start) - continue; - if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { - r->end -= r->start; - r->start = 0; - } + /* Try to use BIOS settings for ROMs, otherwise let + pci_assign_unassigned_resources() allocate the new + addresses. */ + for_each_pci_dev(dev) { + r = &dev->resource[PCI_ROM_RESOURCE]; + if (!r->flags || !r->start) + continue; + if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { + r->end -= r->start; + r->start = 0; } } diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h index 9e17aca5a2a1..96c484b12226 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.h +++ b/arch/mn10300/unit-asb2305/pci-asb2305.h @@ -20,13 +20,6 @@ #define DBG(x...) 
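
Stepping back to the MIPS switch_to() rework above: the old finish_arch_switch() body ran after resume(), which freshly created threads never execute, so its COP2/DSP/userlocal handling has been folded into switch_to() ahead of the resume() call. A simplified, hedged sketch of that control-flow constraint (not the real macro; the helper names stand in for the architecture-specific work shown in the hunk):

struct task;

static void save_arch_state(struct task *prev)    { /* e.g. DSP/COP2 save */ }
static void restore_arch_state(struct task *next) { /* e.g. DSP, userlocal */ }

static struct task *resume(struct task *prev, struct task *next)
{
	/* switches kernel stacks; a brand-new thread never returns here */
	return prev;
}

static struct task *example_switch_to(struct task *prev, struct task *next)
{
	/*
	 * Anything that matters to 'next' must run before resume():
	 * new threads leave through ret_from_kernel_thread or
	 * ret_from_fork and skip every statement placed after it.
	 */
	save_arch_state(prev);
	restore_arch_state(next);
	return resume(prev, next);
}
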
#endif -#define PCI_PROBE_BIOS 1 -#define PCI_PROBE_CONF1 2 -#define PCI_PROBE_CONF2 4 -#define PCI_NO_CHECKS 0x400 -#define PCI_ASSIGN_ROMS 0x1000 -#define PCI_BIOS_IRQ_SCAN 0x2000 - extern unsigned int pci_probe; /* pci-asb2305.c */ diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c index 3dfe2d31c67b..deaa893efba5 100644 --- a/arch/mn10300/unit-asb2305/pci.c +++ b/arch/mn10300/unit-asb2305/pci.c @@ -324,7 +324,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) struct pci_dev *dev; if (bus->self) { - pci_read_bridge_bases(bus); pcibios_fixup_bridge_resources(bus->self); } diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h index d0ece257d310..04c7e8fc24c2 100644 --- a/arch/powerpc/include/asm/mpc52xx_psc.h +++ b/arch/powerpc/include/asm/mpc52xx_psc.h @@ -150,7 +150,10 @@ /* Structure of the hardware registers */ struct mpc52xx_psc { - u8 mode; /* PSC + 0x00 */ + union { + u8 mode; /* PSC + 0x00 */ + u8 mr2; + }; u8 reserved0[3]; union { /* PSC + 0x04 */ u16 status; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 4dbe072eecbe..523673d7583c 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,8 +28,6 @@ #include <asm/synch.h> #include <asm/ppc-opcode.h> -#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ - #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ #ifdef __BIG_ENDIAN__ diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 58abeda64cb7..15cca17cba4b 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {} extern void enable_kernel_fp(void); extern void enable_kernel_altivec(void); +extern void enable_kernel_vsx(void); extern int emulate_altivec(struct pt_regs *); extern void __giveup_vsx(struct task_struct *); extern void giveup_vsx(struct task_struct *); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index b9de34d44fcb..02c1d5dcee4d 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1044,13 +1044,7 @@ void pcibios_set_master(struct pci_dev *dev) void pcibios_fixup_bus(struct pci_bus *bus) { - /* When called from the generic PCI probe, read PCI<->PCI bridge - * bases. This is -not- called when generating the PCI tree from - * the OF device-tree. 
- */ - pci_read_bridge_bases(bus); - - /* Now fixup the bus bus */ + /* Fixup the bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 42e02a2d570b..c8c62c7fc31c 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -126,7 +126,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, { struct pci_dev *dev; const char *type; - struct pci_slot *slot; dev = pci_alloc_dev(bus); if (!dev) @@ -145,10 +144,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, dev->needs_freset = 0; /* pcie fundamental reset required */ set_pcie_port_type(dev); - list_for_each_entry(slot, &dev->bus->slots, list) - if (PCI_SLOT(dev->devfn) == slot->number) - dev->slot = slot; - + pci_dev_assign_slot(dev); dev->vendor = get_int_prop(node, "vendor-id", 0xffff); dev->device = get_int_prop(node, "device-id", 0xffff); dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0); @@ -191,6 +187,9 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, pci_device_add(dev, bus); + /* Setup MSI caps & disable MSI/MSI-X interrupts */ + pci_msi_setup_pci_dev(dev); + return dev; } EXPORT_SYMBOL(of_create_pci_dev); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8005e18d1b40..64e6e9d9e656 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX -#if 0 -/* not currently used, but some crazy RAID module might want to later */ void enable_kernel_vsx(void) { WARN_ON(preemptible()); @@ -220,7 +218,6 @@ void enable_kernel_vsx(void) #endif /* CONFIG_SMP */ } EXPORT_SYMBOL(enable_kernel_vsx); -#endif void giveup_vsx(struct task_struct *tsk) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 68d067ad4222..a9f753fb73a8 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) vc->runner = vcpu; if (n_ceded == vc->n_runnable) { kvmppc_vcore_blocked(vc); - } else if (should_resched()) { + } else if (need_resched()) { vc->vcore_state = VCORE_PREEMPT; /* Let something else run */ cond_resched_lock(&vc->lock); diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index f691bcabd710..c50ea76ba66c 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -12,6 +12,7 @@ */ #include <linux/bitops.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/device.h> diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index 2938934c6518..e256592eb66e 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild @@ -6,3 +6,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ obj-$(CONFIG_APPLDATA_BASE) += appldata/ obj-y += net/ obj-$(CONFIG_PCI) += pci/ +obj-$(CONFIG_NUMA) += numa/ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b06dc3839268..4827870f7a6d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -99,18 +99,22 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE select ARCH_SAVE_PAGE_KEYS if HIBERNATION select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_WANTS_PROT_NUMA_PROT_NONE select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT select 
CLONE_BACKWARDS2 select DYNAMIC_FTRACE if FUNCTION_TRACER select GENERIC_CLOCKEVENTS + select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_DEVICES if !SMP select GENERIC_FIND_FIRST_BIT select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_EARLY_PFN_TO_NID select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK @@ -153,6 +157,7 @@ config S390 select VIRT_CPU_ACCOUNTING select VIRT_TO_BUS + config SCHED_OMIT_FRAME_POINTER def_bool y @@ -385,6 +390,76 @@ config HOTPLUG_CPU config SCHED_SMT def_bool n +# Some NUMA nodes have memory ranges that span +# other nodes. Even though a pfn is valid and +# between a node's start and end pfns, it may not +# reside on that node. See memmap_init_zone() +# for details. <- They meant memory holes! +config NODES_SPAN_OTHER_NODES + def_bool NUMA + +config NUMA + bool "NUMA support" + depends on SMP && 64BIT && SCHED_TOPOLOGY + default n + help + Enable NUMA support + + This option adds NUMA support to the kernel. + + An operation mode can be selected by appending + numa=<method> to the kernel command line. + + The default behaviour is identical to appending numa=plain to + the command line. This will create just one node with all + available memory and all CPUs in it. + +config NODES_SHIFT + int "Maximum NUMA nodes (as a power of 2)" + range 1 10 + depends on NUMA + default "4" + help + Specify the maximum number of NUMA nodes available on the target + system. Increases memory reserved to accommodate various tables. + +menu "Select NUMA modes" + depends on NUMA + +config NUMA_EMU + bool "NUMA emulation" + default y + help + Numa emulation mode will split the available system memory into + equal chunks which then are distributed over the configured number + of nodes in a round-robin manner. + + The number of fake nodes is limited by the number of available memory + chunks (i.e. memory size / fake size) and the number of supported + nodes in the kernel. + + The CPUs are assigned to the nodes in a way that partially respects + the original machine topology (if supported by the machine). + Fair distribution of the CPUs is not guaranteed. + +config EMU_SIZE + hex "NUMA emulation memory chunk size" + default 0x10000000 + range 0x400000 0x100000000 + depends on NUMA_EMU + help + Select the default size by which the memory is chopped and then + assigned to emulated NUMA nodes. + + This can be overridden by specifying + + emu_size=<n> + + on the kernel command line where also suffixes K, M, G, and T are + supported. 
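
The NUMA_EMU help text above describes a purely arithmetic split: available memory is chopped into emu_size chunks and the chunks are dealt out round-robin over the fake nodes. A small hedged sketch of that distribution (the function and parameter names are illustrative, not the s390 implementation that the Kbuild hunk adds under numa/):

/* chunk 0 -> node 0, chunk 1 -> node 1, ..., wrapping round-robin */
static unsigned int emu_chunk_to_node(unsigned long chunk,
				      unsigned int nr_nodes)
{
	return chunk % nr_nodes;
}

/*
 * Worked example with the default EMU_SIZE of 0x10000000 (256 MiB) and,
 * say, 4 emulated nodes on a 16 GiB machine: 64 chunks in total,
 * 16 chunks per fake node, i.e. 4 GiB of memory on each node.
 */
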
+ +endmenu + config SCHED_MC def_bool n diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 667b1bca5681..e8d4423e4f85 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -33,6 +33,8 @@ mflags-$(CONFIG_MARCH_Z196) := -march=z196 mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12 mflags-$(CONFIG_MARCH_Z13) := -march=z13 +export CC_FLAGS_MARCH := $(mflags-y) + aflags-y += $(mflags-y) cflags-y += $(mflags-y) diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 940cbddd9237..0c98f1508542 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -13,6 +13,7 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_FAST_NO_HZ=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_NUMA_BALANCING=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y @@ -50,6 +51,7 @@ CONFIG_LIVEPATCH=y CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 +CONFIG_NUMA=y CONFIG_PREEMPT=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index d793fec91797..82083e1fbdc4 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -13,6 +13,7 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_FAST_NO_HZ=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_NUMA_BALANCING=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y @@ -49,6 +50,7 @@ CONFIG_DEFAULT_DEADLINE=y CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 +CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 38a77e9c8aa6..c05c9e0821e3 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -13,6 +13,8 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_FAST_NO_HZ=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y @@ -48,6 +50,7 @@ CONFIG_LIVEPATCH=y CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 +CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 5566ce80abdb..0b9b95f3c703 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -24,6 +24,7 @@ #include <crypto/algapi.h> #include <linux/err.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <linux/init.h> #include <linux/spinlock.h> #include "crypt_s390.h" @@ -976,7 +977,7 @@ static void __exit aes_s390_fini(void) crypto_unregister_alg(&aes_alg); } -module_init(aes_s390_init); +module_cpu_feature_match(MSA, aes_s390_init); module_exit(aes_s390_fini); MODULE_ALIAS_CRYPTO("aes-all"); diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 9e05cc453a40..fba1c10a2dd0 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/des.h> @@ -616,7 +617,7 @@ static void __exit des_s390_exit(void) crypto_unregister_alg(&des_alg); } -module_init(des_s390_init); +module_cpu_feature_match(MSA, des_s390_init); module_exit(des_s390_exit); MODULE_ALIAS_CRYPTO("des"); diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c index b258110da952..26e14efd30a7 100644 --- 
a/arch/s390/crypto/ghash_s390.c +++ b/arch/s390/crypto/ghash_s390.c @@ -9,6 +9,7 @@ #include <crypto/internal/hash.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include "crypt_s390.h" @@ -158,7 +159,7 @@ static void __exit ghash_mod_exit(void) crypto_unregister_shash(&ghash_alg); } -module_init(ghash_mod_init); +module_cpu_feature_match(MSA, ghash_mod_init); module_exit(ghash_mod_exit); MODULE_ALIAS_CRYPTO("ghash"); diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index 9d5192c94963..b8045b97f4fb 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mutex.h> +#include <linux/cpufeature.h> #include <linux/random.h> #include <linux/slab.h> #include <asm/debug.h> @@ -914,6 +915,5 @@ static void __exit prng_exit(void) } } - -module_init(prng_init); +module_cpu_feature_match(MSA, prng_init); module_exit(prng_exit); diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 5b2bee323694..9208eadae9f0 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -26,6 +26,7 @@ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <crypto/sha.h> #include "crypt_s390.h" @@ -100,7 +101,7 @@ static void __exit sha1_s390_fini(void) crypto_unregister_shash(&alg); } -module_init(sha1_s390_init); +module_cpu_feature_match(MSA, sha1_s390_init); module_exit(sha1_s390_fini); MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index b74ff158108c..667888f5c964 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -16,6 +16,7 @@ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <crypto/sha.h> #include "crypt_s390.h" @@ -140,7 +141,7 @@ static void __exit sha256_s390_fini(void) crypto_unregister_shash(&sha256_alg); } -module_init(sha256_s390_init); +module_cpu_feature_match(MSA, sha256_s390_init); module_exit(sha256_s390_fini); MODULE_ALIAS_CRYPTO("sha256"); diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 0c36989ba182..2ba66b1518f0 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include "sha.h" #include "crypt_s390.h" @@ -148,7 +149,7 @@ static void __exit fini(void) crypto_unregister_shash(&sha384_alg); } -module_init(init); +module_cpu_feature_match(MSA, init); module_exit(fini); MODULE_LICENSE("GPL"); diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h new file mode 100644 index 000000000000..fa7e69b7c299 --- /dev/null +++ b/arch/s390/include/asm/cpufeature.h @@ -0,0 +1,29 @@ +/* + * Module interface for CPU features + * + * Copyright IBM Corp. 2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef __ASM_S390_CPUFEATURE_H +#define __ASM_S390_CPUFEATURE_H + +#include <asm/elf.h> + +/* Hardware features on Linux on z Systems are indicated by facility bits that + * are mapped to the so-called machine flags. Particular machine flags are + * then used to define ELF hardware capabilities; most notably hardware flags + * that are essential for user space / glibc. + * + * Restrict the set of exposed CPU features to ELF hardware capabilities for + * now. 
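
The crypto hunks above replace plain module_init() with module_cpu_feature_match(MSA, ...), so each module is only initialised, and only auto-loaded via its generated CPU-feature module alias, when the machine actually provides the message-security assist. A minimal hedged skeleton of a module using the same mechanism (the module itself is made up):

#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init example_init(void)
{
	/* Only reached when the MSA CPU feature is present. */
	return 0;
}

static void __exit example_exit(void)
{
}

/*
 * Emits a "cpu:..." device-table alias for the MSA feature bit, so
 * udev/modprobe load the module only on capable machines, and wires
 * example_init() up behind a cpu_have_feature() check.
 */
module_cpu_feature_match(MSA, example_init);
module_exit(example_exit);

MODULE_LICENSE("GPL");
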
Additional machine flags can be indicated by values larger than + * MAX_ELF_HWCAP_FEATURES. + */ +#define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap)) +#define MAX_CPU_FEATURES MAX_ELF_HWCAP_FEATURES + +#define cpu_feature(feat) ilog2(HWCAP_S390_ ## feat) + +int cpu_have_feature(unsigned int nr); + +#endif /* __ASM_S390_CPUFEATURE_H */ diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index d7697ab802f6..17a373576868 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -46,6 +46,8 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) __ctl_load(reg, cr, cr); } +void __ctl_set_vx(void); + void smp_ctl_set_bit(int cr, int bit); void smp_ctl_clear_bit(int cr, int bit); diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h index 629b79a93165..f7e5c36688c3 100644 --- a/arch/s390/include/asm/etr.h +++ b/arch/s390/include/asm/etr.h @@ -214,6 +214,9 @@ static inline int etr_ptff(void *ptff_block, unsigned int func) void etr_switch_to_local(void); void etr_sync_check(void); +/* notifier for syncs */ +extern struct atomic_notifier_head s390_epoch_delta_notifier; + /* STP interruption parameter */ struct stp_irq_parm { unsigned int _pad0 : 14; diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu-internal.h new file mode 100644 index 000000000000..55dc2c0fb40a --- /dev/null +++ b/arch/s390/include/asm/fpu-internal.h @@ -0,0 +1,110 @@ +/* + * General floating pointer and vector register helpers + * + * Copyright IBM Corp. 2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef _ASM_S390_FPU_INTERNAL_H +#define _ASM_S390_FPU_INTERNAL_H + +#define FPU_USE_VX 1 /* Vector extension is active */ + +#ifndef __ASSEMBLY__ + +#include <linux/errno.h> +#include <linux/string.h> +#include <asm/linkage.h> +#include <asm/ctl_reg.h> +#include <asm/sigcontext.h> + +struct fpu { + __u32 fpc; /* Floating-point control */ + __u32 flags; + union { + void *regs; + freg_t *fprs; /* Floating-point register save area */ + __vector128 *vxrs; /* Vector register save area */ + }; +}; + +void save_fpu_regs(void); + +#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX)) +#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX)) + +/* VX array structure for address operand constraints in inline assemblies */ +struct vx_array { __vector128 _[__NUM_VXRS]; }; + +static inline int test_fp_ctl(u32 fpc) +{ + u32 orig_fpc; + int rc; + + asm volatile( + " efpc %1\n" + " sfpc %2\n" + "0: sfpc %1\n" + " la %0,0\n" + "1:\n" + EX_TABLE(0b,1b) + : "=d" (rc), "=d" (orig_fpc) + : "d" (fpc), "0" (-EINVAL)); + return rc; +} + +static inline void save_vx_regs_safe(__vector128 *vxrs) +{ + unsigned long cr0, flags; + + flags = arch_local_irq_save(); + __ctl_store(cr0, 0, 0); + __ctl_set_bit(0, 17); + __ctl_set_bit(0, 18); + asm volatile( + " la 1,%0\n" + " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */ + " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */ + : "=Q" (*(struct vx_array *) vxrs) : : "1"); + __ctl_load(cr0, 0, 0); + arch_local_irq_restore(flags); +} + +static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs) +{ + int i; + + for (i = 0; i < __NUM_FPRS; i++) + fprs[i] = *(freg_t *)(vxrs + i); +} + +static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs) +{ + int i; + + for (i = 0; i < __NUM_FPRS; i++) + *(freg_t *)(vxrs + i) = fprs[i]; +} + +static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) +{ + fpregs->pad = 0; + if 
(is_vx_fpu(fpu)) + convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); + else + memcpy((freg_t *)&fpregs->fprs, fpu->fprs, + sizeof(fpregs->fprs)); +} + +static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) +{ + if (is_vx_fpu(fpu)) + convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); + else + memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, + sizeof(fpregs->fprs)); +} + +#endif + +#endif /* _ASM_S390_FPU_INTERNAL_H */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3024acbe1f9d..3d012e071647 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -22,6 +22,7 @@ #include <linux/kvm.h> #include <asm/debug.h> #include <asm/cpu.h> +#include <asm/fpu-internal.h> #include <asm/isc.h> #define KVM_MAX_VCPUS 64 @@ -258,6 +259,9 @@ struct kvm_vcpu_stat { u32 diagnose_10; u32 diagnose_44; u32 diagnose_9c; + u32 diagnose_258; + u32 diagnose_308; + u32 diagnose_500; }; #define PGM_OPERATION 0x01 @@ -498,10 +502,9 @@ struct kvm_guestdbg_info_arch { struct kvm_vcpu_arch { struct kvm_s390_sie_block *sie_block; - s390_fp_regs host_fpregs; unsigned int host_acrs[NUM_ACRS]; - s390_fp_regs guest_fpregs; - struct kvm_s390_vregs *host_vregs; + struct fpu host_fpregs; + struct fpu guest_fpregs; struct kvm_s390_local_interrupt local_int; struct hrtimer ckc_timer; struct kvm_s390_pgm_info pgm; @@ -630,7 +633,6 @@ extern char sie_exit; static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_check_processor_compat(void *rtn) {} -static inline void kvm_arch_exit(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h index fc8a8284778e..27da78cf416d 100644 --- a/arch/s390/include/asm/linkage.h +++ b/arch/s390/include/asm/linkage.h @@ -6,4 +6,26 @@ #define __ALIGN .align 4, 0x07 #define __ALIGN_STR __stringify(__ALIGN) +#ifndef __ASSEMBLY__ + +/* + * Helper macro for exception table entries + */ +#define EX_TABLE(_fault, _target) \ + ".section __ex_table,\"a\"\n" \ + ".align 4\n" \ + ".long (" #_fault ") - .\n" \ + ".long (" #_target ") - .\n" \ + ".previous\n" + +#else /* __ASSEMBLY__ */ + +#define EX_TABLE(_fault, _target) \ + .section __ex_table,"a" ; \ + .align 4 ; \ + .long (_fault) - . ; \ + .long (_target) - . ; \ + .previous + +#endif /* __ASSEMBLY__ */ #endif diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h new file mode 100644 index 000000000000..a9e834e60b84 --- /dev/null +++ b/arch/s390/include/asm/mmzone.h @@ -0,0 +1,16 @@ +/* + * NUMA support for s390 + * + * Copyright IBM Corp. 2015 + */ + +#ifndef _ASM_S390_MMZONE_H +#define _ASM_S390_MMZONE_H + +#ifdef CONFIG_NUMA + +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[nid]) + +#endif /* CONFIG_NUMA */ +#endif /* _ASM_S390_MMZONE_H */ diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h new file mode 100644 index 000000000000..2a0efc63b9e5 --- /dev/null +++ b/arch/s390/include/asm/numa.h @@ -0,0 +1,35 @@ +/* + * NUMA support for s390 + * + * Declare the NUMA core code structures and functions. + * + * Copyright IBM Corp. 
2015 + */ + +#ifndef _ASM_S390_NUMA_H +#define _ASM_S390_NUMA_H + +#ifdef CONFIG_NUMA + +#include <linux/numa.h> +#include <linux/cpumask.h> + +void numa_setup(void); +int numa_pfn_to_nid(unsigned long pfn); +int __node_distance(int a, int b); +void numa_update_cpu_topology(void); + +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +extern int numa_debug_enabled; + +#else + +static inline void numa_setup(void) { } +static inline void numa_update_cpu_topology(void) { } +static inline int numa_pfn_to_nid(unsigned long pfn) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ +#endif /* _ASM_S390_NUMA_H */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index a648338c434a..34d960353a08 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -170,7 +170,11 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {} #endif /* CONFIG_HOTPLUG_PCI_S390 */ /* Helpers */ -struct zpci_dev *get_zdev(struct pci_dev *); +static inline struct zpci_dev *to_zpci(struct pci_dev *pdev) +{ + return pdev->sysdata; +} + struct zpci_dev *get_zdev_by_fid(u32); /* DMA */ @@ -188,4 +192,20 @@ void zpci_debug_init_device(struct zpci_dev *); void zpci_debug_exit_device(struct zpci_dev *); void zpci_debug_info(struct zpci_dev *, struct seq_file *); +#ifdef CONFIG_NUMA + +/* Returns the node based on PCI bus */ +static inline int __pcibus_to_node(const struct pci_bus *bus) +{ + return NUMA_NO_NODE; +} + +static inline const struct cpumask * +cpumask_of_pcibus(const struct pci_bus *bus) +{ + return cpu_online_mask; +} + +#endif /* CONFIG_NUMA */ + #endif diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index f66d82798a6a..bdb2f51124ed 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -576,6 +576,19 @@ static inline int pte_same(pte_t a, pte_t b) return pte_val(a) == pte_val(b); } +#ifdef CONFIG_NUMA_BALANCING +static inline int pte_protnone(pte_t pte) +{ + return pte_present(pte) && !(pte_val(pte) & _PAGE_READ); +} + +static inline int pmd_protnone(pmd_t pmd) +{ + /* pmd_large(pmd) implies pmd_present(pmd) */ + return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ); +} +#endif + static inline pgste_t pgste_get_lock(pte_t *ptep) { unsigned long new = 0; diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index dedb6218544b..085fb0d3c54e 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -14,10 +14,12 @@ #define CIF_MCCK_PENDING 0 /* machine check handling is pending */ #define CIF_ASCE 1 /* user asce needs fixup / uaccess */ #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ +#define CIF_FPU 3 /* restore vector registers */ #define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) #define _CIF_ASCE (1<<CIF_ASCE) #define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) +#define _CIF_FPU (1<<CIF_FPU) #ifndef __ASSEMBLY__ @@ -28,6 +30,7 @@ #include <asm/ptrace.h> #include <asm/setup.h> #include <asm/runtime_instr.h> +#include <asm/fpu-internal.h> static inline void set_cpu_flag(int flag) { @@ -85,7 +88,7 @@ typedef struct { * Thread structure */ struct thread_struct { - s390_fp_regs fp_regs; + struct fpu fpu; /* FP and VX register save area */ unsigned int acrs[NUM_ACRS]; unsigned long ksp; /* kernel stack pointer */ mm_segment_t mm_segment; @@ -101,7 +104,6 @@ struct thread_struct { struct runtime_instr_cb *ri_cb; int ri_signum; unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ - __vector128 *vxrs; /* Vector register save area */ 
}; /* Flag to disable transactions. */ @@ -231,6 +233,17 @@ static inline void __load_psw_mask (unsigned long mask) } /* + * Extract current PSW mask + */ +static inline unsigned long __extract_psw(void) +{ + unsigned int reg1, reg2; + + asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2)); + return (((unsigned long) reg1) << 32) | ((unsigned long) reg2); +} + +/* * Rewind PSW instruction address by specified number of bytes. */ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) @@ -336,25 +349,6 @@ extern void memcpy_absolute(void *, void *, size_t); memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ } -/* - * Helper macro for exception table entries - */ -#define EX_TABLE(_fault, _target) \ - ".section __ex_table,\"a\"\n" \ - ".align 4\n" \ - ".long (" #_fault ") - .\n" \ - ".long (" #_target ") - .\n" \ - ".previous\n" - -#else /* __ASSEMBLY__ */ - -#define EX_TABLE(_fault, _target) \ - .section __ex_table,"a" ; \ - .align 4 ; \ - .long (_fault) - . ; \ - .long (_target) - . ; \ - .previous - #endif /* __ASSEMBLY__ */ #endif /* __ASM_S390_PROCESSOR_H */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index f6ff06077631..821dde5f425d 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -79,6 +79,6 @@ int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); void sclp_early_detect(void); -long _sclp_print_early(const char *); +int _sclp_print_early(const char *); #endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index d62e7a69605f..dcadfde32265 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -8,139 +8,12 @@ #define __ASM_SWITCH_TO_H #include <linux/thread_info.h> +#include <asm/fpu-internal.h> #include <asm/ptrace.h> extern struct task_struct *__switch_to(void *, void *); extern void update_cr_regs(struct task_struct *task); -static inline int test_fp_ctl(u32 fpc) -{ - u32 orig_fpc; - int rc; - - asm volatile( - " efpc %1\n" - " sfpc %2\n" - "0: sfpc %1\n" - " la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc), "=d" (orig_fpc) - : "d" (fpc), "0" (-EINVAL)); - return rc; -} - -static inline void save_fp_ctl(u32 *fpc) -{ - asm volatile( - " stfpc %0\n" - : "+Q" (*fpc)); -} - -static inline int restore_fp_ctl(u32 *fpc) -{ - int rc; - - asm volatile( - " lfpc %1\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); - return rc; -} - -static inline void save_fp_regs(freg_t *fprs) -{ - asm volatile("std 0,%0" : "=Q" (fprs[0])); - asm volatile("std 2,%0" : "=Q" (fprs[2])); - asm volatile("std 4,%0" : "=Q" (fprs[4])); - asm volatile("std 6,%0" : "=Q" (fprs[6])); - asm volatile("std 1,%0" : "=Q" (fprs[1])); - asm volatile("std 3,%0" : "=Q" (fprs[3])); - asm volatile("std 5,%0" : "=Q" (fprs[5])); - asm volatile("std 7,%0" : "=Q" (fprs[7])); - asm volatile("std 8,%0" : "=Q" (fprs[8])); - asm volatile("std 9,%0" : "=Q" (fprs[9])); - asm volatile("std 10,%0" : "=Q" (fprs[10])); - asm volatile("std 11,%0" : "=Q" (fprs[11])); - asm volatile("std 12,%0" : "=Q" (fprs[12])); - asm volatile("std 13,%0" : "=Q" (fprs[13])); - asm volatile("std 14,%0" : "=Q" (fprs[14])); - asm volatile("std 15,%0" : "=Q" (fprs[15])); -} - -static inline void restore_fp_regs(freg_t *fprs) -{ - asm volatile("ld 0,%0" : : "Q" (fprs[0])); - asm volatile("ld 2,%0" : : "Q" (fprs[2])); - asm volatile("ld 4,%0" : : "Q" (fprs[4])); - asm 
volatile("ld 6,%0" : : "Q" (fprs[6])); - asm volatile("ld 1,%0" : : "Q" (fprs[1])); - asm volatile("ld 3,%0" : : "Q" (fprs[3])); - asm volatile("ld 5,%0" : : "Q" (fprs[5])); - asm volatile("ld 7,%0" : : "Q" (fprs[7])); - asm volatile("ld 8,%0" : : "Q" (fprs[8])); - asm volatile("ld 9,%0" : : "Q" (fprs[9])); - asm volatile("ld 10,%0" : : "Q" (fprs[10])); - asm volatile("ld 11,%0" : : "Q" (fprs[11])); - asm volatile("ld 12,%0" : : "Q" (fprs[12])); - asm volatile("ld 13,%0" : : "Q" (fprs[13])); - asm volatile("ld 14,%0" : : "Q" (fprs[14])); - asm volatile("ld 15,%0" : : "Q" (fprs[15])); -} - -static inline void save_vx_regs(__vector128 *vxrs) -{ - typedef struct { __vector128 _[__NUM_VXRS]; } addrtype; - - asm volatile( - " la 1,%0\n" - " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */ - " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */ - : "=Q" (*(addrtype *) vxrs) : : "1"); -} - -static inline void save_vx_regs_safe(__vector128 *vxrs) -{ - unsigned long cr0, flags; - - flags = arch_local_irq_save(); - __ctl_store(cr0, 0, 0); - __ctl_set_bit(0, 17); - __ctl_set_bit(0, 18); - save_vx_regs(vxrs); - __ctl_load(cr0, 0, 0); - arch_local_irq_restore(flags); -} - -static inline void restore_vx_regs(__vector128 *vxrs) -{ - typedef struct { __vector128 _[__NUM_VXRS]; } addrtype; - - asm volatile( - " la 1,%0\n" - " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */ - " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */ - : : "Q" (*(addrtype *) vxrs) : "1"); -} - -static inline void save_fp_vx_regs(struct task_struct *task) -{ - if (task->thread.vxrs) - save_vx_regs(task->thread.vxrs); - else - save_fp_regs(task->thread.fp_regs.fprs); -} - -static inline void restore_fp_vx_regs(struct task_struct *task) -{ - if (task->thread.vxrs) - restore_vx_regs(task->thread.vxrs); - else - restore_fp_regs(task->thread.fp_regs.fprs); -} - static inline void save_access_regs(unsigned int *acrs) { typedef struct { int _[NUM_ACRS]; } acrstype; @@ -157,15 +30,13 @@ static inline void restore_access_regs(unsigned int *acrs) #define switch_to(prev,next,last) do { \ if (prev->mm) { \ - save_fp_ctl(&prev->thread.fp_regs.fpc); \ - save_fp_vx_regs(prev); \ + save_fpu_regs(); \ save_access_regs(&prev->thread.acrs[0]); \ save_ri_cb(prev->thread.ri_cb); \ } \ if (next->mm) { \ update_cr_regs(next); \ - restore_fp_ctl(&next->thread.fp_regs.fpc); \ - restore_fp_vx_regs(next); \ + set_cpu_flag(CIF_FPU); \ restore_access_regs(&next->thread.acrs[0]); \ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ } \ diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 4990f6c66288..27ebde643933 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -2,6 +2,7 @@ #define _ASM_S390_TOPOLOGY_H #include <linux/cpumask.h> +#include <asm/numa.h> struct sysinfo_15_1_x; struct cpu; @@ -13,6 +14,7 @@ struct cpu_topology_s390 { unsigned short core_id; unsigned short socket_id; unsigned short book_id; + unsigned short node_id; cpumask_t thread_mask; cpumask_t core_mask; cpumask_t book_mask; @@ -52,6 +54,43 @@ static inline void topology_expect_change(void) { } #define POLARIZATION_VM (2) #define POLARIZATION_VH (3) +#define SD_BOOK_INIT SD_CPU_INIT + +#ifdef CONFIG_NUMA + +#define cpu_to_node cpu_to_node +static inline int cpu_to_node(int cpu) +{ + return per_cpu(cpu_topology, cpu).node_id; +} + +/* Returns a pointer to the cpumask of CPUs on node 'node'. 
*/ +#define cpumask_of_node cpumask_of_node +static inline const struct cpumask *cpumask_of_node(int node) +{ + return node_to_cpumask_map[node]; +} + +/* + * Returns the number of the node containing node 'node'. This + * architecture is flat, so it is a pretty simple function! + */ +#define parent_node(node) (node) + +#define pcibus_to_node(bus) __pcibus_to_node(bus) + +#define node_distance(a, b) __node_distance(a, b) + +#else /* !CONFIG_NUMA */ + +#define numa_node_id numa_node_id +static inline int numa_node_id(void) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ + #include <asm-generic/topology.h> #endif /* _ASM_S390_TOPOLOGY_H */ diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 91f56b1d8156..525cef73b085 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -11,16 +11,24 @@ #define __IGNORE_time -/* Ignore NUMA system calls. Not wired up on s390. */ -#define __IGNORE_mbind -#define __IGNORE_get_mempolicy -#define __IGNORE_set_mempolicy -#define __IGNORE_migrate_pages -#define __IGNORE_move_pages - -/* Ignore system calls that are also reachable via sys_socket */ +/* Ignore system calls that are also reachable via sys_socketcall */ #define __IGNORE_recvmmsg #define __IGNORE_sendmmsg +#define __IGNORE_socket +#define __IGNORE_socketpair +#define __IGNORE_bind +#define __IGNORE_connect +#define __IGNORE_listen +#define __IGNORE_accept4 +#define __IGNORE_getsockopt +#define __IGNORE_setsockopt +#define __IGNORE_getsockname +#define __IGNORE_getpeername +#define __IGNORE_sendto +#define __IGNORE_sendmsg +#define __IGNORE_recvfrom +#define __IGNORE_recvmsg +#define __IGNORE_shutdown #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h new file mode 100644 index 000000000000..4a3135620f5e --- /dev/null +++ b/arch/s390/include/asm/vx-insn.h @@ -0,0 +1,480 @@ +/* + * Support for Vector Instructions + * + * Assembler macros to generate .byte/.word code for particular + * vector instructions that are supported by recent binutils (>= 2.26) only. + * + * Copyright IBM Corp. 
2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef __ASM_S390_VX_INSN_H +#define __ASM_S390_VX_INSN_H + +#ifdef __ASSEMBLY__ + + +/* Macros to generate vector instruction byte code */ + +#define REG_NUM_INVALID 255 + +/* GR_NUM - Retrieve general-purpose register number + * + * @opd: Operand to store register number + * @r64: String designation register in the format "%rN" + */ +.macro GR_NUM opd gr + \opd = REG_NUM_INVALID + .ifc \gr,%r0 + \opd = 0 + .endif + .ifc \gr,%r1 + \opd = 1 + .endif + .ifc \gr,%r2 + \opd = 2 + .endif + .ifc \gr,%r3 + \opd = 3 + .endif + .ifc \gr,%r4 + \opd = 4 + .endif + .ifc \gr,%r5 + \opd = 5 + .endif + .ifc \gr,%r6 + \opd = 6 + .endif + .ifc \gr,%r7 + \opd = 7 + .endif + .ifc \gr,%r8 + \opd = 8 + .endif + .ifc \gr,%r9 + \opd = 9 + .endif + .ifc \gr,%r10 + \opd = 10 + .endif + .ifc \gr,%r11 + \opd = 11 + .endif + .ifc \gr,%r12 + \opd = 12 + .endif + .ifc \gr,%r13 + \opd = 13 + .endif + .ifc \gr,%r14 + \opd = 14 + .endif + .ifc \gr,%r15 + \opd = 15 + .endif + .if \opd == REG_NUM_INVALID + .error "Invalid general-purpose register designation: \gr" + .endif +.endm + +/* VX_R() - Macro to encode the VX_NUM into the instruction */ +#define VX_R(v) (v & 0x0F) + +/* VX_NUM - Retrieve vector register number + * + * @opd: Operand to store register number + * @vxr: String designation register in the format "%vN" + * + * The vector register number is used for as input number to the + * instruction and, as well as, to compute the RXB field of the + * instruction. To encode the particular vector register number, + * use the VX_R(v) macro to extract the instruction opcode. + */ +.macro VX_NUM opd vxr + \opd = REG_NUM_INVALID + .ifc \vxr,%v0 + \opd = 0 + .endif + .ifc \vxr,%v1 + \opd = 1 + .endif + .ifc \vxr,%v2 + \opd = 2 + .endif + .ifc \vxr,%v3 + \opd = 3 + .endif + .ifc \vxr,%v4 + \opd = 4 + .endif + .ifc \vxr,%v5 + \opd = 5 + .endif + .ifc \vxr,%v6 + \opd = 6 + .endif + .ifc \vxr,%v7 + \opd = 7 + .endif + .ifc \vxr,%v8 + \opd = 8 + .endif + .ifc \vxr,%v9 + \opd = 9 + .endif + .ifc \vxr,%v10 + \opd = 10 + .endif + .ifc \vxr,%v11 + \opd = 11 + .endif + .ifc \vxr,%v12 + \opd = 12 + .endif + .ifc \vxr,%v13 + \opd = 13 + .endif + .ifc \vxr,%v14 + \opd = 14 + .endif + .ifc \vxr,%v15 + \opd = 15 + .endif + .ifc \vxr,%v16 + \opd = 16 + .endif + .ifc \vxr,%v17 + \opd = 17 + .endif + .ifc \vxr,%v18 + \opd = 18 + .endif + .ifc \vxr,%v19 + \opd = 19 + .endif + .ifc \vxr,%v20 + \opd = 20 + .endif + .ifc \vxr,%v21 + \opd = 21 + .endif + .ifc \vxr,%v22 + \opd = 22 + .endif + .ifc \vxr,%v23 + \opd = 23 + .endif + .ifc \vxr,%v24 + \opd = 24 + .endif + .ifc \vxr,%v25 + \opd = 25 + .endif + .ifc \vxr,%v26 + \opd = 26 + .endif + .ifc \vxr,%v27 + \opd = 27 + .endif + .ifc \vxr,%v28 + \opd = 28 + .endif + .ifc \vxr,%v29 + \opd = 29 + .endif + .ifc \vxr,%v30 + \opd = 30 + .endif + .ifc \vxr,%v31 + \opd = 31 + .endif + .if \opd == REG_NUM_INVALID + .error "Invalid vector register designation: \vxr" + .endif +.endm + +/* RXB - Compute most significant bit used vector registers + * + * @rxb: Operand to store computed RXB value + * @v1: First vector register designated operand + * @v2: Second vector register designated operand + * @v3: Third vector register designated operand + * @v4: Fourth vector register designated operand + */ +.macro RXB rxb v1 v2=0 v3=0 v4=0 + \rxb = 0 + .if \v1 & 0x10 + \rxb = \rxb | 0x08 + .endif + .if \v2 & 0x10 + \rxb = \rxb | 0x04 + .endif + .if \v3 & 0x10 + \rxb = \rxb | 0x02 + .endif + .if \v4 & 0x10 + \rxb = \rxb | 0x01 + .endif 
+.endm + +/* MRXB - Generate Element Size Control and RXB value + * + * @m: Element size control + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register designated operand (for RXB) + */ +.macro MRXB m v1 v2=0 v3=0 v4=0 + rxb = 0 + RXB rxb, \v1, \v2, \v3, \v4 + .byte (\m << 4) | rxb +.endm + +/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields + * + * @m: Element size control + * @opc: Opcode + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register designated operand (for RXB) + */ +.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0 + MRXB \m, \v1, \v2, \v3, \v4 + .byte \opc +.endm + +/* Vector support instructions */ + +/* VECTOR GENERATE BYTE MASK */ +.macro VGBM vr imm2 + VX_NUM v1, \vr + .word (0xE700 | (VX_R(v1) << 4)) + .word \imm2 + MRXBOPC 0, 0x44, v1 +.endm +.macro VZERO vxr + VGBM \vxr, 0 +.endm +.macro VONE vxr + VGBM \vxr, 0xFFFF +.endm + +/* VECTOR LOAD VR ELEMENT FROM GR */ +.macro VLVG v, gr, disp, m + VX_NUM v1, \v + GR_NUM b2, "%r0" + GR_NUM r3, \gr + .word 0xE700 | (VX_R(v1) << 4) | r3 + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x22, v1 +.endm +.macro VLVGB v, gr, index, base + VLVG \v, \gr, \index, \base, 0 +.endm +.macro VLVGH v, gr, index + VLVG \v, \gr, \index, 1 +.endm +.macro VLVGF v, gr, index + VLVG \v, \gr, \index, 2 +.endm +.macro VLVGG v, gr, index + VLVG \v, \gr, \index, 3 +.endm + +/* VECTOR LOAD */ +.macro VL v, disp, index="%r0", base + VX_NUM v1, \v + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | (VX_R(v1) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x06, v1 +.endm + +/* VECTOR LOAD ELEMENT */ +.macro VLEx vr1, disp, index="%r0", base, m3, opc + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | (VX_R(v1) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEB vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x00 +.endm +.macro VLEH vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x01 +.endm +.macro VLEF vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x03 +.endm +.macro VLEG vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x02 +.endm + +/* VECTOR LOAD ELEMENT IMMEDIATE */ +.macro VLEIx vr1, imm2, m3, opc + VX_NUM v1, \vr1 + .word 0xE700 | (VX_R(v1) << 4) + .word \imm2 + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEIB vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x40 +.endm +.macro VLEIH vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x41 +.endm +.macro VLEIF vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x43 +.endm +.macro VLEIG vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x42 +.endm + +/* VECTOR LOAD GR FROM VR ELEMENT */ +.macro VLGV gr, vr, disp, base="%r0", m + GR_NUM r1, \gr + GR_NUM b2, \base + VX_NUM v3, \vr + .word 0xE700 | (r1 << 4) | VX_R(v3) + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x21, v3 +.endm +.macro VLGVB gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 0 +.endm +.macro VLGVH gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 1 +.endm +.macro VLGVF gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 2 +.endm +.macro VLGVG gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 3 +.endm + +/* VECTOR LOAD MULTIPLE */ +.macro VLM vfrom, vto, disp, 
base + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base /* Base register */ + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3) + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x36, v1, v3 +.endm + +/* VECTOR STORE MULTIPLE */ +.macro VSTM vfrom, vto, disp, base + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base /* Base register */ + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3) + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x3E, v1, v3 +.endm + +/* VECTOR PERMUTE */ +.macro VPERM vr1, vr2, vr3, vr4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + VX_NUM v4, \vr4 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC VX_R(v4), 0x8C, v1, v2, v3, v4 +.endm + +/* VECTOR UNPACK LOGICAL LOW */ +.macro VUPLL vr1, vr2, m3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word 0x0000 + MRXBOPC \m3, 0xD4, v1, v2 +.endm +.macro VUPLLB vr1, vr2 + VUPLL \vr1, \vr2, 0 +.endm +.macro VUPLLH vr1, vr2 + VUPLL \vr1, \vr2, 1 +.endm +.macro VUPLLF vr1, vr2 + VUPLL \vr1, \vr2, 2 +.endm + + +/* Vector integer instructions */ + +/* VECTOR EXCLUSIVE OR */ +.macro VX vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC 0, 0x6D, v1, v2, v3 +.endm + +/* VECTOR GALOIS FIELD MULTIPLY SUM */ +.macro VGFM vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC \m4, 0xB4, v1, v2, v3 +.endm +.macro VGFMB vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 0 +.endm +.macro VGFMH vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 1 +.endm +.macro VGFMF vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 2 +.endm +.macro VGFMG vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 3 +.endm + +/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ +.macro VGFMA vr1, vr2, vr3, vr4, m5 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + VX_NUM v4, \vr4 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) | (\m5 << 8) + MRXBOPC VX_R(v4), 0xBC, v1, v2, v3, v4 +.endm +.macro VGFMAB vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 0 +.endm +.macro VGFMAH vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 1 +.endm +.macro VGFMAF vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 2 +.endm +.macro VGFMAG vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 3 +.endm + +/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ +.macro VSRLB vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC 0, 0x7D, v1, v2, v3 +.endm + + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_S390_VX_INSN_H */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 67878af257a0..59d2bb4e2d0c 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -204,9 +204,9 @@ #define __NR_statfs64 265 #define __NR_fstatfs64 266 #define __NR_remap_file_pages 267 -/* Number 268 is reserved for new sys_mbind */ -/* Number 269 is reserved for new sys_get_mempolicy */ -/* Number 270 is reserved for new sys_set_mempolicy */ +#define __NR_mbind 268 +#define __NR_get_mempolicy 269 +#define __NR_set_mempolicy 270 #define __NR_mq_open 271 #define __NR_mq_unlink 272 #define __NR_mq_timedsend 273 @@ -223,7 +223,7 @@ #define __NR_inotify_init 284 #define __NR_inotify_add_watch 285 #define __NR_inotify_rm_watch 286 -/* Number 287 is reserved for new sys_migrate_pages */ +#define __NR_migrate_pages 287 #define __NR_openat 288 #define 
__NR_mkdirat 289 #define __NR_mknodat 290 @@ -245,7 +245,7 @@ #define __NR_sync_file_range 307 #define __NR_tee 308 #define __NR_vmsplice 309 -/* Number 310 is reserved for new sys_move_pages */ +#define __NR_move_pages 310 #define __NR_getcpu 311 #define __NR_epoll_pwait 312 #define __NR_utimes 313 diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index ffb87617a36c..b756c6348ac6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -28,6 +28,17 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' CFLAGS_sysinfo.o += -w +# +# Use -march=z900 for sclp.c to be able to print an error message if +# the kernel is started on a machine which is too old +# +CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE) +ifneq ($(CC_FLAGS_MARCH),-march=z900) +CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH) +CFLAGS_sclp.o += -march=z900 +endif +GCOV_PROFILE_sclp.o := n + obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index a2da259d9327..48c9af7a7683 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -28,6 +28,9 @@ int main(void) DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); BLANK(); DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp)); + DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc)); + DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags)); + DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs)); DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause)); DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address)); DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid)); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index fe8d6924efaa..eb4664238613 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -153,33 +153,14 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) /* Store registers needed to create the signal frame */ static void store_sigregs(void) { - int i; - save_access_regs(current->thread.acrs); - save_fp_ctl(¤t->thread.fp_regs.fpc); - if (current->thread.vxrs) { - save_vx_regs(current->thread.vxrs); - for (i = 0; i < __NUM_FPRS; i++) - current->thread.fp_regs.fprs[i] = - *(freg_t *)(current->thread.vxrs + i); - } else - save_fp_regs(current->thread.fp_regs.fprs); + save_fpu_regs(); } /* Load registers after signal return */ static void load_sigregs(void) { - int i; - restore_access_regs(current->thread.acrs); - /* restore_fp_ctl is done in restore_sigregs */ - if (current->thread.vxrs) { - for (i = 0; i < __NUM_FPRS; i++) - *(freg_t *)(current->thread.vxrs + i) = - current->thread.fp_regs.fprs[i]; - restore_vx_regs(current->thread.vxrs); - } else - restore_fp_regs(current->thread.fp_regs.fprs); } static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) @@ -196,8 +177,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) user_sregs.regs.gprs[i] = (__u32) regs->gprs[i]; memcpy(&user_sregs.regs.acrs, current->thread.acrs, sizeof(user_sregs.regs.acrs)); - memcpy(&user_sregs.fpregs, ¤t->thread.fp_regs, - sizeof(user_sregs.fpregs)); + fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu); if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32))) return 
-EFAULT; return 0; @@ -217,8 +197,8 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI)) return -EINVAL; - /* Loading the floating-point-control word can fail. Do that first. */ - if (restore_fp_ctl(&user_sregs.fpregs.fpc)) + /* Test the floating-point-control word. */ + if (test_fp_ctl(user_sregs.fpregs.fpc)) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ @@ -235,9 +215,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) regs->gprs[i] = (__u64) user_sregs.regs.gprs[i]; memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, sizeof(current->thread.acrs)); - - memcpy(¤t->thread.fp_regs, &user_sregs.fpregs, - sizeof(current->thread.fp_regs)); + fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu); clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ return 0; @@ -258,13 +236,13 @@ static int save_sigregs_ext32(struct pt_regs *regs, return -EFAULT; /* Save vector registers to signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1); + vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, sizeof(sregs_ext->vxrs_low)) || __copy_to_user(&sregs_ext->vxrs_high, - current->thread.vxrs + __NUM_VXRS_LOW, + current->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(sregs_ext->vxrs_high))) return -EFAULT; } @@ -286,15 +264,15 @@ static int restore_sigregs_ext32(struct pt_regs *regs, *(__u32 *)®s->gprs[i] = gprs_high[i]; /* Restore vector registers from signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || - __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW, + __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, &sregs_ext->vxrs_high, sizeof(sregs_ext->vxrs_high))) return -EFAULT; for (i = 0; i < __NUM_VXRS_LOW; i++) - *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; + *((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i]; } return 0; } @@ -308,6 +286,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) goto badframe; set_current_blocked(&set); + save_fpu_regs(); if (restore_sigregs32(regs, &frame->sregs)) goto badframe; if (restore_sigregs_ext32(regs, &frame->sregs_ext)) @@ -330,6 +309,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) set_current_blocked(&set); if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; + save_fpu_regs(); if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) @@ -472,7 +452,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, */ uc_flags = UC_GPRS_HIGH; if (MACHINE_HAS_VX) { - if (current->thread.vxrs) + if (is_vx_task(current)) uc_flags |= UC_VXRS; } else frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) + diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 84062e7a77da..247b7aae4c6d 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -20,6 +20,8 @@ #include <asm/page.h> #include <asm/sigp.h> #include <asm/irq.h> +#include <asm/fpu-internal.h> +#include <asm/vx-insn.h> __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 @@ -46,10 +48,10 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_UPROBE) 
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT) -_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) +_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU) _PIF_WORK = (_PIF_PER_TRAP) -#define BASED(name) name-system_call(%r13) +#define BASED(name) name-cleanup_critical(%r13) .macro TRACE_IRQS_ON #ifdef CONFIG_TRACE_IRQFLAGS @@ -73,38 +75,6 @@ _PIF_WORK = (_PIF_PER_TRAP) #endif .endm - .macro LPP newpp -#if IS_ENABLED(CONFIG_KVM) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP - jz .+8 - .insn s,0xb2800000,\newpp -#endif - .endm - - .macro HANDLE_SIE_INTERCEPT scratch,reason -#if IS_ENABLED(CONFIG_KVM) - tmhh %r8,0x0001 # interrupting from user ? - jnz .+62 - lgr \scratch,%r9 - slg \scratch,BASED(.Lsie_critical) - clg \scratch,BASED(.Lsie_critical_length) - .if \reason==1 - # Some program interrupts are suppressing (e.g. protection). - # We must also check the instruction after SIE in that case. - # do_protection_exception will rewind to .Lrewind_pad - jh .+42 - .else - jhe .+42 - .endif - lg %r14,__SF_EMPTY(%r15) # get control block pointer - LPP __SF_EMPTY+16(%r15) # set host id - ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - larl %r9,sie_exit # skip forward to sie_exit - mvi __SF_EMPTY+31(%r15),\reason # set exit reason -#endif - .endm - .macro CHECK_STACK stacksize,savearea #ifdef CONFIG_CHECK_STACK tml %r15,\stacksize - CONFIG_STACK_GUARD @@ -113,7 +83,7 @@ _PIF_WORK = (_PIF_PER_TRAP) #endif .endm - .macro SWITCH_ASYNC savearea,stack,shift + .macro SWITCH_ASYNC savearea,timer tmhh %r8,0x0001 # interrupting from user ? jnz 1f lgr %r14,%r9 @@ -124,26 +94,28 @@ _PIF_WORK = (_PIF_PER_TRAP) brasl %r14,cleanup_critical tmhh %r8,0x0001 # retest problem state after cleanup jnz 1f -0: lg %r14,\stack # are we already on the target stack? +0: lg %r14,__LC_ASYNC_STACK # are we already on the async stack? slgr %r14,%r15 - srag %r14,%r14,\shift - jnz 1f - CHECK_STACK 1<<\shift,\savearea + srag %r14,%r14,STACK_SHIFT + jnz 2f + CHECK_STACK 1<<STACK_SHIFT,\savearea aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 2f -1: lg %r15,\stack # load target stack -2: la %r11,STACK_FRAME_OVERHEAD(%r15) + j 3f +1: LAST_BREAK %r14 + UPDATE_VTIME %r14,%r15,\timer +2: lg %r15,__LC_ASYNC_STACK # load async stack +3: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm - .macro UPDATE_VTIME scratch,enter_timer - lg \scratch,__LC_EXIT_TIMER - slg \scratch,\enter_timer - alg \scratch,__LC_USER_TIMER - stg \scratch,__LC_USER_TIMER - lg \scratch,__LC_LAST_UPDATE_TIMER - slg \scratch,__LC_EXIT_TIMER - alg \scratch,__LC_SYSTEM_TIMER - stg \scratch,__LC_SYSTEM_TIMER + .macro UPDATE_VTIME w1,w2,enter_timer + lg \w1,__LC_EXIT_TIMER + lg \w2,__LC_LAST_UPDATE_TIMER + slg \w1,\enter_timer + slg \w2,__LC_EXIT_TIMER + alg \w1,__LC_USER_TIMER + alg \w2,__LC_SYSTEM_TIMER + stg \w1,__LC_USER_TIMER + stg \w2,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer .endm @@ -197,6 +169,69 @@ ENTRY(__switch_to) br %r14 .L__critical_start: + +#if IS_ENABLED(CONFIG_KVM) +/* + * sie64a calling convention: + * %r2 pointer to sie control block + * %r3 guest register save area + */ +ENTRY(sie64a) + stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers + stg %r2,__SF_EMPTY(%r15) # save control block pointer + stg %r3,__SF_EMPTY+8(%r15) # save guest register save area + xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason + tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ? 
+ jno .Lsie_load_guest_gprs + brasl %r14,load_fpu_regs # load guest fp/vx regs +.Lsie_load_guest_gprs: + lmg %r0,%r13,0(%r3) # load guest gprs 0-13 + lg %r14,__LC_GMAP # get gmap pointer + ltgr %r14,%r14 + jz .Lsie_gmap + lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce +.Lsie_gmap: + lg %r14,__SF_EMPTY(%r15) # get control block pointer + oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now + tm __SIE_PROG20+3(%r14),3 # last exit... + jnz .Lsie_skip + tm __LC_CPU_FLAGS+7,_CIF_FPU + jo .Lsie_skip # exit if fp/vx regs changed + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP + jz .Lsie_enter + .insn s,0xb2800000,__LC_CURRENT_PID # set guest id to pid +.Lsie_enter: + sie 0(%r14) + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP + jz .Lsie_skip + .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id +.Lsie_skip: + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce +.Lsie_done: +# some program checks are suppressing. C code (e.g. do_protection_exception) +# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other +# instructions between sie64a and .Lsie_done should not cause program +# interrupts. So lets use a nop (47 00 00 00) as a landing pad. +# See also .Lcleanup_sie +.Lrewind_pad: + nop 0 + .globl sie_exit +sie_exit: + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area + stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers + lg %r2,__SF_EMPTY+24(%r15) # return exit reason code + br %r14 +.Lsie_fault: + lghi %r14,-EFAULT + stg %r14,__SF_EMPTY+24(%r15) # set exit reason code + j sie_exit + + EX_TABLE(.Lrewind_pad,.Lsie_fault) + EX_TABLE(sie_exit,.Lsie_fault) +#endif + /* * SVC interrupt handler routine. System calls are synchronous events and * are executed with interrupts enabled. @@ -212,9 +247,9 @@ ENTRY(system_call) .Lsysc_per: lg %r15,__LC_KERNEL_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs -.Lsysc_vtime: - UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER LAST_BREAK %r13 +.Lsysc_vtime: + UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW @@ -244,8 +279,6 @@ ENTRY(system_call) .Lsysc_return: LOCKDEP_SYS_EXIT .Lsysc_tif: - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jno .Lsysc_restore tm __PT_FLAGS+7(%r11),_PIF_WORK jnz .Lsysc_work tm __TI_flags+7(%r12),_TIF_WORK @@ -280,6 +313,8 @@ ENTRY(system_call) jo .Lsysc_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo .Lsysc_notify_resume + tm __LC_CPU_FLAGS+7,_CIF_FPU + jo .Lsysc_vxrs tm __LC_CPU_FLAGS+7,_CIF_ASCE jo .Lsysc_uaccess j .Lsysc_return # beware of critical section cleanup @@ -307,6 +342,13 @@ ENTRY(system_call) j .Lsysc_return # +# CIF_FPU is set, restore floating-point controls and floating-point registers. +# +.Lsysc_vxrs: + larl %r14,.Lsysc_return + jg load_fpu_regs + +# # _TIF_SIGPENDING is set, call do_signal # .Lsysc_sigpending: @@ -405,28 +447,35 @@ ENTRY(pgm_check_handler) stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_PGM_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,1 tmhh %r8,0x0001 # test problem state bit - jnz 1f # -> fault in user space - tmhh %r8,0x4000 # PER bit set in old PSW ? 
- jnz 0f # -> enabled, can't be a double fault + jnz 2f # -> fault in user space +#if IS_ENABLED(CONFIG_KVM) + # cleanup critical section for sie64a + lgr %r14,%r9 + slg %r14,BASED(.Lsie_critical_start) + clg %r14,BASED(.Lsie_critical_length) + jhe 0f + brasl %r14,.Lcleanup_sie +#endif +0: tmhh %r8,0x4000 # PER bit set in old PSW ? + jnz 1f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception jnz .Lpgm_svcper # -> single stepped svc -0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC +1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 2f -1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER - LAST_BREAK %r14 + j 3f +2: LAST_BREAK %r14 + UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER lg %r15,__LC_KERNEL_STACK lg %r14,__TI_task(%r12) aghi %r14,__TASK_thread # pointer to thread_struct lghi %r13,__LC_PGM_TDB tm __LC_PGM_ILC+2,0x02 # check for transaction abort - jz 2f + jz 3f mvc __THREAD_trap_tdb(256,%r14),0(%r13) -2: la %r11,STACK_FRAME_OVERHEAD(%r15) +3: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -435,24 +484,28 @@ ENTRY(pgm_check_handler) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception - jz 0f + jz 4f tmhh %r8,0x0001 # kernel per event ? jz .Lpgm_kprobe oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -0: REENABLE_IRQS +4: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) nill %r10,0x007f sll %r10,2 - je .Lsysc_return + je .Lpgm_return lgf %r1,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # branch to interrupt-handler - j .Lsysc_return +.Lpgm_return: + LOCKDEP_SYS_EXIT + tm __PT_PSW+1(%r11),0x01 # returning to user ? + jno .Lsysc_restore + j .Lsysc_tif # # PER event in supervisor state, must be kprobes @@ -462,7 +515,7 @@ ENTRY(pgm_check_handler) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_per_trap - j .Lsysc_return + j .Lpgm_return # # single stepped system call @@ -483,15 +536,9 @@ ENTRY(io_int_handler) stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_IO_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,2 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - tmhh %r8,0x0001 # interrupting from user? - jz .Lio_skip - UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER - LAST_BREAK %r14 -.Lio_skip: + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -587,6 +634,8 @@ ENTRY(io_int_handler) jo .Lio_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo .Lio_notify_resume + tm __LC_CPU_FLAGS+7,_CIF_FPU + jo .Lio_vxrs tm __LC_CPU_FLAGS+7,_CIF_ASCE jo .Lio_uaccess j .Lio_return # beware of critical section cleanup @@ -609,6 +658,13 @@ ENTRY(io_int_handler) j .Lio_return # +# CIF_FPU is set, restore floating-point controls and floating-point registers. 
+# +.Lio_vxrs: + larl %r14,.Lio_return + jg load_fpu_regs + +# # _TIF_NEED_RESCHED is set, call schedule # .Lio_reschedule: @@ -652,15 +708,9 @@ ENTRY(ext_int_handler) stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_EXT_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,3 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - tmhh %r8,0x0001 # interrupting from user ? - jz .Lext_skip - UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER - LAST_BREAK %r14 -.Lext_skip: + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -690,6 +740,122 @@ ENTRY(psw_idle) br %r14 .Lpsw_idle_end: +/* Store floating-point controls and floating-point or vector extension + * registers instead. A critical section cleanup assures that the registers + * are stored even if interrupted for some other work. The register %r2 + * designates a struct fpu to store register contents. If the specified + * structure does not contain a register save area, the register store is + * omitted (see also comments in arch_dup_task_struct()). + * + * The CIF_FPU flag is set in any case. The CIF_FPU triggers a lazy restore + * of the register contents at system call or io return. + */ +ENTRY(save_fpu_regs) + lg %r2,__LC_CURRENT + aghi %r2,__TASK_thread + tm __LC_CPU_FLAGS+7,_CIF_FPU + bor %r14 + stfpc __THREAD_FPU_fpc(%r2) +.Lsave_fpu_regs_fpc_end: + lg %r3,__THREAD_FPU_regs(%r2) + ltgr %r3,%r3 + jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU + tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + jz .Lsave_fpu_regs_fp # no -> store FP regs +.Lsave_fpu_regs_vx_low: + VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) +.Lsave_fpu_regs_vx_high: + VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) + j .Lsave_fpu_regs_done # -> set CIF_FPU flag +.Lsave_fpu_regs_fp: + std 0,0(%r3) + std 1,8(%r3) + std 2,16(%r3) + std 3,24(%r3) + std 4,32(%r3) + std 5,40(%r3) + std 6,48(%r3) + std 7,56(%r3) + std 8,64(%r3) + std 9,72(%r3) + std 10,80(%r3) + std 11,88(%r3) + std 12,96(%r3) + std 13,104(%r3) + std 14,112(%r3) + std 15,120(%r3) +.Lsave_fpu_regs_done: + oi __LC_CPU_FLAGS+7,_CIF_FPU + br %r14 +.Lsave_fpu_regs_end: + +/* Load floating-point controls and floating-point or vector extension + * registers. A critical section cleanup assures that the register contents + * are loaded even if interrupted for some other work. Depending on the saved + * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared. + * + * There are special calling conventions to fit into sysc and io return work: + * %r15: <kernel stack> + * The function requires: + * %r4 and __SF_EMPTY+32(%r15) + */ +load_fpu_regs: + lg %r4,__LC_CURRENT + aghi %r4,__TASK_thread + tm __LC_CPU_FLAGS+7,_CIF_FPU + bnor %r14 + lfpc __THREAD_FPU_fpc(%r4) + stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 + tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 
+ lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area + jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs +.Lload_fpu_regs_vx_ctl: + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jo .Lload_fpu_regs_vx + oi __SF_EMPTY+32+5(%r15),2 # set VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +.Lload_fpu_regs_vx: + VLM %v0,%v15,0,%r4 +.Lload_fpu_regs_vx_high: + VLM %v16,%v31,256,%r4 + j .Lload_fpu_regs_done +.Lload_fpu_regs_fp_ctl: + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jz .Lload_fpu_regs_fp + ni __SF_EMPTY+32+5(%r15),253 # clear VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +.Lload_fpu_regs_fp: + ld 0,0(%r4) + ld 1,8(%r4) + ld 2,16(%r4) + ld 3,24(%r4) + ld 4,32(%r4) + ld 5,40(%r4) + ld 6,48(%r4) + ld 7,56(%r4) + ld 8,64(%r4) + ld 9,72(%r4) + ld 10,80(%r4) + ld 11,88(%r4) + ld 12,96(%r4) + ld 13,104(%r4) + ld 14,112(%r4) + ld 15,120(%r4) +.Lload_fpu_regs_done: + ni __LC_CPU_FLAGS+7,255-_CIF_FPU + br %r14 +.Lload_fpu_regs_end: + +/* Test and set the vector enablement control in CR0.46 */ +ENTRY(__ctl_set_vx) + stctg %c0,%c0,__SF_EMPTY(%r15) + tm __SF_EMPTY+5(%r15),2 + bor %r14 + oi __SF_EMPTY+5(%r15),2 + lctlg %c0,%c0,__SF_EMPTY(%r15) + br %r14 +.L__ctl_set_vx_end: + .L__critical_end: /* @@ -702,9 +868,8 @@ ENTRY(mcck_int_handler) lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_MCK_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,4 tm __LC_MCCK_CODE,0x80 # system damage? jo .Lmcck_panic # yes -> rest of mcck code invalid lghi %r14,__LC_CPU_TIMER_SAVE_AREA @@ -725,11 +890,7 @@ ENTRY(mcck_int_handler) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? jno .Lmcck_panic # no -> skip cleanup critical - SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT - tm %r8,0x0001 # interrupting from user ? 
- jz .Lmcck_skip - UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER - LAST_BREAK %r14 + SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER .Lmcck_skip: lghi %r14,__LC_GPREGS_SAVE_AREA+64 stmg %r0,%r7,__PT_R0(%r11) @@ -764,12 +925,8 @@ ENTRY(mcck_int_handler) lpswe __LC_RETURN_MCCK_PSW .Lmcck_panic: - lg %r14,__LC_PANIC_STACK - slgr %r14,%r15 - srag %r14,%r14,PAGE_SHIFT - jz 0f lg %r15,__LC_PANIC_STACK -0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j .Lmcck_skip # @@ -819,20 +976,13 @@ stack_overflow: jg kernel_stack_overflow #endif - .align 8 -.Lcleanup_table: - .quad system_call - .quad .Lsysc_do_svc - .quad .Lsysc_tif - .quad .Lsysc_restore - .quad .Lsysc_done - .quad .Lio_tif - .quad .Lio_restore - .quad .Lio_done - .quad psw_idle - .quad .Lpsw_idle_end - cleanup_critical: +#if IS_ENABLED(CONFIG_KVM) + clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap + jl 0f + clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done + jl .Lcleanup_sie +#endif clg %r9,BASED(.Lcleanup_table) # system_call jl 0f clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc @@ -853,8 +1003,54 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end jl .Lcleanup_idle + clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs + jl 0f + clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end + jl .Lcleanup_save_fpu_regs + clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs + jl 0f + clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end + jl .Lcleanup_load_fpu_regs + clg %r9,BASED(.Lcleanup_table+112) # __ctl_set_vx + jl 0f + clg %r9,BASED(.Lcleanup_table+120) # .L__ctl_set_vx_end + jl .Lcleanup___ctl_set_vx 0: br %r14 + .align 8 +.Lcleanup_table: + .quad system_call + .quad .Lsysc_do_svc + .quad .Lsysc_tif + .quad .Lsysc_restore + .quad .Lsysc_done + .quad .Lio_tif + .quad .Lio_restore + .quad .Lio_done + .quad psw_idle + .quad .Lpsw_idle_end + .quad save_fpu_regs + .quad .Lsave_fpu_regs_end + .quad load_fpu_regs + .quad .Lload_fpu_regs_end + .quad __ctl_set_vx + .quad .L__ctl_set_vx_end + +#if IS_ENABLED(CONFIG_KVM) +.Lcleanup_table_sie: + .quad .Lsie_gmap + .quad .Lsie_done + +.Lcleanup_sie: + lg %r9,__SF_EMPTY(%r15) # get control block pointer + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP + jz 0f + .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id +0: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + larl %r9,sie_exit # skip forward to sie_exit + br %r14 +#endif .Lcleanup_system_call: # check if stpt has been executed @@ -915,7 +1111,7 @@ cleanup_critical: .quad system_call .quad .Lsysc_stmg .quad .Lsysc_per - .quad .Lsysc_vtime+18 + .quad .Lsysc_vtime+36 .quad .Lsysc_vtime+42 .Lcleanup_sysc_tif: @@ -981,6 +1177,145 @@ cleanup_critical: .Lcleanup_idle_insn: .quad .Lpsw_idle_lpsw +.Lcleanup_save_fpu_regs: + tm __LC_CPU_FLAGS+7,_CIF_FPU + bor %r14 + clg %r9,BASED(.Lcleanup_save_fpu_regs_done) + jhe 5f + clg %r9,BASED(.Lcleanup_save_fpu_regs_fp) + jhe 4f + clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_high) + jhe 3f + clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_low) + jhe 2f + clg %r9,BASED(.Lcleanup_save_fpu_fpc_end) + jhe 1f + lg %r2,__LC_CURRENT +0: # Store floating-point controls + stfpc __THREAD_FPU_fpc(%r2) +1: # Load register save area and check if VX is active + lg %r3,__THREAD_FPU_regs(%r2) + ltgr %r3,%r3 + jz 5f # no save area -> set CIF_FPU + tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + jz 4f # no VX -> store FP regs +2: # Store vector registers (V0-V15) + VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) +3: # Store vector 
registers (V16-V31) + VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) + j 5f # -> done, set CIF_FPU flag +4: # Store floating-point registers + std 0,0(%r3) + std 1,8(%r3) + std 2,16(%r3) + std 3,24(%r3) + std 4,32(%r3) + std 5,40(%r3) + std 6,48(%r3) + std 7,56(%r3) + std 8,64(%r3) + std 9,72(%r3) + std 10,80(%r3) + std 11,88(%r3) + std 12,96(%r3) + std 13,104(%r3) + std 14,112(%r3) + std 15,120(%r3) +5: # Set CIF_FPU flag + oi __LC_CPU_FLAGS+7,_CIF_FPU + lg %r9,48(%r11) # return from save_fpu_regs + br %r14 +.Lcleanup_save_fpu_fpc_end: + .quad .Lsave_fpu_regs_fpc_end +.Lcleanup_save_fpu_regs_vx_low: + .quad .Lsave_fpu_regs_vx_low +.Lcleanup_save_fpu_regs_vx_high: + .quad .Lsave_fpu_regs_vx_high +.Lcleanup_save_fpu_regs_fp: + .quad .Lsave_fpu_regs_fp +.Lcleanup_save_fpu_regs_done: + .quad .Lsave_fpu_regs_done + +.Lcleanup_load_fpu_regs: + tm __LC_CPU_FLAGS+7,_CIF_FPU + bnor %r14 + clg %r9,BASED(.Lcleanup_load_fpu_regs_done) + jhe 1f + clg %r9,BASED(.Lcleanup_load_fpu_regs_fp) + jhe 2f + clg %r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl) + jhe 3f + clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high) + jhe 4f + clg %r9,BASED(.Lcleanup_load_fpu_regs_vx) + jhe 5f + clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) + jhe 6f + lg %r4,__LC_CURRENT + lfpc __THREAD_FPU_fpc(%r4) + tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? + lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area + jz 3f # -> no VX, load FP regs +6: # Set VX-enablement control + stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jo 5f + oi __SF_EMPTY+32+5(%r15),2 # set VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +5: # Load V0 ..V15 registers + VLM %v0,%v15,0,%r4 +4: # Load V16..V31 registers + VLM %v16,%v31,256,%r4 + j 1f +3: # Clear VX-enablement control for FP + stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jz 2f + ni __SF_EMPTY+32+5(%r15),253 # clear VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +2: # Load floating-point registers + ld 0,0(%r4) + ld 1,8(%r4) + ld 2,16(%r4) + ld 3,24(%r4) + ld 4,32(%r4) + ld 5,40(%r4) + ld 6,48(%r4) + ld 7,56(%r4) + ld 8,64(%r4) + ld 9,72(%r4) + ld 10,80(%r4) + ld 11,88(%r4) + ld 12,96(%r4) + ld 13,104(%r4) + ld 14,112(%r4) + ld 15,120(%r4) +1: # Clear CIF_FPU bit + ni __LC_CPU_FLAGS+7,255-_CIF_FPU + lg %r9,48(%r11) # return from load_fpu_regs + br %r14 +.Lcleanup_load_fpu_regs_vx_ctl: + .quad .Lload_fpu_regs_vx_ctl +.Lcleanup_load_fpu_regs_vx: + .quad .Lload_fpu_regs_vx +.Lcleanup_load_fpu_regs_vx_high: + .quad .Lload_fpu_regs_vx_high +.Lcleanup_load_fpu_regs_fp_ctl: + .quad .Lload_fpu_regs_fp_ctl +.Lcleanup_load_fpu_regs_fp: + .quad .Lload_fpu_regs_fp +.Lcleanup_load_fpu_regs_done: + .quad .Lload_fpu_regs_done + +.Lcleanup___ctl_set_vx: + stctg %c0,%c0,__SF_EMPTY(%r15) + tm __SF_EMPTY+5(%r15),2 + bor %r14 + oi __SF_EMPTY+5(%r15),2 + lctlg %c0,%c0,__SF_EMPTY(%r15) + lg %r9,48(%r11) # return from __ctl_set_vx + br %r14 + /* * Integer constants */ @@ -989,62 +1324,11 @@ cleanup_critical: .quad .L__critical_start .Lcritical_length: .quad .L__critical_end - .L__critical_start - - #if IS_ENABLED(CONFIG_KVM) -/* - * sie64a calling convention: - * %r2 pointer to sie control block - * %r3 guest register save area - */ -ENTRY(sie64a) - stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers - stg %r2,__SF_EMPTY(%r15) # save control block pointer - stg %r3,__SF_EMPTY+8(%r15) # save guest register save area - xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason - lmg %r0,%r13,0(%r3) # load guest gprs 0-13 
- lg %r14,__LC_GMAP # get gmap pointer - ltgr %r14,%r14 - jz .Lsie_gmap - lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce -.Lsie_gmap: - lg %r14,__SF_EMPTY(%r15) # get control block pointer - oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now - tm __SIE_PROG20+3(%r14),3 # last exit... - jnz .Lsie_done - LPP __SF_EMPTY(%r15) # set guest id - sie 0(%r14) -.Lsie_done: - LPP __SF_EMPTY+16(%r15) # set host id - ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce -# some program checks are suppressing. C code (e.g. do_protection_exception) -# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions between sie64a and .Lsie_done should not cause program -# interrupts. So lets use a nop (47 00 00 00) as a landing pad. -# See also HANDLE_SIE_INTERCEPT -.Lrewind_pad: - nop 0 - .globl sie_exit -sie_exit: - lg %r14,__SF_EMPTY+8(%r15) # load guest register save area - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers - lg %r2,__SF_EMPTY+24(%r15) # return exit reason code - br %r14 -.Lsie_fault: - lghi %r14,-EFAULT - stg %r14,__SF_EMPTY+24(%r15) # set exit reason code - j sie_exit - - .align 8 -.Lsie_critical: +.Lsie_critical_start: .quad .Lsie_gmap .Lsie_critical_length: .quad .Lsie_done - .Lsie_gmap - - EX_TABLE(.Lrewind_pad,.Lsie_fault) - EX_TABLE(sie_exit,.Lsie_fault) #endif .section .rodata, "a" diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 59b7c6470567..1255c6c5353e 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -370,6 +370,7 @@ ENTRY(startup_kdump) xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 xc 0xe00(256),0xe00 + lctlg %c0,%c15,0x200(%r0) # initialize control registers stck __LC_LAST_UPDATE_CLOCK spt 6f-.LPG0(%r13) mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) @@ -413,9 +414,9 @@ ENTRY(startup_kdump) # followed by the facility words. 
#if defined(CONFIG_MARCH_Z13) - .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 + .long 2, 0xc100eff2, 0xf46cc800 #elif defined(CONFIG_MARCH_ZEC12) - .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 + .long 2, 0xc100eff2, 0xf46cc800 #elif defined(CONFIG_MARCH_Z196) .long 2, 0xc100eff2, 0xf46c0000 #elif defined(CONFIG_MARCH_Z10) diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index a90299600483..c9dac2139f59 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -44,12 +44,9 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected, unsigned char *ipn = (unsigned char *)new; pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); - pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", - ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); - pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", - ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); - pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", - ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); + pr_emerg("Found: %6ph\n", ipc); + pr_emerg("Expected: %6ph\n", ipe); + pr_emerg("New: %6ph\n", ipn); panic("Corrupted kernel text"); } diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 56b550893593..0ae6f8e74840 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -21,6 +21,7 @@ #include <asm/nmi.h> #include <asm/crw.h> #include <asm/switch_to.h> +#include <asm/fpu-internal.h> #include <asm/ctl_reg.h> struct mcck_struct { @@ -164,8 +165,12 @@ static int notrace s390_revalidate_registers(struct mci *mci) cr0.val = S390_lowcore.cregs_save_area[0]; cr0.afp = cr0.vx = 1; __ctl_load(cr0.val, 0, 0); - restore_vx_regs((__vector128 *) - &S390_lowcore.vector_save_area); + asm volatile( + " la 1,%0\n" + " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */ + " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */ + : : "Q" (*(struct vx_array *) + &S390_lowcore.vector_save_area) : "1"); __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0); } /* Revalidate access registers */ @@ -358,4 +363,4 @@ static int __init machine_check_init(void) ctl_set_bit(14, 24); /* enable warning MCH */ return 0; } -arch_initcall(machine_check_init); +early_initcall(machine_check_init); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index afe05bfb7e00..b973972f6ba5 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1019,12 +1019,9 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) break; } - /* The host-program-parameter (hpp) contains the sie control - * block that is set by sie64a() in entry64.S. Check if hpp - * refers to a valid control block and set sde_regs flags - * accordingly. This would allow to use hpp values for other - * purposes too. - * For now, simply use a non-zero value as guest indicator. + /* The host-program-parameter (hpp) contains the pid of + * the CPU thread as set by sie64a() in entry.S. + * If non-zero assume a guest sample. 
*/ if (sfr->basic.hpp) sde_regs->in_guest = 1; diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 8f587d871b9f..f2dac9f0799d 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -81,8 +81,38 @@ void release_thread(struct task_struct *dead_task) void arch_release_task_struct(struct task_struct *tsk) { - if (tsk->thread.vxrs) - kfree(tsk->thread.vxrs); + /* Free either the floating-point or the vector register save area */ + kfree(tsk->thread.fpu.regs); +} + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + *dst = *src; + + /* Set up a new floating-point register save area */ + dst->thread.fpu.fpc = 0; + dst->thread.fpu.flags = 0; /* Always start with VX disabled */ + dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, + GFP_KERNEL|__GFP_REPEAT); + if (!dst->thread.fpu.fprs) + return -ENOMEM; + + /* + * Save the floating-point or vector register state of the current + * task. The state is not saved for early kernel threads, for example, + * the init_task, which do not have an allocated save area. + * The CIF_FPU flag is set in any case to lazy clear or restore a saved + * state when switching to a different task or returning to user space. + */ + save_fpu_regs(); + dst->thread.fpu.fpc = current->thread.fpu.fpc; + if (is_vx_task(current)) + convert_vx_to_fp(dst->thread.fpu.fprs, + current->thread.fpu.vxrs); + else + memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs, + sizeof(freg_t) * __NUM_FPRS); + return 0; } int copy_thread(unsigned long clone_flags, unsigned long new_stackp, @@ -142,11 +172,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, p->thread.ri_signum = 0; frame->childregs.psw.mask &= ~PSW_MASK_RI; - /* Save the fpu registers to new thread structure. */ - save_fp_ctl(&p->thread.fp_regs.fpc); - save_fp_regs(p->thread.fp_regs.fprs); - p->thread.fp_regs.pad = 0; - p->thread.vxrs = NULL; /* Set a new TLS ? */ if (clone_flags & CLONE_SETTLS) { unsigned long tls = frame->childregs.gprs[6]; @@ -162,7 +187,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, asmlinkage void execve_tail(void) { - current->thread.fp_regs.fpc = 0; + current->thread.fpu.fpc = 0; asm volatile("sfpc %0" : : "d" (0)); } @@ -171,8 +196,15 @@ asmlinkage void execve_tail(void) */ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) { - save_fp_ctl(&fpregs->fpc); - save_fp_regs(fpregs->fprs); + save_fpu_regs(); + fpregs->fpc = current->thread.fpu.fpc; + fpregs->pad = 0; + if (is_vx_task(current)) + convert_vx_to_fp((freg_t *)&fpregs->fprs, + current->thread.fpu.vxrs); + else + memcpy(&fpregs->fprs, current->thread.fpu.fprs, + sizeof(fpregs->fprs)); return 1; } EXPORT_SYMBOL(dump_fpu); diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index dc488e13b7e3..e6e077ae3990 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -41,6 +41,15 @@ void cpu_init(void) } /* + * cpu_have_feature - Test CPU features on module initialization + */ +int cpu_have_feature(unsigned int num) +{ + return elf_hwcap & (1UL << num); +} +EXPORT_SYMBOL(cpu_have_feature); + +/* * show_cpuinfo - Get information on one CPU for use by procfs. 
*/ static int show_cpuinfo(struct seq_file *m, void *v) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index d363c9c322a1..8b1c8e33f184 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -45,39 +45,27 @@ void update_cr_regs(struct task_struct *task) struct per_regs old, new; /* Take care of the enable/disable of transactional execution. */ - if (MACHINE_HAS_TE || MACHINE_HAS_VX) { + if (MACHINE_HAS_TE) { unsigned long cr, cr_new; __ctl_store(cr, 0, 0); - cr_new = cr; - if (MACHINE_HAS_TE) { - /* Set or clear transaction execution TXC bit 8. */ - cr_new |= (1UL << 55); - if (task->thread.per_flags & PER_FLAG_NO_TE) - cr_new &= ~(1UL << 55); - } - if (MACHINE_HAS_VX) { - /* Enable/disable of vector extension */ - cr_new &= ~(1UL << 17); - if (task->thread.vxrs) - cr_new |= (1UL << 17); - } + /* Set or clear transaction execution TXC bit 8. */ + cr_new = cr | (1UL << 55); + if (task->thread.per_flags & PER_FLAG_NO_TE) + cr_new &= ~(1UL << 55); if (cr_new != cr) __ctl_load(cr_new, 0, 0); - if (MACHINE_HAS_TE) { - /* Set/clear transaction execution TDC bits 62/63. */ - __ctl_store(cr, 2, 2); - cr_new = cr & ~3UL; - if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { - if (task->thread.per_flags & - PER_FLAG_TE_ABORT_RAND_TEND) - cr_new |= 1UL; - else - cr_new |= 2UL; - } - if (cr_new != cr) - __ctl_load(cr_new, 2, 2); + /* Set or clear transaction execution TDC bits 62 and 63. */ + __ctl_store(cr, 2, 2); + cr_new = cr & ~3UL; + if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { + if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) + cr_new |= 1UL; + else + cr_new |= 2UL; } + if (cr_new != cr) + __ctl_load(cr_new, 2, 2); } /* Copy user specified PER registers */ new.control = thread->per_user.control; @@ -242,21 +230,21 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) /* * floating point control reg. is in the thread structure */ - tmp = child->thread.fp_regs.fpc; + tmp = child->thread.fpu.fpc; tmp <<= BITS_PER_LONG - 32; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) tmp = *(addr_t *) - ((addr_t) child->thread.vxrs + 2*offset); + ((addr_t) child->thread.fpu.vxrs + 2*offset); else tmp = *(addr_t *) - ((addr_t) &child->thread.fp_regs.fprs + offset); + ((addr_t) &child->thread.fpu.fprs + offset); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* @@ -387,20 +375,20 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) if ((unsigned int) data != 0 || test_fp_ctl(data >> (BITS_PER_LONG - 32))) return -EINVAL; - child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32); + child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. 
are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) *(addr_t *)((addr_t) - child->thread.vxrs + 2*offset) = data; + child->thread.fpu.vxrs + 2*offset) = data; else *(addr_t *)((addr_t) - &child->thread.fp_regs.fprs + offset) = data; + &child->thread.fpu.fprs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* @@ -621,20 +609,20 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) /* * floating point control reg. is in the thread structure */ - tmp = child->thread.fp_regs.fpc; + tmp = child->thread.fpu.fpc; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) tmp = *(__u32 *) - ((addr_t) child->thread.vxrs + 2*offset); + ((addr_t) child->thread.fpu.vxrs + 2*offset); else tmp = *(__u32 *) - ((addr_t) &child->thread.fp_regs.fprs + offset); + ((addr_t) &child->thread.fpu.fprs + offset); } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* @@ -746,20 +734,20 @@ static int __poke_user_compat(struct task_struct *child, */ if (test_fp_ctl(tmp)) return -EINVAL; - child->thread.fp_regs.fpc = data; + child->thread.fpu.fpc = data; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) *(__u32 *)((addr_t) - child->thread.vxrs + 2*offset) = tmp; + child->thread.fpu.vxrs + 2*offset) = tmp; else *(__u32 *)((addr_t) - &child->thread.fp_regs.fprs + offset) = tmp; + &child->thread.fpu.fprs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* @@ -952,18 +940,16 @@ static int s390_fpregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - if (target == current) { - save_fp_ctl(&target->thread.fp_regs.fpc); - save_fp_regs(target->thread.fp_regs.fprs); - } else if (target->thread.vxrs) { - int i; + _s390_fp_regs fp_regs; + + if (target == current) + save_fpu_regs(); + + fp_regs.fpc = target->thread.fpu.fpc; + fpregs_store(&fp_regs, &target->thread.fpu); - for (i = 0; i < __NUM_VXRS_LOW; i++) - target->thread.fp_regs.fprs[i] = - *(freg_t *)(target->thread.vxrs + i); - } return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fp_regs, 0, -1); + &fp_regs, 0, -1); } static int s390_fpregs_set(struct task_struct *target, @@ -972,41 +958,33 @@ static int s390_fpregs_set(struct task_struct *target, const void __user *ubuf) { int rc = 0; + freg_t fprs[__NUM_FPRS]; - if (target == current) { - save_fp_ctl(&target->thread.fp_regs.fpc); - save_fp_regs(target->thread.fp_regs.fprs); - } + if (target == current) + save_fpu_regs(); /* If setting FPC, must validate it first. 
*/ if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { - u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 }; + u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc, 0, offsetof(s390_fp_regs, fprs)); if (rc) return rc; if (ufpc[1] != 0 || test_fp_ctl(ufpc[0])) return -EINVAL; - target->thread.fp_regs.fpc = ufpc[0]; + target->thread.fpu.fpc = ufpc[0]; } if (rc == 0 && count > 0) rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - target->thread.fp_regs.fprs, - offsetof(s390_fp_regs, fprs), -1); - - if (rc == 0) { - if (target == current) { - restore_fp_ctl(&target->thread.fp_regs.fpc); - restore_fp_regs(target->thread.fp_regs.fprs); - } else if (target->thread.vxrs) { - int i; - - for (i = 0; i < __NUM_VXRS_LOW; i++) - *(freg_t *)(target->thread.vxrs + i) = - target->thread.fp_regs.fprs[i]; - } - } + fprs, offsetof(s390_fp_regs, fprs), -1); + if (rc) + return rc; + + if (is_vx_task(target)) + convert_fp_to_vx(target->thread.fpu.vxrs, fprs); + else + memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); return rc; } @@ -1069,11 +1047,11 @@ static int s390_vxrs_low_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (target->thread.vxrs) { + if (is_vx_task(target)) { if (target == current) - save_vx_regs(target->thread.vxrs); + save_fpu_regs(); for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1); + vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); } else memset(vxrs, 0, sizeof(vxrs)); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); @@ -1089,20 +1067,17 @@ static int s390_vxrs_low_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!target->thread.vxrs) { + if (!is_vx_task(target)) { rc = alloc_vector_registers(target); if (rc) return rc; } else if (target == current) - save_vx_regs(target->thread.vxrs); + save_fpu_regs(); rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); - if (rc == 0) { + if (rc == 0) for (i = 0; i < __NUM_VXRS_LOW; i++) - *((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i]; - if (target == current) - restore_vx_regs(target->thread.vxrs); - } + *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i]; return rc; } @@ -1116,10 +1091,10 @@ static int s390_vxrs_high_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (target->thread.vxrs) { + if (is_vx_task(target)) { if (target == current) - save_vx_regs(target->thread.vxrs); - memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW, + save_fpu_regs(); + memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs)); } else memset(vxrs, 0, sizeof(vxrs)); @@ -1135,18 +1110,15 @@ static int s390_vxrs_high_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!target->thread.vxrs) { + if (!is_vx_task(target)) { rc = alloc_vector_registers(target); if (rc) return rc; } else if (target == current) - save_vx_regs(target->thread.vxrs); + save_fpu_regs(); rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - target->thread.vxrs + __NUM_VXRS_LOW, 0, -1); - if (rc == 0 && target == current) - restore_vx_regs(target->thread.vxrs); - + target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); return rc; } diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 9f60467938d1..5090d3dad10b 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -1,5 +1,6 @@ #include <linux/module.h> #include <linux/kvm_host.h> +#include <asm/fpu-internal.h> #include <asm/ftrace.h> #ifdef 
CONFIG_FUNCTION_TRACER @@ -8,6 +9,8 @@ EXPORT_SYMBOL(_mcount); #if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL(sie64a); EXPORT_SYMBOL(sie_exit); +EXPORT_SYMBOL(save_fpu_regs); +EXPORT_SYMBOL(__ctl_set_vx); #endif EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S deleted file mode 100644 index ada0c07fe1a8..000000000000 --- a/arch/s390/kernel/sclp.S +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Mini SCLP driver. - * - * Copyright IBM Corp. 2004, 2009 - * - * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>, - * Heiko Carstens <heiko.carstens@de.ibm.com>, - * - */ - -#include <linux/linkage.h> -#include <asm/irq.h> - -LC_EXT_NEW_PSW = 0x58 # addr of ext int handler -LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit -LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter -LC_EXT_INT_CODE = 0x86 # addr of ext int code -LC_AR_MODE_ID = 0xa3 - -# -# Subroutine which waits synchronously until either an external interruption -# or a timeout occurs. -# -# Parameters: -# R2 = 0 for no timeout, non-zero for timeout in (approximated) seconds -# -# Returns: -# R2 = 0 on interrupt, 2 on timeout -# R3 = external interruption parameter if R2=0 -# - -_sclp_wait_int: - stm %r6,%r15,24(%r15) # save registers - basr %r13,0 # get base register -.LbaseS1: - ahi %r15,-96 # create stack frame - la %r8,LC_EXT_NEW_PSW # register int handler - la %r9,.LextpswS1-.LbaseS1(%r13) - tm LC_AR_MODE_ID,1 - jno .Lesa1 - la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit - la %r9,.LextpswS1_64-.LbaseS1(%r13) -.Lesa1: - mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) - mvc 0(16,%r8),0(%r9) - epsw %r6,%r7 # set current addressing mode - nill %r6,0x1 # in new psw (31 or 64 bit mode) - nilh %r7,0x8000 - stm %r6,%r7,0(%r8) - lhi %r6,0x0200 # cr mask for ext int (cr0.54) - ltr %r2,%r2 - jz .LsetctS1 - ahi %r6,0x0800 # cr mask for clock int (cr0.52) - stck .LtimeS1-.LbaseS1(%r13) # initiate timeout - al %r2,.LtimeS1-.LbaseS1(%r13) - st %r2,.LtimeS1-.LbaseS1(%r13) - sckc .LtimeS1-.LbaseS1(%r13) - -.LsetctS1: - stctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # enable required interrupts - l %r0,.LctlS1-.LbaseS1(%r13) - lhi %r1,~(0x200 | 0x800) # clear old values - nr %r1,%r0 - or %r1,%r6 # set new value - st %r1,.LctlS1-.LbaseS1(%r13) - lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) - st %r0,.LctlS1-.LbaseS1(%r13) - lhi %r2,2 # return code for timeout -.LloopS1: - lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt -.LwaitS1: - lh %r7,LC_EXT_INT_CODE - chi %r7,EXT_IRQ_CLK_COMP # timeout? - je .LtimeoutS1 - chi %r7,EXT_IRQ_SERVICE_SIG # service int? - jne .LloopS1 - sr %r2,%r2 - l %r3,LC_EXT_INT_PARAM -.LtimeoutS1: - lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting - # restore old handler - mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13) - lm %r6,%r15,120(%r15) # restore registers - br %r14 # return to caller - - .align 8 -.LoldpswS1: - .long 0, 0, 0, 0 # old ext int PSW -.LextpswS1: - .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int -.LextpswS1_64: - .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit -.LwaitpswS1: - .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int -.LtimeS1: - .quad 0 # current time -.LctlS1: - .long 0 # CT0 contents - -# -# Subroutine to synchronously issue a service call. 
-# -# Parameters: -# R2 = command word -# R3 = sccb address -# -# Returns: -# R2 = 0 on success, 1 on failure -# R3 = sccb response code if R2 = 0 -# - -_sclp_servc: - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - lr %r6,%r2 # save command word - lr %r7,%r3 # save sccb address -.LretryS2: - lhi %r2,1 # error return code - .insn rre,0xb2200000,%r6,%r7 # servc - brc 1,.LendS2 # exit if not operational - brc 8,.LnotbusyS2 # go on if not busy - sr %r2,%r2 # wait until no longer busy - bras %r14,_sclp_wait_int - j .LretryS2 # retry -.LnotbusyS2: - sr %r2,%r2 # wait until result - bras %r14,_sclp_wait_int - sr %r2,%r2 - lh %r3,6(%r7) -.LendS2: - lm %r6,%r15,120(%r15) # restore registers - br %r14 - -# -# Subroutine to set up the SCLP interface. -# -# Parameters: -# R2 = 0 to activate, non-zero to deactivate -# -# Returns: -# R2 = 0 on success, non-zero on failure -# - -_sclp_setup: - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - basr %r13,0 # get base register -.LbaseS3: - l %r6,.LsccbS0-.LbaseS3(%r13) # prepare init mask sccb - mvc 0(.LinitendS3-.LinitsccbS3,%r6),.LinitsccbS3-.LbaseS3(%r13) - ltr %r2,%r2 # initialization? - jz .LdoinitS3 # go ahead - # clear masks - xc .LinitmaskS3-.LinitsccbS3(8,%r6),.LinitmaskS3-.LinitsccbS3(%r6) -.LdoinitS3: - l %r2,.LwritemaskS3-.LbaseS3(%r13)# get command word - lr %r3,%r6 # get sccb address - bras %r14,_sclp_servc # issue service call - ltr %r2,%r2 # servc successful? - jnz .LerrorS3 - chi %r3,0x20 # write mask successful? - jne .LerrorS3 - # check masks - la %r2,.LinitmaskS3-.LinitsccbS3(%r6) - l %r1,0(%r2) # receive mask ok? - n %r1,12(%r2) - cl %r1,0(%r2) - jne .LerrorS3 - l %r1,4(%r2) # send mask ok? - n %r1,8(%r2) - cl %r1,4(%r2) - sr %r2,%r2 - je .LendS3 -.LerrorS3: - lhi %r2,1 # error return code -.LendS3: - lm %r6,%r15,120(%r15) # restore registers - br %r14 -.LwritemaskS3: - .long 0x00780005 # SCLP command for write mask -.LinitsccbS3: - .word .LinitendS3-.LinitsccbS3 - .byte 0,0,0,0 - .word 0 - .word 0 - .word 4 -.LinitmaskS3: - .long 0x80000000 - .long 0x40000000 - .long 0 - .long 0 -.LinitendS3: - -# -# Subroutine which prints a given text to the SCLP console. -# -# Parameters: -# R2 = address of nil-terminated ASCII text -# -# Returns: -# R2 = 0 on success, 1 on failure -# - -_sclp_print: - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - basr %r13,0 # get base register -.LbaseS4: - l %r8,.LsccbS0-.LbaseS4(%r13) # prepare write data sccb - mvc 0(.LmtoS4-.LwritesccbS4,%r8),.LwritesccbS4-.LbaseS4(%r13) - la %r7,.LmtoS4-.LwritesccbS4(%r8) # current mto addr - sr %r0,%r0 - l %r10,.Lascebc-.LbaseS4(%r13) # address of translation table -.LinitmtoS4: - # initialize mto - mvc 0(.LmtoendS4-.LmtoS4,%r7),.LmtoS4-.LbaseS4(%r13) - lhi %r6,.LmtoendS4-.LmtoS4 # current mto length -.LloopS4: - ic %r0,0(%r2) # get character - ahi %r2,1 - ltr %r0,%r0 # end of string? - jz .LfinalizemtoS4 - chi %r0,0x0a # end of line (NL)? - jz .LfinalizemtoS4 - stc %r0,0(%r6,%r7) # copy to mto - la %r11,0(%r6,%r7) - tr 0(1,%r11),0(%r10) # translate to EBCDIC - ahi %r6,1 - j .LloopS4 -.LfinalizemtoS4: - sth %r6,0(%r7) # update mto length - lh %r9,.LmdbS4-.LwritesccbS4(%r8) # update mdb length - ar %r9,%r6 - sth %r9,.LmdbS4-.LwritesccbS4(%r8) - lh %r9,.LevbufS4-.LwritesccbS4(%r8)# update evbuf length - ar %r9,%r6 - sth %r9,.LevbufS4-.LwritesccbS4(%r8) - lh %r9,0(%r8) # update sccb length - ar %r9,%r6 - sth %r9,0(%r8) - ar %r7,%r6 # update current mto address - ltr %r0,%r0 # more characters? 
- jnz .LinitmtoS4 - l %r2,.LwritedataS4-.LbaseS4(%r13)# write data - lr %r3,%r8 - bras %r14,_sclp_servc - ltr %r2,%r2 # servc successful? - jnz .LendS4 - chi %r3,0x20 # write data successful? - je .LendS4 - lhi %r2,1 # error return code -.LendS4: - lm %r6,%r15,120(%r15) # restore registers - br %r14 - -# -# Function which prints a given text to the SCLP console. -# -# Parameters: -# R2 = address of nil-terminated ASCII text -# -# Returns: -# R2 = 0 on success, 1 on failure -# - -ENTRY(_sclp_print_early) - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - tm LC_AR_MODE_ID,1 - jno .Lesa2 - ahi %r15,-80 - stmh %r6,%r15,96(%r15) # store upper register halves - basr %r13,0 - lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves -.Lesa2: - lr %r10,%r2 # save string pointer - lhi %r2,0 - bras %r14,_sclp_setup # enable console - ltr %r2,%r2 - jnz .LendS5 - lr %r2,%r10 - bras %r14,_sclp_print # print string - ltr %r2,%r2 - jnz .LendS5 - lhi %r2,1 - bras %r14,_sclp_setup # disable console -.LendS5: - tm LC_AR_MODE_ID,1 - jno .Lesa3 - lgfr %r2,%r2 # sign extend return value - lmh %r6,%r15,96(%r15) # restore upper register halves - ahi %r15,80 -.Lesa3: - lm %r6,%r15,120(%r15) # restore registers - br %r14 -.Lzeroes: - .fill 64,4,0 - -.LwritedataS4: - .long 0x00760005 # SCLP command for write data -.LwritesccbS4: - # sccb - .word .LmtoS4-.LwritesccbS4 - .byte 0 - .byte 0,0,0 - .word 0 - - # evbuf -.LevbufS4: - .word .LmtoS4-.LevbufS4 - .byte 0x02 - .byte 0 - .word 0 - -.LmdbS4: - # mdb - .word .LmtoS4-.LmdbS4 - .word 1 - .long 0xd4c4c240 - .long 1 - - # go -.LgoS4: - .word .LmtoS4-.LgoS4 - .word 1 - .long 0 - .byte 0,0,0,0,0,0,0,0 - .byte 0,0,0 - .byte 0 - .byte 0,0,0,0,0,0,0 - .byte 0 - .word 0 - .byte 0,0,0,0,0,0,0,0,0,0 - .byte 0,0,0,0,0,0,0,0 - .byte 0,0,0,0,0,0,0,0 - -.LmtoS4: - .word .LmtoendS4-.LmtoS4 - .word 4 - .word 0x1000 - .byte 0 - .byte 0,0,0 -.LmtoendS4: - - # Global constants -.LsccbS0: - .long _sclp_work_area -.Lascebc: - .long _ascebc - -.section .data,"aw",@progbits - .balign 4096 -_sclp_work_area: - .fill 4096 -.previous diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c new file mode 100644 index 000000000000..fa0bdff1d413 --- /dev/null +++ b/arch/s390/kernel/sclp.c @@ -0,0 +1,160 @@ +/* + * Copyright IBM Corp. 
2015 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ +#include <linux/kernel.h> +#include <asm/ebcdic.h> +#include <asm/irq.h> +#include <asm/lowcore.h> +#include <asm/processor.h> +#include <asm/sclp.h> + +static char _sclp_work_area[4096] __aligned(PAGE_SIZE); + +static void _sclp_wait_int(void) +{ + unsigned long cr0, cr0_new, psw_mask, addr; + psw_t psw_ext_save, psw_wait; + + __ctl_store(cr0, 0, 0); + cr0_new = cr0 | 0x200; + __ctl_load(cr0_new, 0, 0); + + psw_ext_save = S390_lowcore.external_new_psw; + psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA); + S390_lowcore.external_new_psw.mask = psw_mask; + psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; + S390_lowcore.ext_int_code = 0; + + do { + asm volatile( + " larl %[addr],0f\n" + " stg %[addr],%[psw_wait_addr]\n" + " stg %[addr],%[psw_ext_addr]\n" + " lpswe %[psw_wait]\n" + "0:\n" + : [addr] "=&d" (addr), + [psw_wait_addr] "=Q" (psw_wait.addr), + [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr) + : [psw_wait] "Q" (psw_wait) + : "cc", "memory"); + } while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG); + + __ctl_load(cr0, 0, 0); + S390_lowcore.external_new_psw = psw_ext_save; +} + +static int _sclp_servc(unsigned int cmd, char *sccb) +{ + unsigned int cc; + + do { + asm volatile( + " .insn rre,0xb2200000,%1,%2\n" + " ipm %0\n" + : "=d" (cc) : "d" (cmd), "a" (sccb) + : "cc", "memory"); + cc >>= 28; + if (cc == 3) + return -EINVAL; + _sclp_wait_int(); + } while (cc != 0); + return (*(unsigned short *)(sccb + 6) == 0x20) ? 0 : -EIO; +} + +static int _sclp_setup(int disable) +{ + static unsigned char init_sccb[] = { + 0x00, 0x1c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, + 0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }; + unsigned int *masks; + int rc; + + memcpy(_sclp_work_area, init_sccb, 28); + masks = (unsigned int *)(_sclp_work_area + 12); + if (disable) + memset(masks, 0, 16); + /* SCLP write mask */ + rc = _sclp_servc(0x00780005, _sclp_work_area); + if (rc) + return rc; + if ((masks[0] & masks[3]) != masks[0] || + (masks[1] & masks[2]) != masks[1]) + return -EIO; + return 0; +} + +static int _sclp_print(const char *str) +{ + static unsigned char write_head[] = { + /* sccb header */ + 0x00, 0x52, /* 0 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */ + /* evbuf */ + 0x00, 0x4a, /* 8 */ + 0x02, 0x00, 0x00, 0x00, /* 10 */ + /* mdb */ + 0x00, 0x44, /* 14 */ + 0x00, 0x01, /* 16 */ + 0xd4, 0xc4, 0xc2, 0x40, /* 18 */ + 0x00, 0x00, 0x00, 0x01, /* 22 */ + /* go */ + 0x00, 0x38, /* 26 */ + 0x00, 0x01, /* 28 */ + 0x00, 0x00, 0x00, 0x00, /* 30 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */ + 0x00, 0x00, 0x00, 0x00, /* 50 */ + 0x00, 0x00, /* 54 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */ + 0x00, 0x00, /* 80 */ + }; + static unsigned char write_mto[] = { + /* mto */ + 0x00, 0x0a, /* 0 */ + 0x00, 0x04, /* 2 */ + 0x10, 0x00, /* 4 */ + 0x00, 0x00, 0x00, 0x00 /* 6 */ + }; + unsigned char *ptr, ch; + unsigned int count; + + memcpy(_sclp_work_area, write_head, sizeof(write_head)); + ptr = _sclp_work_area + sizeof(write_head); + do { + memcpy(ptr, write_mto, sizeof(write_mto)); + for (count = sizeof(write_mto); (ch = *str++) != 0; count++) { + if (ch == 0x0a) + break; + ptr[count] = _ascebc[ch]; + } + /* Update length fields in 
mto, mdb, evbuf and sccb */ + *(unsigned short *) ptr = count; + *(unsigned short *)(_sclp_work_area + 14) += count; + *(unsigned short *)(_sclp_work_area + 8) += count; + *(unsigned short *)(_sclp_work_area + 0) += count; + ptr += count; + } while (ch != 0); + + /* SCLP write data */ + return _sclp_servc(0x00760005, _sclp_work_area); +} + +int _sclp_print_early(const char *str) +{ + int rc; + + rc = _sclp_setup(0); + if (rc) + return rc; + rc = _sclp_print(str); + if (rc) + return rc; + return _sclp_setup(1); +} diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index ca070d260af2..ce0cbd6ba7ca 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -62,6 +62,7 @@ #include <asm/os_info.h> #include <asm/sclp.h> #include <asm/sysinfo.h> +#include <asm/numa.h> #include "entry.h" /* @@ -76,7 +77,7 @@ EXPORT_SYMBOL(console_devno); unsigned int console_irq = -1; EXPORT_SYMBOL(console_irq); -unsigned long elf_hwcap = 0; +unsigned long elf_hwcap __read_mostly = 0; char elf_platform[ELF_PLATFORM_SIZE]; int __initdata memory_end_set; @@ -688,7 +689,7 @@ static void __init setup_memory(void) /* * Setup hardware capabilities. */ -static void __init setup_hwcaps(void) +static int __init setup_hwcaps(void) { static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; struct cpuid cpu_id; @@ -754,9 +755,11 @@ static void __init setup_hwcaps(void) elf_hwcap |= HWCAP_S390_TE; /* - * Vector extension HWCAP_S390_VXRS is bit 11. + * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension + * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX + * instead of facility bit 129. */ - if (test_facility(129)) + if (MACHINE_HAS_VX) elf_hwcap |= HWCAP_S390_VXRS; get_cpu_id(&cpu_id); add_device_randomness(&cpu_id, sizeof(cpu_id)); @@ -793,7 +796,9 @@ static void __init setup_hwcaps(void) strcpy(elf_platform, "z13"); break; } + return 0; } +arch_initcall(setup_hwcaps); /* * Add system information as device randomness @@ -879,11 +884,7 @@ void __init setup_arch(char **cmdline_p) setup_lowcore(); smp_fill_possible_mask(); cpu_init(); - - /* - * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). - */ - setup_hwcaps(); + numa_setup(); /* * Create kernel page tables and switch to virtual addressing. diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index c551f22ce066..9549af102d75 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -105,32 +105,13 @@ struct rt_sigframe static void store_sigregs(void) { save_access_regs(current->thread.acrs); - save_fp_ctl(¤t->thread.fp_regs.fpc); - if (current->thread.vxrs) { - int i; - - save_vx_regs(current->thread.vxrs); - for (i = 0; i < __NUM_FPRS; i++) - current->thread.fp_regs.fprs[i] = - *(freg_t *)(current->thread.vxrs + i); - } else - save_fp_regs(current->thread.fp_regs.fprs); + save_fpu_regs(); } /* Load registers after signal return */ static void load_sigregs(void) { restore_access_regs(current->thread.acrs); - /* restore_fp_ctl is done in restore_sigregs */ - if (current->thread.vxrs) { - int i; - - for (i = 0; i < __NUM_FPRS; i++) - *(freg_t *)(current->thread.vxrs + i) = - current->thread.fp_regs.fprs[i]; - restore_vx_regs(current->thread.vxrs); - } else - restore_fp_regs(current->thread.fp_regs.fprs); } /* Returns non-zero on fault. 
*/ @@ -146,8 +127,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); memcpy(&user_sregs.regs.acrs, current->thread.acrs, sizeof(user_sregs.regs.acrs)); - memcpy(&user_sregs.fpregs, ¤t->thread.fp_regs, - sizeof(user_sregs.fpregs)); + fpregs_store(&user_sregs.fpregs, ¤t->thread.fpu); if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs))) return -EFAULT; return 0; @@ -166,8 +146,8 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI)) return -EINVAL; - /* Loading the floating-point-control word can fail. Do that first. */ - if (restore_fp_ctl(&user_sregs.fpregs.fpc)) + /* Test the floating-point-control word. */ + if (test_fp_ctl(user_sregs.fpregs.fpc)) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ @@ -185,8 +165,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, sizeof(current->thread.acrs)); - memcpy(¤t->thread.fp_regs, &user_sregs.fpregs, - sizeof(current->thread.fp_regs)); + fpregs_load(&user_sregs.fpregs, ¤t->thread.fpu); clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ return 0; @@ -200,13 +179,13 @@ static int save_sigregs_ext(struct pt_regs *regs, int i; /* Save vector registers to signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1); + vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, sizeof(sregs_ext->vxrs_low)) || __copy_to_user(&sregs_ext->vxrs_high, - current->thread.vxrs + __NUM_VXRS_LOW, + current->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(sregs_ext->vxrs_high))) return -EFAULT; } @@ -220,15 +199,15 @@ static int restore_sigregs_ext(struct pt_regs *regs, int i; /* Restore vector registers from signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || - __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW, + __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, &sregs_ext->vxrs_high, sizeof(sregs_ext->vxrs_high))) return -EFAULT; for (i = 0; i < __NUM_VXRS_LOW; i++) - *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; + *((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i]; } return 0; } @@ -243,6 +222,7 @@ SYSCALL_DEFINE0(sigreturn) if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) goto badframe; set_current_blocked(&set); + save_fpu_regs(); if (restore_sigregs(regs, &frame->sregs)) goto badframe; if (restore_sigregs_ext(regs, &frame->sregs_ext)) @@ -266,6 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn) set_current_blocked(&set); if (restore_altstack(&frame->uc.uc_stack)) goto badframe; + save_fpu_regs(); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext)) @@ -400,7 +381,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, uc_flags = 0; if (MACHINE_HAS_VX) { frame_size += sizeof(_sigregs_ext); - if (current->thread.vxrs) + if (is_vx_task(current)) uc_flags |= UC_VXRS; } frame = get_sigframe(&ksig->ka, regs, frame_size); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6f54c175f5c9..c6355e6f3fcc 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -532,8 
+532,8 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); #ifdef CONFIG_CRASH_DUMP -static void __smp_store_cpu_state(struct save_area_ext *sa_ext, u16 address, - int is_boot_cpu) +static void __init __smp_store_cpu_state(struct save_area_ext *sa_ext, + u16 address, int is_boot_cpu) { void *lc = (void *)(unsigned long) store_prefix(); unsigned long vx_sa; diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 1acad02681c4..f3f4a137aef6 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -276,9 +276,9 @@ SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64) SYSCALL(sys_statfs64,compat_sys_statfs64) SYSCALL(sys_fstatfs64,compat_sys_fstatfs64) SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages) -NI_SYSCALL /* 268 sys_mbind */ -NI_SYSCALL /* 269 sys_get_mempolicy */ -NI_SYSCALL /* 270 sys_set_mempolicy */ +SYSCALL(sys_mbind,compat_sys_mbind) +SYSCALL(sys_get_mempolicy,compat_sys_get_mempolicy) +SYSCALL(sys_set_mempolicy,compat_sys_set_mempolicy) SYSCALL(sys_mq_open,compat_sys_mq_open) SYSCALL(sys_mq_unlink,compat_sys_mq_unlink) SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend) @@ -295,7 +295,7 @@ SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) SYSCALL(sys_inotify_init,sys_inotify_init) SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) -NI_SYSCALL /* 287 sys_migrate_pages */ +SYSCALL(sys_migrate_pages,compat_sys_migrate_pages) SYSCALL(sys_openat,compat_sys_openat) SYSCALL(sys_mkdirat,compat_sys_mkdirat) SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */ @@ -318,7 +318,7 @@ SYSCALL(sys_splice,compat_sys_splice) SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range) SYSCALL(sys_tee,compat_sys_tee) SYSCALL(sys_vmsplice,compat_sys_vmsplice) -NI_SYSCALL /* 310 sys_move_pages */ +SYSCALL(sys_move_pages,compat_sys_move_pages) SYSCALL(sys_getcpu,compat_sys_getcpu) SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait) SYSCALL(sys_utimes,compat_sys_utimes) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 9e733d965e08..627887b075a7 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -58,6 +58,9 @@ EXPORT_SYMBOL_GPL(sched_clock_base_cc); static DEFINE_PER_CPU(struct clock_event_device, comparators); +ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier); +EXPORT_SYMBOL(s390_epoch_delta_notifier); + /* * Scheduler clock - returns current time in nanosec units. */ @@ -752,7 +755,7 @@ static void clock_sync_cpu(struct clock_sync_data *sync) static int etr_sync_clock(void *data) { static int first; - unsigned long long clock, old_clock, delay, delta; + unsigned long long clock, old_clock, clock_delta, delay, delta; struct clock_sync_data *etr_sync; struct etr_aib *sync_port, *aib; int port; @@ -789,6 +792,9 @@ static int etr_sync_clock(void *data) delay = (unsigned long long) (aib->edf2.etv - sync_port->edf2.etv) << 32; delta = adjust_time(old_clock, clock, delay); + clock_delta = clock - old_clock; + atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, + &clock_delta); etr_sync->fixup_cc = delta; fixup_clock_comparator(delta); /* Verify that the clock is properly set. 
*/ @@ -1526,7 +1532,7 @@ void stp_island_check(void) static int stp_sync_clock(void *data) { static int first; - unsigned long long old_clock, delta; + unsigned long long old_clock, delta, new_clock, clock_delta; struct clock_sync_data *stp_sync; int rc; @@ -1551,7 +1557,11 @@ static int stp_sync_clock(void *data) old_clock = get_tod_clock(); rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); if (rc == 0) { - delta = adjust_time(old_clock, get_tod_clock(), 0); + new_clock = get_tod_clock(); + delta = adjust_time(old_clock, new_clock, 0); + clock_delta = new_clock - old_clock; + atomic_notifier_call_chain(&s390_epoch_delta_notifier, + 0, &clock_delta); fixup_clock_comparator(delta); rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 5728c5bd44a8..bf05e7fc3e70 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -18,7 +18,10 @@ #include <linux/cpu.h> #include <linux/smp.h> #include <linux/mm.h> +#include <linux/nodemask.h> +#include <linux/node.h> #include <asm/sysinfo.h> +#include <asm/numa.h> #define PTF_HORIZONTAL (0UL) #define PTF_VERTICAL (1UL) @@ -37,8 +40,10 @@ static struct sysinfo_15_1_x *tl_info; static int topology_enabled = 1; static DECLARE_WORK(topology_work, topology_work_fn); -/* topology_lock protects the socket and book linked lists */ -static DEFINE_SPINLOCK(topology_lock); +/* + * Socket/Book linked lists and per_cpu(cpu_topology) updates are + * protected by "sched_domains_mutex". + */ static struct mask_info socket_info; static struct mask_info book_info; @@ -188,7 +193,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info) { struct cpuid cpu_id; - spin_lock_irq(&topology_lock); get_cpu_id(&cpu_id); clear_masks(); switch (cpu_id.machine) { @@ -199,7 +203,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info) default: __tl_to_masks_generic(info); } - spin_unlock_irq(&topology_lock); } static void topology_update_polarization_simple(void) @@ -244,10 +247,8 @@ int topology_set_cpu_management(int fc) static void update_cpu_masks(void) { - unsigned long flags; int cpu; - spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); @@ -259,7 +260,7 @@ static void update_cpu_masks(void) per_cpu(cpu_topology, cpu).book_id = cpu; } } - spin_unlock_irqrestore(&topology_lock, flags); + numa_update_cpu_topology(); } void store_topology(struct sysinfo_15_1_x *info) @@ -274,21 +275,21 @@ int arch_update_cpu_topology(void) { struct sysinfo_15_1_x *info = tl_info; struct device *dev; - int cpu; + int cpu, rc = 0; - if (!MACHINE_HAS_TOPOLOGY) { - update_cpu_masks(); - topology_update_polarization_simple(); - return 0; + if (MACHINE_HAS_TOPOLOGY) { + rc = 1; + store_topology(info); + tl_to_masks(info); } - store_topology(info); - tl_to_masks(info); update_cpu_masks(); + if (!MACHINE_HAS_TOPOLOGY) + topology_update_polarization_simple(); for_each_online_cpu(cpu) { dev = get_cpu_device(cpu); kobject_uevent(&dev->kobj, KOBJ_CHANGE); } - return 1; + return rc; } static void topology_work_fn(struct work_struct *work) diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 7bea81d8a363..9861613fb35a 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -19,7 +19,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/slab.h> -#include <asm/switch_to.h> +#include <asm/fpu-internal.h> #include 
"entry.h" int show_unhandled_signals = 1; @@ -151,7 +151,7 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, "transaction constraint exception") -static inline void do_fp_trap(struct pt_regs *regs, int fpc) +static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc) { int si_code = 0; /* FPC[2] is Data Exception Code */ @@ -227,7 +227,7 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, int alloc_vector_registers(struct task_struct *tsk) { __vector128 *vxrs; - int i; + freg_t *fprs; /* Allocate vector register save area. */ vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS, @@ -236,15 +236,13 @@ int alloc_vector_registers(struct task_struct *tsk) return -ENOMEM; preempt_disable(); if (tsk == current) - save_fp_regs(tsk->thread.fp_regs.fprs); + save_fpu_regs(); /* Copy the 16 floating point registers */ - for (i = 0; i < 16; i++) - *(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i]; - tsk->thread.vxrs = vxrs; - if (tsk == current) { - __ctl_set_bit(0, 17); - restore_vx_regs(vxrs); - } + convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs); + fprs = tsk->thread.fpu.fprs; + tsk->thread.fpu.vxrs = vxrs; + tsk->thread.fpu.flags |= FPU_USE_VX; + kfree(fprs); preempt_enable(); return 0; } @@ -259,8 +257,8 @@ void vector_exception(struct pt_regs *regs) } /* get vector interrupt code from fpc */ - asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc)); - vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; + save_fpu_regs(); + vic = (current->thread.fpu.fpc & 0xf00) >> 8; switch (vic) { case 1: /* invalid vector operation */ si_code = FPE_FLTINV; @@ -297,22 +295,22 @@ void data_exception(struct pt_regs *regs) location = get_trap_ip(regs); - asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc)); + save_fpu_regs(); /* Check for vector register enablement */ - if (MACHINE_HAS_VX && !current->thread.vxrs && - (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { + if (MACHINE_HAS_VX && !is_vx_task(current) && + (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) { alloc_vector_registers(current); /* Vector data exception is suppressing, rewind psw. 
*/ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); clear_pt_regs_flag(regs, PIF_PER_TRAP); return; } - if (current->thread.fp_regs.fpc & FPC_DXC_MASK) + if (current->thread.fpu.fpc & FPC_DXC_MASK) signal = SIGFPE; else signal = SIGILL; if (signal == SIGFPE) - do_fp_trap(regs, current->thread.fp_regs.fpc); + do_fp_trap(regs, current->thread.fpu.fpc); else if (signal) do_trap(regs, signal, ILL_ILLOPN, "data exception"); } diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index 8ad2b34ad151..ee8a18e50a25 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -13,7 +13,7 @@ KBUILD_AFLAGS_31 += -m31 -s KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + $(call cc-ldoption, -Wl$(comma)--hash-style=both) $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31) diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 2a8ddfd12a5b..c4b03f9ed228 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -13,7 +13,7 @@ KBUILD_AFLAGS_64 += -m64 -s KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + $(call cc-ldoption, -Wl$(comma)--hash-style=both) $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e53d3595a7c8..b9ce650e9e99 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -28,6 +28,7 @@ static atomic64_t virt_timer_elapsed; static DEFINE_PER_CPU(u64, mt_cycles[32]); static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; +static DEFINE_PER_CPU(u64, mt_scaling_jiffies); static inline u64 get_vtimer(void) { @@ -85,7 +86,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; /* Do MT utilization calculation */ - if (smp_cpu_mtid) { + if (smp_cpu_mtid && + time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { u64 cycles_new[32], *cycles_old; u64 delta, mult, div; @@ -105,6 +107,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) sizeof(u64) * (smp_cpu_mtid + 1)); } } + __this_cpu_write(mt_scaling_jiffies, jiffies_64); } user = S390_lowcore.user_timer - ti->user_timer; @@ -376,4 +379,11 @@ void vtime_init(void) { /* set initial cpu timer */ set_vtimer(VTIMER_MAX_SLICE); + /* Setup initial MT scaling values */ + if (smp_cpu_mtid) { + __this_cpu_write(mt_scaling_jiffies, jiffies); + __this_cpu_write(mt_scaling_mult, 1); + __this_cpu_write(mt_scaling_div, 1); + stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles)); + } } diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index fc7ec95848c3..5fbfb88f8477 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -27,13 +27,13 @@ static int diag_release_pages(struct kvm_vcpu *vcpu) start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; + vcpu->stat.diagnose_10++; 
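The vtime.c hunk above throttles the multithreading-utilization calculation: the expensive counter sampling is only redone once the per-CPU mt_scaling_jiffies stamp has been passed. A minimal, self-contained sketch of that per-CPU jiffies throttle follows; it is illustrative only, not part of the patch, and the example_* names are made up.

#include <linux/jiffies.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, example_last_sample);

static void example_sample_if_due(void)
{
	/* Skip the expensive work unless at least one jiffy has passed on this CPU. */
	if (!time_after64(jiffies_64, __this_cpu_read(example_last_sample)))
		return;

	/* ... expensive per-CPU counter sampling would go here ... */

	/* Remember when this CPU last sampled. */
	__this_cpu_write(example_last_sample, jiffies_64);
}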
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end || start < 2 * PAGE_SIZE) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); - vcpu->stat.diagnose_10++; /* * We checked for start >= end above, so lets check for the @@ -75,6 +75,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); + VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx", + vcpu->run->s.regs.gprs[rx]); + vcpu->stat.diagnose_258++; if (vcpu->run->s.regs.gprs[rx] & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); @@ -85,6 +88,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) switch (parm.subcode) { case 0: /* TOKEN */ + VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx " + "select mask 0x%llx compare mask 0x%llx", + parm.token_addr, parm.select_mask, parm.compare_mask); if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) { /* * If the pagefault handshake is already activated, @@ -114,6 +120,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) * the cancel, therefore to reduce code complexity, we assume * all outstanding tokens are already pending. */ + VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr); if (parm.token_addr || parm.select_mask || parm.compare_mask || parm.zarch) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -174,7 +181,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff; - VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); + VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode); + vcpu->stat.diagnose_308++; switch (subcode) { case 3: vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; @@ -202,6 +210,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) { int ret; + vcpu->stat.diagnose_500++; /* No virtio-ccw notification? Get out quickly. */ if (!vcpu->kvm->arch.css_support || (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY)) diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index e97b3455d7e6..47518a324d75 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -473,10 +473,45 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->iprcc &= ~PGM_PER; } +#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH) +#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH) +#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1) +#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff) + void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu) { + int new_as; + if (debug_exit_required(vcpu)) vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING; filter_guest_per_event(vcpu); + + /* + * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger + * a space-switch event. PER events enforce space-switch events + * for these instructions. So if no PER event for the guest is left, + * we might have to filter the space-switch element out, too. + */ + if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) { + vcpu->arch.sie_block->iprcc = 0; + new_as = psw_bits(vcpu->arch.sie_block->gpsw).as; + + /* + * If the AS changed from / to home, we had RP, SAC or SACF + * instruction. 
Check primary and home space-switch-event + * controls. (theoretically home -> home produced no event) + */ + if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) && + (pssec(vcpu) || hssec(vcpu))) + vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; + + /* + * PT, PTI, PR, PC instruction operate on primary AS only. Check + * if the primary-space-switch-event control was or got set. + */ + if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) && + (pssec(vcpu) || old_ssec(vcpu))) + vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; + } } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c98d89708e99..b277d50dcf76 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -30,7 +30,6 @@ #define IOINT_SCHID_MASK 0x0000ffff #define IOINT_SSID_MASK 0x00030000 #define IOINT_CSSID_MASK 0x03fc0000 -#define IOINT_AI_MASK 0x04000000 #define PFAULT_INIT 0x0600 #define PFAULT_DONE 0x0680 #define VIRTIO_PARAM 0x0d00 @@ -72,9 +71,13 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) static int ckc_irq_pending(struct kvm_vcpu *vcpu) { + preempt_disable(); if (!(vcpu->arch.sie_block->ckc < - get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) + get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) { + preempt_enable(); return 0; + } + preempt_enable(); return ckc_interrupts_enabled(vcpu); } @@ -311,8 +314,8 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) li->irq.ext.ext_params2 = 0; spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx", - 0, ext.ext_params2); + VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx", + ext.ext_params2); trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, 0, ext.ext_params2); @@ -368,7 +371,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) spin_unlock(&fi->lock); if (deliver) { - VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", + VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx", mchk.mcic); trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, @@ -403,7 +406,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc; - VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); + VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart"); vcpu->stat.deliver_restart_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); @@ -427,7 +430,6 @@ static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address); vcpu->stat.deliver_prefix_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, @@ -450,7 +452,7 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); + VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg"); vcpu->stat.deliver_emergency_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, cpu_addr, 0); @@ -477,7 +479,7 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); + VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call"); vcpu->stat.deliver_external_call++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 
KVM_S390_INT_EXTERNAL_CALL, @@ -506,7 +508,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) memset(&li->irq.pgm, 0, sizeof(pgm_info)); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", + VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d", pgm_info.code, ilc); vcpu->stat.deliver_program_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, @@ -622,7 +624,7 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); spin_unlock(&fi->lock); - VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", + VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x", ext.ext_params); vcpu->stat.deliver_service_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, @@ -651,9 +653,6 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) struct kvm_s390_interrupt_info, list); if (inti) { - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, - KVM_S390_INT_PFAULT_DONE, 0, - inti->ext.ext_params2); list_del(&inti->list); fi->counters[FIRQ_CNTR_PFAULT] -= 1; } @@ -662,6 +661,12 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) spin_unlock(&fi->lock); if (inti) { + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_INT_PFAULT_DONE, 0, + inti->ext.ext_params2); + VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx", + inti->ext.ext_params2); + rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); rc |= put_guest_lc(vcpu, PFAULT_DONE, @@ -691,7 +696,7 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) list); if (inti) { VCPU_EVENT(vcpu, 4, - "interrupt: virtio parm:%x,parm64:%llx", + "deliver: virtio parm: 0x%x,parm64: 0x%llx", inti->ext.ext_params, inti->ext.ext_params2); vcpu->stat.deliver_virtio_interrupt++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, @@ -741,7 +746,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt_info, list); if (inti) { - VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); + VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type); vcpu->stat.deliver_io_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, @@ -855,7 +860,9 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) goto no_timer; } + preempt_disable(); now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; + preempt_enable(); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* underflow */ @@ -864,7 +871,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) __set_cpu_idle(vcpu); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); - VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); + VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime); no_timer: srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); @@ -894,7 +901,9 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) u64 now, sltime; vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); + preempt_disable(); now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; + preempt_enable(); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* @@ -968,6 +977,10 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, + irq->u.pgm.code, 0); + li->irq.pgm = 
irq->u.pgm; set_bit(IRQ_PEND_PROG, &li->pending_irqs); return 0; @@ -978,9 +991,6 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_irq irq; - VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code, - 0, 1); spin_lock(&li->lock); irq.u.pgm.code = code; __inject_prog(vcpu, &irq); @@ -996,10 +1006,6 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, struct kvm_s390_irq irq; int rc; - VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", - pgm_info->code); - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, - pgm_info->code, 0, 1); spin_lock(&li->lock); irq.u.pgm = *pgm_info; rc = __inject_prog(vcpu, &irq); @@ -1012,11 +1018,11 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx", - irq->u.ext.ext_params, irq->u.ext.ext_params2); + VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx", + irq->u.ext.ext_params2); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, irq->u.ext.ext_params, - irq->u.ext.ext_params2, 2); + irq->u.ext.ext_params2); li->irq.ext = irq->u.ext; set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); @@ -1045,10 +1051,10 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_extcall_info *extcall = &li->irq.extcall; uint16_t src_id = irq->u.extcall.code; - VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", + VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u", src_id); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, - src_id, 0, 2); + src_id, 0); /* sending vcpu invalid */ if (src_id >= KVM_MAX_VCPUS || @@ -1070,10 +1076,10 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_prefix_info *prefix = &li->irq.prefix; - VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", + VCPU_EVENT(vcpu, 3, "inject: set prefix to %x", irq->u.prefix.address); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, - irq->u.prefix.address, 0, 2); + irq->u.prefix.address, 0); if (!is_vcpu_stopped(vcpu)) return -EBUSY; @@ -1090,7 +1096,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_stop_info *stop = &li->irq.stop; int rc = 0; - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0); if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) return -EINVAL; @@ -1114,8 +1120,8 @@ static int __inject_sigp_restart(struct kvm_vcpu *vcpu, { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type); - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2); + VCPU_EVENT(vcpu, 3, "%s", "inject: restart int"); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); set_bit(IRQ_PEND_RESTART, &li->pending_irqs); return 0; @@ -1126,10 +1132,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", + VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u", irq->u.emerg.code); 
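The __inject_* helpers in the hunks above share one shape: log the event, store the interrupt payload in the local-interrupt structure, then flag the interrupt type in the pending_irqs bitmap so the delivery path will find it. A stripped-down sketch of that record-then-mark-pending pattern, using invented example_* names rather than the real KVM structures (the real helpers additionally run under li->lock):

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_IRQ_PEND_RESTART	3	/* bit number in the pending bitmap */

struct example_local_int {
	unsigned long pending_irqs;	/* one bit per pending interrupt type */
	u32 restart_param;		/* payload consumed at delivery time */
};

static void example_inject_restart(struct example_local_int *li, u32 parm)
{
	/* Record the payload for the delivery path to pick up later. */
	li->restart_param = parm;
	/* Mark the interrupt type pending; set_bit() operates atomically. */
	set_bit(EXAMPLE_IRQ_PEND_RESTART, &li->pending_irqs);
}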
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, - irq->u.emerg.code, 0, 2); + irq->u.emerg.code, 0); set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); @@ -1142,10 +1148,10 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_mchk_info *mchk = &li->irq.mchk; - VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", + VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx", irq->u.mchk.mcic); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, - irq->u.mchk.mcic, 2); + irq->u.mchk.mcic); /* * Because repressible machine checks can be indicated along with @@ -1172,9 +1178,9 @@ static int __inject_ckc(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP); + VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external"); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, - 0, 0, 2); + 0, 0); set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); @@ -1185,9 +1191,9 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER); + VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external"); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, - 0, 0, 2); + 0, 0); set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); @@ -1435,20 +1441,20 @@ int kvm_s390_inject_vm(struct kvm *kvm, inti->ext.ext_params2 = s390int->parm64; break; case KVM_S390_INT_SERVICE: - VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); + VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm); inti->ext.ext_params = s390int->parm; break; case KVM_S390_INT_PFAULT_DONE: inti->ext.ext_params2 = s390int->parm64; break; case KVM_S390_MCHK: - VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", + VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx", s390int->parm64); inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ inti->mchk.mcic = s390int->parm64; break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - if (inti->type & IOINT_AI_MASK) + if (inti->type & KVM_S390_INT_IO_AI_MASK) VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); else VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", @@ -1535,8 +1541,6 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) switch (irq->type) { case KVM_S390_PROGRAM_INT: - VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", - irq->u.pgm.code); rc = __inject_prog(vcpu, irq); break; case KVM_S390_SIGP_SET_PREFIX: diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f32f843a3631..98df53c01343 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -28,6 +28,7 @@ #include <linux/vmalloc.h> #include <asm/asm-offsets.h> #include <asm/lowcore.h> +#include <asm/etr.h> #include <asm/pgtable.h> #include <asm/nmi.h> #include <asm/switch_to.h> @@ -108,6 +109,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "diagnose_10", VCPU_STAT(diagnose_10) }, { "diagnose_44", VCPU_STAT(diagnose_44) }, { "diagnose_9c", VCPU_STAT(diagnose_9c) }, + { "diagnose_258", VCPU_STAT(diagnose_258) }, + { "diagnose_308", VCPU_STAT(diagnose_308) }, + { "diagnose_500", VCPU_STAT(diagnose_500) }, { NULL } }; @@ -124,6 +128,7 @@ unsigned 
long kvm_s390_fac_list_mask_size(void) } static struct gmap_notifier gmap_notifier; +debug_info_t *kvm_s390_dbf; /* Section: not file related */ int kvm_arch_hardware_enable(void) @@ -134,24 +139,69 @@ int kvm_arch_hardware_enable(void) static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address); +/* + * This callback is executed during stop_machine(). All CPUs are therefore + * temporarily stopped. In order not to change guest behavior, we have to + * disable preemption whenever we touch the epoch of kvm and the VCPUs, + * so a CPU won't be stopped while calculating with the epoch. + */ +static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, + void *v) +{ + struct kvm *kvm; + struct kvm_vcpu *vcpu; + int i; + unsigned long long *delta = v; + + list_for_each_entry(kvm, &vm_list, vm_list) { + kvm->arch.epoch -= *delta; + kvm_for_each_vcpu(i, vcpu, kvm) { + vcpu->arch.sie_block->epoch -= *delta; + } + } + return NOTIFY_OK; +} + +static struct notifier_block kvm_clock_notifier = { + .notifier_call = kvm_clock_sync, +}; + int kvm_arch_hardware_setup(void) { gmap_notifier.notifier_call = kvm_gmap_notifier; gmap_register_ipte_notifier(&gmap_notifier); + atomic_notifier_chain_register(&s390_epoch_delta_notifier, + &kvm_clock_notifier); return 0; } void kvm_arch_hardware_unsetup(void) { gmap_unregister_ipte_notifier(&gmap_notifier); + atomic_notifier_chain_unregister(&s390_epoch_delta_notifier, + &kvm_clock_notifier); } int kvm_arch_init(void *opaque) { + kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); + if (!kvm_s390_dbf) + return -ENOMEM; + + if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) { + debug_unregister(kvm_s390_dbf); + return -ENOMEM; + } + /* Register floating interrupt controller interface. */ return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); } +void kvm_arch_exit(void) +{ + debug_unregister(kvm_s390_dbf); +} + /* Section: device related */ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) @@ -281,10 +331,12 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) switch (cap->cap) { case KVM_CAP_S390_IRQCHIP: + VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); kvm->arch.use_irqchip = 1; r = 0; break; case KVM_CAP_S390_USER_SIGP: + VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); kvm->arch.user_sigp = 1; r = 0; break; @@ -295,8 +347,11 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = 0; } else r = -EINVAL; + VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", + r ?
"(not available)" : "(success)"); break; case KVM_CAP_S390_USER_STSI: + VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); kvm->arch.user_stsi = 1; r = 0; break; @@ -314,6 +369,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att switch (attr->attr) { case KVM_S390_VM_MEM_LIMIT_SIZE: ret = 0; + VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", + kvm->arch.gmap->asce_end); if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) ret = -EFAULT; break; @@ -330,7 +387,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att unsigned int idx; switch (attr->attr) { case KVM_S390_VM_MEM_ENABLE_CMMA: + /* enable CMMA only for z10 and later (EDAT_1) */ + ret = -EINVAL; + if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1) + break; + ret = -EBUSY; + VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); mutex_lock(&kvm->lock); if (atomic_read(&kvm->online_vcpus) == 0) { kvm->arch.use_cmma = 1; @@ -339,6 +402,11 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att mutex_unlock(&kvm->lock); break; case KVM_S390_VM_MEM_CLR_CMMA: + ret = -EINVAL; + if (!kvm->arch.use_cmma) + break; + + VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); mutex_lock(&kvm->lock); idx = srcu_read_lock(&kvm->srcu); s390_reset_cmma(kvm->arch.gmap->mm); @@ -374,6 +442,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att } } mutex_unlock(&kvm->lock); + VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit); break; } default: @@ -400,22 +469,26 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) kvm->arch.crypto.crycb->aes_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); kvm->arch.crypto.aes_kw = 1; + VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: get_random_bytes( kvm->arch.crypto.crycb->dea_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); kvm->arch.crypto.dea_kw = 1; + VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: kvm->arch.crypto.aes_kw = 0; memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); + VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: kvm->arch.crypto.dea_kw = 0; memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); + VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); break; default: mutex_unlock(&kvm->lock); @@ -440,6 +513,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (gtod_high != 0) return -EINVAL; + VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high); return 0; } @@ -459,12 +533,15 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) return r; mutex_lock(&kvm->lock); + preempt_disable(); kvm->arch.epoch = gtod - host_tod; kvm_s390_vcpu_block_all(kvm); kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; kvm_s390_vcpu_unblock_all(kvm); + preempt_enable(); mutex_unlock(&kvm->lock); + VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod); return 0; } @@ -496,6 +573,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (copy_to_user((void __user *)attr->addr, &gtod_high, sizeof(gtod_high))) return -EFAULT; + VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n",
gtod_high); return 0; } @@ -509,9 +587,12 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) if (r) return r; + preempt_disable(); gtod = host_tod + kvm->arch.epoch; + preempt_enable(); if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) return -EFAULT; + VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod); return 0; } @@ -821,7 +902,9 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) } /* Enable storage key handling for the guest */ - s390_enable_skey(); + r = s390_enable_skey(); + if (r) + goto out; for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -879,8 +962,7 @@ long kvm_arch_vm_ioctl(struct file *filp, if (kvm->arch.use_irqchip) { /* Set up dummy routing. */ memset(&routing, 0, sizeof(routing)); - kvm_set_irq_routing(kvm, &routing, 0, 0); - r = 0; + r = kvm_set_irq_routing(kvm, &routing, 0, 0); } break; } @@ -1043,7 +1125,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) sprintf(debug_name, "kvm-%u", current->pid); - kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); + kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); if (!kvm->arch.dbf) goto out_err; @@ -1086,7 +1168,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) mutex_init(&kvm->arch.ipte_mutex); debug_register_view(kvm->arch.dbf, &debug_sprintf_view); - VM_EVENT(kvm, 3, "%s", "vm created"); + VM_EVENT(kvm, 3, "vm created with type %lu", type); if (type & KVM_VM_S390_UCONTROL) { kvm->arch.gmap = NULL; @@ -1103,6 +1185,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.epoch = 0; spin_lock_init(&kvm->arch.start_stop_lock); + KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid); return 0; out_err: @@ -1110,6 +1193,7 @@ out_err: free_page((unsigned long)kvm->arch.model.fac); debug_unregister(kvm->arch.dbf); free_page((unsigned long)(kvm->arch.sca)); + KVM_EVENT(3, "creation of vm failed: %d", rc); return rc; } @@ -1131,7 +1215,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) if (kvm_is_ucontrol(vcpu->kvm)) gmap_free(vcpu->arch.gmap); - if (kvm_s390_cmma_enabled(vcpu->kvm)) + if (vcpu->kvm->arch.use_cmma) kvm_s390_vcpu_unsetup_cmma(vcpu); free_page((unsigned long)(vcpu->arch.sie_block)); @@ -1166,6 +1250,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) gmap_free(kvm->arch.gmap); kvm_s390_destroy_adapters(kvm); kvm_s390_clear_float_irqs(kvm); + KVM_EVENT(3, "vm 0x%p destroyed", kvm); } /* Section: vcpu related */ @@ -1198,21 +1283,54 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) return 0; } +/* + * Backs up the current FP/VX register save area on a particular + * destination. Used to switch between different register save + * areas. + */ +static inline void save_fpu_to(struct fpu *dst) +{ + dst->fpc = current->thread.fpu.fpc; + dst->flags = current->thread.fpu.flags; + dst->regs = current->thread.fpu.regs; +} + +/* + * Switches the FP/VX register save area from which to lazy + * restore register contents.
+ */ +static inline void load_fpu_from(struct fpu *from) +{ + current->thread.fpu.fpc = from->fpc; + current->thread.fpu.flags = from->flags; + current->thread.fpu.regs = from->regs; +} + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - save_fp_ctl(&vcpu->arch.host_fpregs.fpc); - if (test_kvm_facility(vcpu->kvm, 129)) - save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); - else - save_fp_regs(vcpu->arch.host_fpregs.fprs); - save_access_regs(vcpu->arch.host_acrs); + /* Save host register state */ + save_fpu_regs(); + save_fpu_to(&vcpu->arch.host_fpregs); + if (test_kvm_facility(vcpu->kvm, 129)) { - restore_fp_ctl(&vcpu->run->s.regs.fpc); - restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); - } else { - restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - restore_fp_regs(vcpu->arch.guest_fpregs.fprs); - } + current->thread.fpu.fpc = vcpu->run->s.regs.fpc; + current->thread.fpu.flags = FPU_USE_VX; + /* + * Use the register save area in the SIE-control block + * for register restore and save in kvm_arch_vcpu_put() + */ + current->thread.fpu.vxrs = + (__vector128 *)&vcpu->run->s.regs.vrs; + /* Always enable the vector extension for KVM */ + __ctl_set_vx(); + } else + load_fpu_from(&vcpu->arch.guest_fpregs); + + if (test_fp_ctl(current->thread.fpu.fpc)) + /* User space provided an invalid FPC, let's clear it */ + current->thread.fpu.fpc = 0; + + save_access_regs(vcpu->arch.host_acrs); restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); @@ -1222,19 +1340,22 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); - if (test_kvm_facility(vcpu->kvm, 129)) { - save_fp_ctl(&vcpu->run->s.regs.fpc); - save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); - } else { - save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - save_fp_regs(vcpu->arch.guest_fpregs.fprs); - } - save_access_regs(vcpu->run->s.regs.acrs); - restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); + + save_fpu_regs(); + if (test_kvm_facility(vcpu->kvm, 129)) - restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); + /* + * kvm_arch_vcpu_load() set up the register save area to + * the &vcpu->run->s.regs.vrs and, thus, the vector registers + * are already saved. Only the floating-point control must be + * copied. 
+ */ + vcpu->run->s.regs.fpc = current->thread.fpu.fpc; else - restore_fp_regs(vcpu->arch.host_fpregs.fprs); + save_fpu_to(&vcpu->arch.guest_fpregs); + load_fpu_from(&vcpu->arch.host_fpregs); + + save_access_regs(vcpu->run->s.regs.acrs); restore_access_regs(vcpu->arch.host_acrs); } @@ -1264,7 +1385,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { mutex_lock(&vcpu->kvm->lock); + preempt_disable(); vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; + preempt_enable(); mutex_unlock(&vcpu->kvm->lock); if (!kvm_is_ucontrol(vcpu->kvm)) vcpu->arch.gmap = vcpu->kvm->arch.gmap; @@ -1342,7 +1465,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) } vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; - if (kvm_s390_cmma_enabled(vcpu->kvm)) { + if (vcpu->kvm->arch.use_cmma) { rc = kvm_s390_vcpu_setup_cmma(vcpu); if (rc) return rc; @@ -1377,7 +1500,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block = &sie_page->sie_block; vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; - vcpu->arch.host_vregs = &sie_page->vregs; vcpu->arch.sie_block->icpua = id; if (!kvm_is_ucontrol(kvm)) { @@ -1399,6 +1521,19 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.local_int.wq = &vcpu->wq; vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; + /* + * Allocate a save area for floating-point registers. If the vector + * extension is available, register contents are saved in the SIE + * control block. The allocated save area is still required in + * particular places, for example, in kvm_s390_vcpu_store_status(). + */ + vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, + GFP_KERNEL); + if (!vcpu->arch.guest_fpregs.fprs) { + rc = -ENOMEM; + goto out_free_sie_block; + } + rc = kvm_vcpu_init(vcpu, kvm, id); if (rc) goto out_free_sie_block; @@ -1621,16 +1756,16 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { if (test_fp_ctl(fpu->fpc)) return -EINVAL; - memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); + memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); vcpu->arch.guest_fpregs.fpc = fpu->fpc; - restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - restore_fp_regs(vcpu->arch.guest_fpregs.fprs); + save_fpu_regs(); + load_fpu_from(&vcpu->arch.guest_fpregs); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); + memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); fpu->fpc = vcpu->arch.guest_fpregs.fpc; return 0; } @@ -1723,18 +1858,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return rc; } -bool kvm_s390_cmma_enabled(struct kvm *kvm) -{ - if (!MACHINE_IS_LPAR) - return false; - /* only enable for z10 and later */ - if (!MACHINE_HAS_EDAT1) - return false; - if (!kvm->arch.use_cmma) - return false; - return true; -} - static bool ibs_enabled(struct kvm_vcpu *vcpu) { return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; @@ -2193,8 +2316,21 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) * copying in vcpu load/put. 
Lets update our copies before we save * it into the save area */ - save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - save_fp_regs(vcpu->arch.guest_fpregs.fprs); + save_fpu_regs(); + if (test_kvm_facility(vcpu->kvm, 129)) { + /* + * If the vector extension is available, the vector registers + * which overlaps with floating-point registers are saved in + * the SIE-control block. Hence, extract the floating-point + * registers and the FPC value and store them in the + * guest_fpregs structure. + */ + WARN_ON(!is_vx_task(current)); /* XXX remove later */ + vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; + convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, + current->thread.fpu.vxrs); + } else + save_fpu_to(&vcpu->arch.guest_fpregs); save_access_regs(vcpu->run->s.regs.acrs); return kvm_s390_store_status_unloaded(vcpu, addr); @@ -2221,10 +2357,13 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) /* * The guest VXRS are in the host VXRs due to the lazy - * copying in vcpu load/put. Let's update our copies before we save - * it into the save area. + * copying in vcpu load/put. We can simply call save_fpu_regs() + * to save the current register state because we are in the + * middle of a load/put cycle. + * + * Let's update our copies before we save it into the save area. */ - save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + save_fpu_regs(); return kvm_s390_store_adtl_status_unloaded(vcpu, addr); } @@ -2340,6 +2479,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, case KVM_CAP_S390_CSS_SUPPORT: if (!vcpu->kvm->arch.css_support) { vcpu->kvm->arch.css_support = 1; + VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); trace_kvm_s390_enable_css(vcpu->kvm); } r = 0; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c5704786e473..c446aabf60d3 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -27,6 +27,13 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); #define TDB_FORMAT1 1 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) +extern debug_info_t *kvm_s390_dbf; +#define KVM_EVENT(d_loglevel, d_string, d_args...)\ +do { \ + debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \ + d_args); \ +} while (0) + #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ do { \ debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \ @@ -65,6 +72,8 @@ static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu) static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) { + VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id, + prefix); vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); @@ -217,8 +226,6 @@ void exit_sie(struct kvm_vcpu *vcpu); void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu); int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); -/* is cmma enabled */ -bool kvm_s390_cmma_enabled(struct kvm *kvm); unsigned long kvm_s390_fac_list_mask_size(void); extern unsigned long kvm_s390_fac_list_mask[]; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index ad4242245771..4d21dc4d1a84 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -53,11 +53,14 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) kvm_s390_set_psw_cc(vcpu, 3); return 0; } + VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); val = (val - hostclk) & ~0x3fUL; 
mutex_lock(&vcpu->kvm->lock); + preempt_disable(); kvm_for_each_vcpu(i, cpup, vcpu->kvm) cpup->arch.sie_block->epoch = val; + preempt_enable(); mutex_unlock(&vcpu->kvm->lock); kvm_s390_set_psw_cc(vcpu, 0); @@ -98,8 +101,6 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_set_prefix(vcpu, address); - - VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 1, address); return 0; } @@ -129,7 +130,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); + VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2); trace_kvm_s390_handle_prefix(vcpu, 0, address); return 0; } @@ -155,7 +156,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); + VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga); trace_kvm_s390_handle_stap(vcpu, ga); return 0; } @@ -167,6 +168,7 @@ static int __skey_check_enable(struct kvm_vcpu *vcpu) return rc; rc = s390_enable_skey(); + VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest"); trace_kvm_s390_skey_related_inst(vcpu); vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); return rc; @@ -370,7 +372,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) &fac, sizeof(fac)); if (rc) return rc; - VCPU_EVENT(vcpu, 5, "store facility list value %x", fac); + VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac); trace_kvm_s390_handle_stfl(vcpu, fac); return 0; } @@ -468,7 +470,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); + VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data); return 0; } @@ -521,7 +523,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) ar_t ar; vcpu->stat.instruction_stsi++; - VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); + VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -758,10 +760,10 @@ static int handle_essa(struct kvm_vcpu *vcpu) struct gmap *gmap; int i; - VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); + VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries); gmap = vcpu->arch.gmap; vcpu->stat.instruction_essa++; - if (!kvm_s390_cmma_enabled(vcpu->kvm)) + if (!vcpu->kvm->arch.use_cmma) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) @@ -829,7 +831,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; @@ -868,7 +870,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); reg = reg1; @@ -902,7 +904,7 @@ static int handle_lctlg(struct kvm_vcpu 
*vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; @@ -940,7 +942,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); reg = reg1; diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 72e58bd2bee7..da690b69f9fe 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -205,9 +205,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; return SIGP_CC_STATUS_STORED; - } else if (rc == 0) { - VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", - dst_vcpu->vcpu_id, irq.u.prefix.address); } return rc; @@ -371,7 +368,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, return rc; } -static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) +static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code, + u16 cpu_addr) { if (!vcpu->kvm->arch.user_sigp) return 0; @@ -414,9 +412,8 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) default: vcpu->stat.instruction_sigp_unknown++; } - - VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space", - order_code); + VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace", + order_code, cpu_addr); return 1; } @@ -435,7 +432,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); - if (handle_sigp_order_in_user_space(vcpu, order_code)) + if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr)) return -EOPNOTSUPP; if (r1 % 2) diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 3208d33a48cb..cc1d6c68356f 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -105,11 +105,22 @@ TRACE_EVENT(kvm_s390_vcpu_start_stop, {KVM_S390_PROGRAM_INT, "program interrupt"}, \ {KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"}, \ {KVM_S390_RESTART, "sigp restart"}, \ + {KVM_S390_INT_PFAULT_INIT, "pfault init"}, \ + {KVM_S390_INT_PFAULT_DONE, "pfault done"}, \ + {KVM_S390_MCHK, "machine check"}, \ + {KVM_S390_INT_CLOCK_COMP, "clock comparator"}, \ + {KVM_S390_INT_CPU_TIMER, "cpu timer"}, \ {KVM_S390_INT_VIRTIO, "virtio interrupt"}, \ {KVM_S390_INT_SERVICE, "sclp interrupt"}, \ {KVM_S390_INT_EMERGENCY, "sigp emergency"}, \ {KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"} +#define get_irq_name(__type) \ + (__type > KVM_S390_INT_IO_MAX ? \ + __print_symbolic(__type, kvm_s390_int_type) : \ + (__type & KVM_S390_INT_IO_AI_MASK ? \ + "adapter I/O interrupt" : "subchannel I/O interrupt")) + TRACE_EVENT(kvm_s390_inject_vm, TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who), TP_ARGS(type, parm, parm64, who), @@ -131,22 +142,19 @@ TRACE_EVENT(kvm_s390_inject_vm, TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx", (__entry->who == 1) ? " (from kernel)" : (__entry->who == 2) ? 
" (from user)" : "", - __entry->inttype, - __print_symbolic(__entry->inttype, kvm_s390_int_type), + __entry->inttype, get_irq_name(__entry->inttype), __entry->parm, __entry->parm64) ); TRACE_EVENT(kvm_s390_inject_vcpu, - TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \ - int who), - TP_ARGS(id, type, parm, parm64, who), + TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64), + TP_ARGS(id, type, parm, parm64), TP_STRUCT__entry( __field(int, id) __field(__u32, inttype) __field(__u32, parm) __field(__u64, parm64) - __field(int, who) ), TP_fast_assign( @@ -154,15 +162,12 @@ TRACE_EVENT(kvm_s390_inject_vcpu, __entry->inttype = type & 0x00000000ffffffff; __entry->parm = parm; __entry->parm64 = parm64; - __entry->who = who; ), - TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx", - (__entry->who == 1) ? " (from kernel)" : - (__entry->who == 2) ? " (from user)" : "", + TP_printk("inject (vcpu %d): type:%x (%s) parm:%x parm64:%llx", __entry->id, __entry->inttype, - __print_symbolic(__entry->inttype, kvm_s390_int_type), - __entry->parm, __entry->parm64) + get_irq_name(__entry->inttype), __entry->parm, + __entry->parm64) ); /* @@ -189,8 +194,8 @@ TRACE_EVENT(kvm_s390_deliver_interrupt, TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \ "data:%08llx %016llx", __entry->id, __entry->inttype, - __print_symbolic(__entry->inttype, kvm_s390_int_type), - __entry->data0, __entry->data1) + get_irq_name(__entry->inttype), __entry->data0, + __entry->data1) ); /* diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 16dc42d83f93..246a7eb4b680 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -26,6 +26,7 @@ void __delay(unsigned long loops) */ asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); } +EXPORT_SYMBOL(__delay); static void __udelay_disabled(unsigned long long usecs) { diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 4614d415bb58..0d002a746bec 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -370,22 +370,9 @@ long __strncpy_from_user(char *dst, const char __user *src, long size) } EXPORT_SYMBOL(__strncpy_from_user); -/* - * The "old" uaccess variant without mvcos can be enforced with the - * uaccess_primary kernel parameter. This is mainly for debugging purposes. - */ -static int uaccess_primary __initdata; - -static int __init parse_uaccess_pt(char *__unused) -{ - uaccess_primary = 1; - return 0; -} -early_param("uaccess_primary", parse_uaccess_pt); - static int __init uaccess_init(void) { - if (!uaccess_primary && test_facility(27)) + if (test_facility(27)) static_key_slow_inc(&have_mvcos); return 0; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 4c8f5d7f9c23..f985856a538b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -646,7 +646,7 @@ static void pfault_interrupt(struct ext_code ext_code, return; inc_irq_stat(IRQEXT_PFL); /* Get the token (= pid of the affected task). */ - pid = sizeof(void *) == 4 ? 
param32 : param64; + pid = param64; rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); if (tsk) diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 1eb41bb3010c..12bbf0e8478f 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -30,6 +30,9 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, do { pte = *ptep; barrier(); + /* Similar to the PMD case, NUMA hinting must take slow path */ + if (pte_protnone(pte)) + return 0; if ((pte_val(pte) & mask) != 0) return 0; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); @@ -125,6 +128,13 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_large(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they + * can be serialised against THP migration. + */ + if (pmd_protnone(pmd)) + return 0; if (!gup_huge_pmd(pmdp, pmd, addr, next, write, pages, nr)) return 0; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 76e873748b56..2963b563621c 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -27,6 +27,7 @@ #include <linux/initrd.h> #include <linux/export.h> #include <linux/gfp.h> +#include <linux/memblock.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -138,7 +139,7 @@ void __init mem_init(void) cpumask_set_cpu(0, mm_cpumask(&init_mm)); atomic_set(&init_mm.context.attach_count, 1); - max_mapnr = max_low_pfn; + set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); /* Setup guest page hinting */ @@ -170,37 +171,36 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { - unsigned long zone_start_pfn, zone_end_pfn, nr_pages; + unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); + unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS); unsigned long start_pfn = PFN_DOWN(start); unsigned long size_pages = PFN_DOWN(size); - struct zone *zone; - int rc; + unsigned long nr_pages; + int rc, zone_enum; rc = vmem_add_mapping(start, size); if (rc) return rc; - for_each_zone(zone) { - if (zone_idx(zone) != ZONE_MOVABLE) { - /* Add range within existing zone limits */ - zone_start_pfn = zone->zone_start_pfn; - zone_end_pfn = zone->zone_start_pfn + - zone->spanned_pages; + + while (size_pages > 0) { + if (start_pfn < dma_end_pfn) { + nr_pages = (start_pfn + size_pages > dma_end_pfn) ? + dma_end_pfn - start_pfn : size_pages; + zone_enum = ZONE_DMA; + } else if (start_pfn < normal_end_pfn) { + nr_pages = (start_pfn + size_pages > normal_end_pfn) ? + normal_end_pfn - start_pfn : size_pages; + zone_enum = ZONE_NORMAL; } else { - /* Add remaining range to ZONE_MOVABLE */ - zone_start_pfn = start_pfn; - zone_end_pfn = start_pfn + size_pages; + nr_pages = size_pages; + zone_enum = ZONE_MOVABLE; } - if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn) - continue; - nr_pages = (start_pfn + size_pages > zone_end_pfn) ? 
- zone_end_pfn - start_pfn : size_pages; - rc = __add_pages(nid, zone, start_pfn, nr_pages); + rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum, + start_pfn, size_pages); if (rc) break; start_pfn += nr_pages; size_pages -= nr_pages; - if (!size_pages) - break; } if (rc) vmem_remove_mapping(start, size); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b33f66110ca9..54ef3bc01b43 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -10,11 +10,7 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> -#include <linux/highmem.h> -#include <linux/pagemap.h> #include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/quicklist.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/swapops.h> @@ -28,12 +24,9 @@ #include <asm/tlbflush.h> #include <asm/mmu_context.h> -#define ALLOC_ORDER 2 -#define FRAG_MASK 0x03 - unsigned long *crst_table_alloc(struct mm_struct *mm) { - struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + struct page *page = alloc_pages(GFP_KERNEL, 2); if (!page) return NULL; @@ -42,7 +35,7 @@ unsigned long *crst_table_alloc(struct mm_struct *mm) void crst_table_free(struct mm_struct *mm, unsigned long *table) { - free_pages((unsigned long) table, ALLOC_ORDER); + free_pages((unsigned long) table, 2); } static void __crst_table_upgrade(void *arg) @@ -176,7 +169,7 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC); spin_lock_init(&gmap->guest_table_lock); gmap->mm = mm; - page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + page = alloc_pages(GFP_KERNEL, 2); if (!page) goto out_free; page->index = 0; @@ -247,7 +240,7 @@ void gmap_free(struct gmap *gmap) /* Free all segment & region tables. 
*/ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) - __free_pages(page, ALLOC_ORDER); + __free_pages(page, 2); gmap_radix_tree_free(&gmap->guest_to_host); gmap_radix_tree_free(&gmap->host_to_guest); down_write(&gmap->mm->mmap_sem); @@ -287,7 +280,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, unsigned long *new; /* since we dont free the gmap table until gmap_free we can unlock */ - page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + page = alloc_pages(GFP_KERNEL, 2); if (!page) return -ENOMEM; new = (unsigned long *) page_to_phys(page); @@ -302,7 +295,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, } spin_unlock(&gmap->mm->page_table_lock); if (page) - __free_pages(page, ALLOC_ORDER); + __free_pages(page, 2); return 0; } @@ -795,40 +788,6 @@ void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte) } EXPORT_SYMBOL_GPL(gmap_do_ipte_notify); -static inline int page_table_with_pgste(struct page *page) -{ - return atomic_read(&page->_mapcount) == 0; -} - -static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) -{ - struct page *page; - unsigned long *table; - - page = alloc_page(GFP_KERNEL|__GFP_REPEAT); - if (!page) - return NULL; - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; - } - atomic_set(&page->_mapcount, 0); - table = (unsigned long *) page_to_phys(page); - clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); - clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); - return table; -} - -static inline void page_table_free_pgste(unsigned long *table) -{ - struct page *page; - - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); - __free_page(page); -} - int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq) { @@ -957,20 +916,6 @@ __initcall(page_table_register_sysctl); #else /* CONFIG_PGSTE */ -static inline int page_table_with_pgste(struct page *page) -{ - return 0; -} - -static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) -{ - return NULL; -} - -static inline void page_table_free_pgste(unsigned long *table) -{ -} - static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table, unsigned long vmaddr) { @@ -994,44 +939,55 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) */ unsigned long *page_table_alloc(struct mm_struct *mm) { - unsigned long *uninitialized_var(table); - struct page *uninitialized_var(page); + unsigned long *table; + struct page *page; unsigned int mask, bit; - if (mm_alloc_pgste(mm)) - return page_table_alloc_pgste(mm); - /* Allocate fragments of a 4K page as 1K/2K page table */ - spin_lock_bh(&mm->context.list_lock); - mask = FRAG_MASK; - if (!list_empty(&mm->context.pgtable_list)) { - page = list_first_entry(&mm->context.pgtable_list, - struct page, lru); - table = (unsigned long *) page_to_phys(page); - mask = atomic_read(&page->_mapcount); - mask = mask | (mask >> 4); - } - if ((mask & FRAG_MASK) == FRAG_MASK) { - spin_unlock_bh(&mm->context.list_lock); - page = alloc_page(GFP_KERNEL|__GFP_REPEAT); - if (!page) - return NULL; - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; + /* Try to get a fragment of a 4K page as a 2K page table */ + if (!mm_alloc_pgste(mm)) { + table = NULL; + spin_lock_bh(&mm->context.list_lock); + if (!list_empty(&mm->context.pgtable_list)) { + page = list_first_entry(&mm->context.pgtable_list, + struct page, lru); + mask = 
atomic_read(&page->_mapcount); + mask = (mask | (mask >> 4)) & 3; + if (mask != 3) { + table = (unsigned long *) page_to_phys(page); + bit = mask & 1; /* =1 -> second 2K */ + if (bit) + table += PTRS_PER_PTE; + atomic_xor_bits(&page->_mapcount, 1U << bit); + list_del(&page->lru); + } } + spin_unlock_bh(&mm->context.list_lock); + if (table) + return table; + } + /* Allocate a fresh page */ + page = alloc_page(GFP_KERNEL|__GFP_REPEAT); + if (!page) + return NULL; + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } + /* Initialize page table */ + table = (unsigned long *) page_to_phys(page); + if (mm_alloc_pgste(mm)) { + /* Return 4K page table with PGSTEs */ + atomic_set(&page->_mapcount, 3); + clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); + clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); + } else { + /* Return the first 2K fragment of the page */ atomic_set(&page->_mapcount, 1); - table = (unsigned long *) page_to_phys(page); clear_table(table, _PAGE_INVALID, PAGE_SIZE); spin_lock_bh(&mm->context.list_lock); list_add(&page->lru, &mm->context.pgtable_list); - } else { - for (bit = 1; mask & bit; bit <<= 1) - table += PTRS_PER_PTE; - mask = atomic_xor_bits(&page->_mapcount, bit); - if ((mask & FRAG_MASK) == FRAG_MASK) - list_del(&page->lru); + spin_unlock_bh(&mm->context.list_lock); } - spin_unlock_bh(&mm->context.list_lock); return table; } @@ -1041,37 +997,23 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) unsigned int bit, mask; page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - if (page_table_with_pgste(page)) - return page_table_free_pgste(table); - /* Free 1K/2K page table fragment of a 4K page */ - bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); - spin_lock_bh(&mm->context.list_lock); - if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) - list_del(&page->lru); - mask = atomic_xor_bits(&page->_mapcount, bit); - if (mask & FRAG_MASK) - list_add(&page->lru, &mm->context.pgtable_list); - spin_unlock_bh(&mm->context.list_lock); - if (mask == 0) { - pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); - __free_page(page); + if (!mm_alloc_pgste(mm)) { + /* Free 2K page table fragment of a 4K page */ + bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); + spin_lock_bh(&mm->context.list_lock); + mask = atomic_xor_bits(&page->_mapcount, 1U << bit); + if (mask & 3) + list_add(&page->lru, &mm->context.pgtable_list); + else + list_del(&page->lru); + spin_unlock_bh(&mm->context.list_lock); + if (mask != 0) + return; } -} - -static void __page_table_free_rcu(void *table, unsigned bit) -{ - struct page *page; - if (bit == FRAG_MASK) - return page_table_free_pgste(table); - /* Free 1K/2K page table fragment of a 4K page */ - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - if (atomic_xor_bits(&page->_mapcount, bit) == 0) { - pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); - __free_page(page); - } + pgtable_page_dtor(page); + atomic_set(&page->_mapcount, -1); + __free_page(page); } void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, @@ -1083,34 +1025,45 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, mm = tlb->mm; page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - if (page_table_with_pgste(page)) { + if (mm_alloc_pgste(mm)) { gmap_unlink(mm, table, vmaddr); - table = (unsigned long *) (__pa(table) | FRAG_MASK); + table = (unsigned long *) (__pa(table) | 3); tlb_remove_table(tlb, table); return; } - bit = 1 << ((__pa(table) & ~PAGE_MASK) / 
(PTRS_PER_PTE*sizeof(pte_t))); + bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.list_lock); - if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) - list_del(&page->lru); - mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4)); - if (mask & FRAG_MASK) + mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit); + if (mask & 3) list_add_tail(&page->lru, &mm->context.pgtable_list); + else + list_del(&page->lru); spin_unlock_bh(&mm->context.list_lock); - table = (unsigned long *) (__pa(table) | (bit << 4)); + table = (unsigned long *) (__pa(table) | (1U << bit)); tlb_remove_table(tlb, table); } static void __tlb_remove_table(void *_table) { - const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK; - void *table = (void *)((unsigned long) _table & ~mask); - unsigned type = (unsigned long) _table & mask; - - if (type) - __page_table_free_rcu(table, type); - else - free_pages((unsigned long) table, ALLOC_ORDER); + unsigned int mask = (unsigned long) _table & 3; + void *table = (void *)((unsigned long) _table ^ mask); + struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + + switch (mask) { + case 0: /* pmd or pud */ + free_pages((unsigned long) table, 2); + break; + case 1: /* lower 2K of a 4K page table */ + case 2: /* higher 2K of a 4K page table */ + if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0) + break; + /* fallthrough */ + case 3: /* 4K page table with pgstes */ + pgtable_page_dtor(page); + atomic_set(&page->_mapcount, -1); + __free_page(page); + break; + } } static void tlb_remove_table_smp_sync(void *arg) diff --git a/arch/s390/numa/Makefile b/arch/s390/numa/Makefile new file mode 100644 index 000000000000..f94ecaffa71b --- /dev/null +++ b/arch/s390/numa/Makefile @@ -0,0 +1,3 @@ +obj-y += numa.o +obj-y += toptree.o +obj-$(CONFIG_NUMA_EMU) += mode_emu.o diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c new file mode 100644 index 000000000000..7de4e2f780d7 --- /dev/null +++ b/arch/s390/numa/mode_emu.c @@ -0,0 +1,530 @@ +/* + * NUMA support for s390 + * + * NUMA emulation (aka fake NUMA) distributes the available memory to nodes + * without using real topology information about the physical memory of the + * machine. + * + * It distributes the available CPUs to nodes while respecting the original + * machine topology information. This is done by trying to avoid to separate + * CPUs which reside on the same book or even on the same MC. + * + * Because the current Linux scheduler code requires a stable cpu to node + * mapping, cores are pinned to nodes when the first CPU thread is set online. + * + * Copyright IBM Corp. 
2015 + */ + +#define KMSG_COMPONENT "numa_emu" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/cpumask.h> +#include <linux/memblock.h> +#include <linux/node.h> +#include <linux/memory.h> +#include <linux/slab.h> +#include <asm/smp.h> +#include <asm/topology.h> +#include "numa_mode.h" +#include "toptree.h" + +/* Distances between the different system components */ +#define DIST_EMPTY 0 +#define DIST_CORE 1 +#define DIST_MC 2 +#define DIST_BOOK 3 +#define DIST_MAX 4 + +/* Node distance reported to common code */ +#define EMU_NODE_DIST 10 + +/* Node ID for free (not yet pinned) cores */ +#define NODE_ID_FREE -1 + +/* Different levels of toptree */ +enum toptree_level {CORE, MC, BOOK, NODE, TOPOLOGY}; + +/* The two toptree IDs */ +enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA}; + +/* Number of NUMA nodes */ +static int emu_nodes = 1; +/* NUMA stripe size */ +static unsigned long emu_size; + +/* + * Node to core pinning information updates are protected by + * "sched_domains_mutex". + */ +static struct { + s32 to_node_id[CONFIG_NR_CPUS]; /* Pinned core to node mapping */ + int total; /* Total number of pinned cores */ + int per_node_target; /* Cores per node without extra cores */ + int per_node[MAX_NUMNODES]; /* Number of cores pinned to node */ +} *emu_cores; + +/* + * Pin a core to a node + */ +static void pin_core_to_node(int core_id, int node_id) +{ + if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) { + emu_cores->per_node[node_id]++; + emu_cores->to_node_id[core_id] = node_id; + emu_cores->total++; + } else { + WARN_ON(emu_cores->to_node_id[core_id] != node_id); + } +} + +/* + * Number of pinned cores of a node + */ +static int cores_pinned(struct toptree *node) +{ + return emu_cores->per_node[node->id]; +} + +/* + * ID of the node where the core is pinned (or NODE_ID_FREE) + */ +static int core_pinned_to_node_id(struct toptree *core) +{ + return emu_cores->to_node_id[core->id]; +} + +/* + * Number of cores in the tree that are not yet pinned + */ +static int cores_free(struct toptree *tree) +{ + struct toptree *core; + int count = 0; + + toptree_for_each(core, tree, CORE) { + if (core_pinned_to_node_id(core) == NODE_ID_FREE) + count++; + } + return count; +} + +/* + * Return node of core + */ +static struct toptree *core_node(struct toptree *core) +{ + return core->parent->parent->parent; +} + +/* + * Return book of core + */ +static struct toptree *core_book(struct toptree *core) +{ + return core->parent->parent; +} + +/* + * Return mc of core + */ +static struct toptree *core_mc(struct toptree *core) +{ + return core->parent; +} + +/* + * Distance between two cores + */ +static int dist_core_to_core(struct toptree *core1, struct toptree *core2) +{ + if (core_book(core1)->id != core_book(core2)->id) + return DIST_BOOK; + if (core_mc(core1)->id != core_mc(core2)->id) + return DIST_MC; + /* Same core or sibling on same MC */ + return DIST_CORE; +} + +/* + * Distance of a node to a core + */ +static int dist_node_to_core(struct toptree *node, struct toptree *core) +{ + struct toptree *core_node; + int dist_min = DIST_MAX; + + toptree_for_each(core_node, node, CORE) + dist_min = min(dist_min, dist_core_to_core(core_node, core)); + return dist_min == DIST_MAX ? DIST_EMPTY : dist_min; +} + +/* + * Unify will delete empty nodes, therefore recreate nodes. 
+ */ +static void toptree_unify_tree(struct toptree *tree) +{ + int nid; + + toptree_unify(tree); + for (nid = 0; nid < emu_nodes; nid++) + toptree_get_child(tree, nid); +} + +/* + * Find the best/nearest node for a given core and ensure that no node + * gets more than "emu_cores->per_node_target + extra" cores. + */ +static struct toptree *node_for_core(struct toptree *numa, struct toptree *core, + int extra) +{ + struct toptree *node, *node_best = NULL; + int dist_cur, dist_best, cores_target; + + cores_target = emu_cores->per_node_target + extra; + dist_best = DIST_MAX; + node_best = NULL; + toptree_for_each(node, numa, NODE) { + /* Already pinned cores must use their nodes */ + if (core_pinned_to_node_id(core) == node->id) { + node_best = node; + break; + } + /* Skip nodes that already have enough cores */ + if (cores_pinned(node) >= cores_target) + continue; + dist_cur = dist_node_to_core(node, core); + if (dist_cur < dist_best) { + dist_best = dist_cur; + node_best = node; + } + } + return node_best; +} + +/* + * Find the best node for each core with respect to "extra" core count + */ +static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys, + int extra) +{ + struct toptree *node, *core, *tmp; + + toptree_for_each_safe(core, tmp, phys, CORE) { + node = node_for_core(numa, core, extra); + if (!node) + return; + toptree_move(core, node); + pin_core_to_node(core->id, node->id); + } +} + +/* + * Move structures of given level to specified NUMA node + */ +static void move_level_to_numa_node(struct toptree *node, struct toptree *phys, + enum toptree_level level, bool perfect) +{ + int cores_free, cores_target = emu_cores->per_node_target; + struct toptree *cur, *tmp; + + toptree_for_each_safe(cur, tmp, phys, level) { + cores_free = cores_target - toptree_count(node, CORE); + if (perfect) { + if (cores_free == toptree_count(cur, CORE)) + toptree_move(cur, node); + } else { + if (cores_free >= toptree_count(cur, CORE)) + toptree_move(cur, node); + } + } +} + +/* + * Move structures of a given level to NUMA nodes. If "perfect" is specified + * move only perfectly fitting structures. Otherwise move also smaller + * than needed structures. 
+ */ +static void move_level_to_numa(struct toptree *numa, struct toptree *phys, + enum toptree_level level, bool perfect) +{ + struct toptree *node; + + toptree_for_each(node, numa, NODE) + move_level_to_numa_node(node, phys, level, perfect); +} + +/* + * For the first run try to move the big structures + */ +static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys) +{ + struct toptree *core; + + /* Always try to move perfectly fitting structures first */ + move_level_to_numa(numa, phys, BOOK, true); + move_level_to_numa(numa, phys, BOOK, false); + move_level_to_numa(numa, phys, MC, true); + move_level_to_numa(numa, phys, MC, false); + /* Now pin all the moved cores */ + toptree_for_each(core, numa, CORE) + pin_core_to_node(core->id, core_node(core)->id); +} + +/* + * Allocate new topology and create required nodes + */ +static struct toptree *toptree_new(int id, int nodes) +{ + struct toptree *tree; + int nid; + + tree = toptree_alloc(TOPOLOGY, id); + if (!tree) + goto fail; + for (nid = 0; nid < nodes; nid++) { + if (!toptree_get_child(tree, nid)) + goto fail; + } + return tree; +fail: + panic("NUMA emulation could not allocate topology"); +} + +/* + * Allocate and initialize core to node mapping + */ +static void create_core_to_node_map(void) +{ + int i; + + emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL); + if (emu_cores == NULL) + panic("Could not allocate cores to node memory"); + for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) + emu_cores->to_node_id[i] = NODE_ID_FREE; +} + +/* + * Move cores from physical topology into NUMA target topology + * and try to keep as much of the physical topology as possible. + */ +static struct toptree *toptree_to_numa(struct toptree *phys) +{ + static int first = 1; + struct toptree *numa; + int cores_total; + + cores_total = emu_cores->total + cores_free(phys); + emu_cores->per_node_target = cores_total / emu_nodes; + numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes); + if (first) { + toptree_to_numa_first(numa, phys); + first = 0; + } + toptree_to_numa_single(numa, phys, 0); + toptree_to_numa_single(numa, phys, 1); + toptree_unify_tree(numa); + + WARN_ON(cpumask_weight(&phys->mask)); + return numa; +} + +/* + * Create a toptree out of the physical topology that we got from the hypervisor + */ +static struct toptree *toptree_from_topology(void) +{ + struct toptree *phys, *node, *book, *mc, *core; + struct cpu_topology_s390 *top; + int cpu; + + phys = toptree_new(TOPTREE_ID_PHYS, 1); + + for_each_online_cpu(cpu) { + top = &per_cpu(cpu_topology, cpu); + node = toptree_get_child(phys, 0); + book = toptree_get_child(node, top->book_id); + mc = toptree_get_child(book, top->socket_id); + core = toptree_get_child(mc, top->core_id); + if (!book || !mc || !core) + panic("NUMA emulation could not allocate memory"); + cpumask_set_cpu(cpu, &core->mask); + toptree_update_mask(mc); + } + return phys; +} + +/* + * Add toptree core to topology and create correct CPU masks + */ +static void topology_add_core(struct toptree *core) +{ + struct cpu_topology_s390 *top; + int cpu; + + for_each_cpu(cpu, &core->mask) { + top = &per_cpu(cpu_topology, cpu); + cpumask_copy(&top->thread_mask, &core->mask); + cpumask_copy(&top->core_mask, &core_mc(core)->mask); + cpumask_copy(&top->book_mask, &core_book(core)->mask); + cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); + top->node_id = core_node(core)->id; + } +} + +/* + * Apply toptree to topology and create CPU masks + */ +static void toptree_to_topology(struct toptree *numa) +{ + 
struct toptree *core; + int i; + + /* Clear all node masks */ + for (i = 0; i < MAX_NUMNODES; i++) + cpumask_clear(node_to_cpumask_map[i]); + + /* Rebuild all masks */ + toptree_for_each(core, numa, CORE) + topology_add_core(core); +} + +/* + * Show the node to core mapping + */ +static void print_node_to_core_map(void) +{ + int nid, cid; + + if (!numa_debug_enabled) + return; + printk(KERN_DEBUG "NUMA node to core mapping\n"); + for (nid = 0; nid < emu_nodes; nid++) { + printk(KERN_DEBUG " node %3d: ", nid); + for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) { + if (emu_cores->to_node_id[cid] == nid) + printk(KERN_CONT "%d ", cid); + } + printk(KERN_CONT "\n"); + } +} + +/* + * Transfer physical topology into a NUMA topology and modify CPU masks + * according to the NUMA topology. + * + * Must be called with "sched_domains_mutex" lock held. + */ +static void emu_update_cpu_topology(void) +{ + struct toptree *phys, *numa; + + if (emu_cores == NULL) + create_core_to_node_map(); + phys = toptree_from_topology(); + numa = toptree_to_numa(phys); + toptree_free(phys); + toptree_to_topology(numa); + toptree_free(numa); + print_node_to_core_map(); +} + +/* + * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum + * alignment (needed for memory hotplug). + */ +static unsigned long emu_setup_size_adjust(unsigned long size) +{ + size = size ? : CONFIG_EMU_SIZE; + size = roundup(size, memory_block_size_bytes()); + return size; +} + +/* + * If we have not enough memory for the specified nodes, reduce the node count. + */ +static int emu_setup_nodes_adjust(int nodes) +{ + int nodes_max; + + nodes_max = memblock.memory.total_size / emu_size; + nodes_max = max(nodes_max, 1); + if (nodes_max >= nodes) + return nodes; + pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes); + return nodes_max; +} + +/* + * Early emu setup + */ +static void emu_setup(void) +{ + emu_size = emu_setup_size_adjust(emu_size); + emu_nodes = emu_setup_nodes_adjust(emu_nodes); + pr_info("Creating %d nodes with memory stripe size %ld MB\n", + emu_nodes, emu_size >> 20); +} + +/* + * Return node id for given page number + */ +static int emu_pfn_to_nid(unsigned long pfn) +{ + return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes; +} + +/* + * Return stripe size + */ +static unsigned long emu_align(void) +{ + return emu_size; +} + +/* + * Return distance between two nodes + */ +static int emu_distance(int node1, int node2) +{ + return (node1 != node2) * EMU_NODE_DIST; +} + +/* + * Define callbacks for generic s390 NUMA infrastructure + */ +const struct numa_mode numa_mode_emu = { + .name = "emu", + .setup = emu_setup, + .update_cpu_topology = emu_update_cpu_topology, + .__pfn_to_nid = emu_pfn_to_nid, + .align = emu_align, + .distance = emu_distance, +}; + +/* + * Kernel parameter: emu_nodes=<n> + */ +static int __init early_parse_emu_nodes(char *p) +{ + int count; + + if (kstrtoint(p, 0, &count) != 0 || count <= 0) + return 0; + if (count <= 0) + return 0; + emu_nodes = min(count, MAX_NUMNODES); + return 0; +} +early_param("emu_nodes", early_parse_emu_nodes); + +/* + * Kernel parameter: emu_size=[<n>[k|M|G|T]] + */ +static int __init early_parse_emu_size(char *p) +{ + emu_size = memparse(p, NULL); + return 0; +} +early_param("emu_size", early_parse_emu_size); diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c new file mode 100644 index 000000000000..09b1d2355bd9 --- /dev/null +++ b/arch/s390/numa/numa.c @@ -0,0 +1,184 @@ +/* + * NUMA support for s390 + * + * Implement NUMA core code. 
+ * + * Copyright IBM Corp. 2015 + */ + +#define KMSG_COMPONENT "numa" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/mmzone.h> +#include <linux/cpumask.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/slab.h> +#include <linux/node.h> + +#include <asm/numa.h> +#include "numa_mode.h" + +pg_data_t *node_data[MAX_NUMNODES]; +EXPORT_SYMBOL(node_data); + +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +const struct numa_mode numa_mode_plain = { + .name = "plain", +}; + +static const struct numa_mode *mode = &numa_mode_plain; + +int numa_pfn_to_nid(unsigned long pfn) +{ + return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0; +} + +void numa_update_cpu_topology(void) +{ + if (mode->update_cpu_topology) + mode->update_cpu_topology(); +} + +int __node_distance(int a, int b) +{ + return mode->distance ? mode->distance(a, b) : 0; +} + +int numa_debug_enabled; + +/* + * alloc_node_data() - Allocate node data + */ +static __init pg_data_t *alloc_node_data(void) +{ + pg_data_t *res; + + res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1); + if (!res) + panic("Could not allocate memory for node data!\n"); + memset(res, 0, sizeof(pg_data_t)); + return res; +} + +/* + * numa_setup_memory() - Assign bootmem to nodes + * + * The memory is first added to memblock without any respect to nodes. + * This is fixed before remaining memblock memory is handed over to the + * buddy allocator. + * An important side effect is that large bootmem allocations might easily + * cross node boundaries, which can be needed for large allocations with + * smaller memory stripes in each node (i.e. when using NUMA emulation). + * + * Memory defines nodes: + * Therefore this routine also sets the nodes online with memory. + */ +static void __init numa_setup_memory(void) +{ + unsigned long cur_base, align, end_of_dram; + int nid = 0; + + end_of_dram = memblock_end_of_DRAM(); + align = mode->align ? mode->align() : ULONG_MAX; + + /* + * Step through all available memory and assign it to the nodes + * indicated by the mode implementation. + * All nodes which are seen here will be set online. + */ + cur_base = 0; + do { + nid = numa_pfn_to_nid(PFN_DOWN(cur_base)); + node_set_online(nid); + memblock_set_node(cur_base, align, &memblock.memory, nid); + cur_base += align; + } while (cur_base < end_of_dram); + + /* Allocate and fill out node_data */ + for (nid = 0; nid < MAX_NUMNODES; nid++) + NODE_DATA(nid) = alloc_node_data(); + + for_each_online_node(nid) { + unsigned long start_pfn, end_pfn; + unsigned long t_start, t_end; + int i; + + start_pfn = ULONG_MAX; + end_pfn = 0; + for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) { + if (t_start < start_pfn) + start_pfn = t_start; + if (t_end > end_pfn) + end_pfn = t_end; + } + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; + NODE_DATA(nid)->node_id = nid; + } +} + +/* + * numa_setup() - Earliest initialization + * + * Assign the mode and call the mode's setup routine. + */ +void __init numa_setup(void) +{ + pr_info("NUMA mode: %s\n", mode->name); + if (mode->setup) + mode->setup(); + numa_setup_memory(); + memblock_dump_all(); +} + + +/* + * numa_init_early() - Initialization initcall + * + * This runs when only one CPU is online and before the first + * topology update is called for by the scheduler. + */ +static int __init numa_init_early(void) +{ + /* Attach all possible CPUs to node 0 for now. 
*/ + cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); + return 0; +} +early_initcall(numa_init_early); + +/* + * numa_init_late() - Initialization initcall + * + * Register NUMA nodes. + */ +static int __init numa_init_late(void) +{ + int nid; + + for_each_online_node(nid) + register_one_node(nid); + return 0; +} +device_initcall(numa_init_late); + +static int __init parse_debug(char *parm) +{ + numa_debug_enabled = 1; + return 0; +} +early_param("numa_debug", parse_debug); + +static int __init parse_numa(char *parm) +{ + if (strcmp(parm, numa_mode_plain.name) == 0) + mode = &numa_mode_plain; +#ifdef CONFIG_NUMA_EMU + if (strcmp(parm, numa_mode_emu.name) == 0) + mode = &numa_mode_emu; +#endif + return 0; +} +early_param("numa", parse_numa); diff --git a/arch/s390/numa/numa_mode.h b/arch/s390/numa/numa_mode.h new file mode 100644 index 000000000000..08953b0b1c7f --- /dev/null +++ b/arch/s390/numa/numa_mode.h @@ -0,0 +1,24 @@ +/* + * NUMA support for s390 + * + * Define declarations used for communication between NUMA mode + * implementations and NUMA core functionality. + * + * Copyright IBM Corp. 2015 + */ +#ifndef __S390_NUMA_MODE_H +#define __S390_NUMA_MODE_H + +struct numa_mode { + char *name; /* Name of mode */ + void (*setup)(void); /* Initialize mode */ + void (*update_cpu_topology)(void); /* Called by topology code */ + int (*__pfn_to_nid)(unsigned long pfn); /* PFN to node ID */ + unsigned long (*align)(void); /* Minimum node alignment */ + int (*distance)(int a, int b); /* Distance between two nodes */ +}; + +extern const struct numa_mode numa_mode_plain; +extern const struct numa_mode numa_mode_emu; + +#endif /* __S390_NUMA_MODE_H */ diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c new file mode 100644 index 000000000000..902d350d859a --- /dev/null +++ b/arch/s390/numa/toptree.c @@ -0,0 +1,342 @@ +/* + * NUMA support for s390 + * + * A tree structure used for machine topology mangling + * + * Copyright IBM Corp. 2015 + */ + +#include <linux/kernel.h> +#include <linux/cpumask.h> +#include <linux/list.h> +#include <linux/list_sort.h> +#include <linux/slab.h> +#include <asm/numa.h> + +#include "toptree.h" + +/** + * toptree_alloc - Allocate and initialize a new tree node. + * @level: The node's vertical level; level 0 contains the leaves. + * @id: ID number, explicitly not unique beyond scope of node's siblings + * + * Allocate a new tree node and initialize it. + * + * RETURNS: + * Pointer to the new tree node or NULL on error + */ +struct toptree *toptree_alloc(int level, int id) +{ + struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL); + + if (!res) + return res; + + INIT_LIST_HEAD(&res->children); + INIT_LIST_HEAD(&res->sibling); + cpumask_clear(&res->mask); + res->level = level; + res->id = id; + return res; +} + +/** + * toptree_remove - Remove a tree node from a tree + * @cand: Pointer to the node to remove + * + * The node is detached from its parent node. The parent node's + * masks will be updated to reflect the loss of the child. + */ +static void toptree_remove(struct toptree *cand) +{ + struct toptree *oldparent; + + list_del_init(&cand->sibling); + oldparent = cand->parent; + cand->parent = NULL; + toptree_update_mask(oldparent); +} + +/** + * toptree_free - discard a tree node + * @cand: Pointer to the tree node to discard + * + * Checks if @cand is attached to a parent node. Detaches it + * cleanly using toptree_remove. Possible children are freed + * recursively. In the end @cand itself is freed.
+ */ +void toptree_free(struct toptree *cand) +{ + struct toptree *child, *tmp; + + if (cand->parent) + toptree_remove(cand); + toptree_for_each_child_safe(child, tmp, cand) + toptree_free(child); + kfree(cand); +} + +/** + * toptree_update_mask - Update node bitmasks + * @cand: Pointer to a tree node + * + * The node's cpumask will be updated by combining all children's + * masks. Then toptree_update_mask is called recursively for the + * parent if applicable. + * + * NOTE: + * This must not be called on leaves. If called on a leaf, its + * CPU mask is cleared and lost. + */ +void toptree_update_mask(struct toptree *cand) +{ + struct toptree *child; + + cpumask_clear(&cand->mask); + list_for_each_entry(child, &cand->children, sibling) + cpumask_or(&cand->mask, &cand->mask, &child->mask); + if (cand->parent) + toptree_update_mask(cand->parent); +} + +/** + * toptree_insert - Insert a tree node into tree + * @cand: Pointer to the node to insert + * @target: Pointer to the node to which @cand will be added as a child + * + * Insert a tree node into a tree. Masks will be updated automatically. + * + * RETURNS: + * 0 on success, -1 if NULL is passed as argument or the node levels + * don't fit. + */ +static int toptree_insert(struct toptree *cand, struct toptree *target) +{ + if (!cand || !target) + return -1; + if (target->level != (cand->level + 1)) + return -1; + list_add_tail(&cand->sibling, &target->children); + cand->parent = target; + toptree_update_mask(target); + return 0; +} + +/** + * toptree_move_children - Move all child nodes of a node to a new place + * @cand: Pointer to the node whose children are to be moved + * @target: Pointer to the node to which @cand's children will be attached + * + * Take all child nodes of @cand and move them using toptree_move. + */ +static void toptree_move_children(struct toptree *cand, struct toptree *target) +{ + struct toptree *child, *tmp; + + toptree_for_each_child_safe(child, tmp, cand) + toptree_move(child, target); +} + +/** + * toptree_unify - Merge children with same ID + * @cand: Pointer to node whose direct children should be made unique + * + * When mangling the tree it is possible that a node has two or more children + * which have the same ID. This routine merges these children into one and + * moves all children of the merged nodes into the unified node. + */ +void toptree_unify(struct toptree *cand) +{ + struct toptree *child, *tmp, *cand_copy; + + /* Threads cannot be split, cores are not split */ + if (cand->level < 2) + return; + + cand_copy = toptree_alloc(cand->level, 0); + toptree_for_each_child_safe(child, tmp, cand) { + struct toptree *tmpchild; + + if (!cpumask_empty(&child->mask)) { + tmpchild = toptree_get_child(cand_copy, child->id); + toptree_move_children(child, tmpchild); + } + toptree_free(child); + } + toptree_move_children(cand_copy, cand); + toptree_free(cand_copy); + + toptree_for_each_child(child, cand) + toptree_unify(child); +} + +/** + * toptree_move - Move a node to another context + * @cand: Pointer to the node to move + * @target: Pointer to the node where @cand should go + * + * In the easiest case @cand is exactly on the level below @target + * and will be immediately moved to the target. + * + * If @target's level is not the direct parent level of @cand, + * nodes for the missing levels are created and put between + * @cand and @target. The "stacking" nodes' IDs are taken from + * @cand's parents.
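+ * For example, moving a level-0 node directly below a level-3 target + * allocates stacking nodes for levels 1 and 2, whose IDs are copied from + * the moved node's former level-1 and level-2 ancestors.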
+ * + * After this it is likely to have redundant nodes in the tree + * which are addressed by means of toptree_unify. + */ +void toptree_move(struct toptree *cand, struct toptree *target) +{ + struct toptree *stack_target, *real_insert_point, *ptr, *tmp; + + if (cand->level + 1 == target->level) { + toptree_remove(cand); + toptree_insert(cand, target); + return; + } + + real_insert_point = NULL; + ptr = cand; + stack_target = NULL; + + do { + tmp = stack_target; + stack_target = toptree_alloc(ptr->level + 1, + ptr->parent->id); + toptree_insert(tmp, stack_target); + if (!real_insert_point) + real_insert_point = stack_target; + ptr = ptr->parent; + } while (stack_target->level < (target->level - 1)); + + toptree_remove(cand); + toptree_insert(cand, real_insert_point); + toptree_insert(stack_target, target); +} + +/** + * toptree_get_child - Access a tree node's child by its ID + * @cand: Pointer to tree node whose child is to access + * @id: The desired child's ID + * + * @cand's children are searched for a child with matching ID. + * If no match can be found, a new child with the desired ID + * is created and returned. + */ +struct toptree *toptree_get_child(struct toptree *cand, int id) +{ + struct toptree *child; + + toptree_for_each_child(child, cand) + if (child->id == id) + return child; + child = toptree_alloc(cand->level-1, id); + toptree_insert(child, cand); + return child; +} + +/** + * toptree_first - Find the first descendant on specified level + * @context: Pointer to tree node whose descendants are to be used + * @level: The level of interest + * + * RETURNS: + * @context's first descendant on the specified level, or NULL + * if there is no matching descendant + */ +struct toptree *toptree_first(struct toptree *context, int level) +{ + struct toptree *child, *tmp; + + if (context->level == level) + return context; + + if (!list_empty(&context->children)) { + list_for_each_entry(child, &context->children, sibling) { + tmp = toptree_first(child, level); + if (tmp) + return tmp; + } + } + return NULL; +} + +/** + * toptree_next_sibling - Return next sibling + * @cur: Pointer to a tree node + * + * RETURNS: + * If @cur has a parent and is not the last in the parent's children list, + * the next sibling is returned. Or NULL when there are no siblings left. + */ +static struct toptree *toptree_next_sibling(struct toptree *cur) +{ + if (cur->parent == NULL) + return NULL; + + if (cur == list_last_entry(&cur->parent->children, + struct toptree, sibling)) + return NULL; + return (struct toptree *) list_next_entry(cur, sibling); +} + +/** + * toptree_next - Tree traversal function + * @cur: Pointer to current element + * @context: Pointer to the root node of the tree or subtree to + * be traversed. + * @level: The level of interest. + * + * RETURNS: + * Pointer to the next node on level @level + * or NULL when there is no next node. 
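+ * + * Together with toptree_first(), this is what the toptree_for_each() macro + * uses to iterate over all level-@level descendants of @context.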
+ */ +struct toptree *toptree_next(struct toptree *cur, struct toptree *context, + int level) +{ + struct toptree *cur_context, *tmp; + + if (!cur) + return NULL; + + if (context->level == level) + return NULL; + + tmp = toptree_next_sibling(cur); + if (tmp != NULL) + return tmp; + + cur_context = cur; + while (cur_context->level < context->level - 1) { + /* Step up */ + cur_context = cur_context->parent; + /* Step aside */ + tmp = toptree_next_sibling(cur_context); + if (tmp != NULL) { + /* Step down */ + tmp = toptree_first(tmp, level); + if (tmp != NULL) + return tmp; + } + } + return NULL; +} + +/** + * toptree_count - Count descendants on specified level + * @context: Pointer to node whose descendants are to be considered + * @level: Only descendants on the specified level will be counted + * + * RETURNS: + * Number of descendants on the specified level + */ +int toptree_count(struct toptree *context, int level) +{ + struct toptree *cur; + int cnt = 0; + + toptree_for_each(cur, context, level) + cnt++; + return cnt; +} diff --git a/arch/s390/numa/toptree.h b/arch/s390/numa/toptree.h new file mode 100644 index 000000000000..bdf502027af4 --- /dev/null +++ b/arch/s390/numa/toptree.h @@ -0,0 +1,60 @@ +/* + * NUMA support for s390 + * + * A tree structure used for machine topology mangling + * + * Copyright IBM Corp. 2015 + */ +#ifndef S390_TOPTREE_H +#define S390_TOPTREE_H + +#include <linux/cpumask.h> +#include <linux/list.h> + +struct toptree { + int level; + int id; + cpumask_t mask; + struct toptree *parent; + struct list_head sibling; + struct list_head children; +}; + +struct toptree *toptree_alloc(int level, int id); +void toptree_free(struct toptree *cand); +void toptree_update_mask(struct toptree *cand); +void toptree_unify(struct toptree *cand); +struct toptree *toptree_get_child(struct toptree *cand, int id); +void toptree_move(struct toptree *cand, struct toptree *target); +int toptree_count(struct toptree *context, int level); + +struct toptree *toptree_first(struct toptree *context, int level); +struct toptree *toptree_next(struct toptree *cur, struct toptree *context, + int level); + +#define toptree_for_each_child(child, ptree) \ + list_for_each_entry(child, &ptree->children, sibling) + +#define toptree_for_each_child_safe(child, ptmp, ptree) \ + list_for_each_entry_safe(child, ptmp, &ptree->children, sibling) + +#define toptree_is_last(ptree) \ + ((ptree->parent == NULL) || \ + (ptree->parent->children.prev == &ptree->sibling)) + +#define toptree_for_each(ptree, cont, ttype) \ + for (ptree = toptree_first(cont, ttype); \ + ptree != NULL; \ + ptree = toptree_next(ptree, cont, ttype)) + +#define toptree_for_each_safe(ptree, tmp, cont, ttype) \ + for (ptree = toptree_first(cont, ttype), \ + tmp = toptree_next(ptree, cont, ttype); \ + ptree != NULL; \ + ptree = tmp, \ + tmp = toptree_next(ptree, cont, ttype)) + +#define toptree_for_each_sibling(ptree, start) \ + toptree_for_each(ptree, start->parent, start->level) + +#endif /* S390_TOPTREE_H */ diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 598f023cf8a6..17c04c7269e7 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -76,11 +76,6 @@ EXPORT_SYMBOL_GPL(zpci_iomap_start); static struct kmem_cache *zdev_fmb_cache; -struct zpci_dev *get_zdev(struct pci_dev *pdev) -{ - return (struct zpci_dev *) pdev->sysdata; -} - struct zpci_dev *get_zdev_by_fid(u32 fid) { struct zpci_dev *tmp, *zdev = NULL; @@ -269,7 +264,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, unsigned long offset, unsigned long max) { 
- struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); u64 addr; int idx; @@ -385,7 +380,7 @@ static void zpci_irq_handler(struct airq_struct *airq) int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); unsigned int hwirq, msi_vecs; unsigned long aisb; struct msi_desc *msi; @@ -460,7 +455,7 @@ out: void arch_teardown_msi_irqs(struct pci_dev *pdev) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); struct msi_desc *msi; int rc; @@ -637,7 +632,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) int i; for (i = 0; i < PCI_BAR_COUNT; i++) { - if (!zdev->bars[i].size) + if (!zdev->bars[i].size || !zdev->bars[i].res) continue; zpci_free_iomap(zdev, zdev->bars[i].map_idx); @@ -648,7 +643,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) int pcibios_add_device(struct pci_dev *pdev) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); struct resource *res; int i; @@ -673,7 +668,7 @@ void pcibios_release_device(struct pci_dev *pdev) int pcibios_enable_device(struct pci_dev *pdev, int mask) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); zdev->pdev = pdev; zpci_debug_init_device(zdev); @@ -684,7 +679,7 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) void pcibios_disable_device(struct pci_dev *pdev) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); zpci_fmb_disable_device(zdev); zpci_debug_exit_device(zdev); @@ -695,7 +690,7 @@ void pcibios_disable_device(struct pci_dev *pdev) static int zpci_restore(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); int ret = 0; if (zdev->state != ZPCI_FN_STATE_ONLINE) @@ -717,7 +712,7 @@ out: static int zpci_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); if (zdev->state != ZPCI_FN_STATE_ONLINE) return 0; @@ -777,17 +772,22 @@ static int zpci_scan_bus(struct zpci_dev *zdev) ret = zpci_setup_bus_resources(zdev, &resources); if (ret) - return ret; + goto error; zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, zdev, &resources); if (!zdev->bus) { - zpci_cleanup_bus_resources(zdev); - return -EIO; + ret = -EIO; + goto error; } zdev->bus->max_bus_speed = zdev->max_bus_speed; pci_bus_add_devices(zdev->bus); return 0; + +error: + zpci_cleanup_bus_resources(zdev); + pci_free_resource_list(&resources); + return ret; } int zpci_enable_device(struct zpci_dev *zdev) diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 6fd8d5836138..42b76580c8b8 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -277,7 +277,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, enum dma_data_direction direction, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); unsigned long nr_pages, iommu_page_index; unsigned long pa = page_to_phys(page) + offset; int flags = ZPCI_PTE_VALID; @@ -316,7 +316,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = 
to_zpci(to_pci_dev(dev)); unsigned long iommu_page_index; int npages; @@ -337,7 +337,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); struct page *page; unsigned long pa; dma_addr_t map; @@ -367,7 +367,7 @@ static void s390_dma_free(struct device *dev, size_t size, void *pa, dma_addr_t dma_handle, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); size = PAGE_ALIGN(size); atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index ed2394dd14e9..369a3e05d468 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -46,15 +46,13 @@ struct zpci_ccdf_avail { static void __zpci_event_error(struct zpci_ccdf_err *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); + struct pci_dev *pdev = zdev ? zdev->pdev : NULL; zpci_err("error CCDF:\n"); zpci_err_hex(ccdf, sizeof(*ccdf)); - if (!zdev) - return; - pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", - pci_name(zdev->pdev), ccdf->pec, ccdf->fid); + pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); } void zpci_event_error(void *data) @@ -89,7 +87,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) ret = zpci_enable_device(zdev); if (ret) break; + pci_lock_rescan_remove(); pci_rescan_bus(zdev->bus); + pci_unlock_rescan_remove(); break; case 0x0302: /* Reserved -> Standby */ if (!zdev) @@ -97,7 +97,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; case 0x0303: /* Deconfiguration requested */ if (pdev) - pci_stop_and_remove_bus_device(pdev); + pci_stop_and_remove_bus_device_locked(pdev); ret = zpci_disable_device(zdev); if (ret) @@ -114,7 +114,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) /* Give the driver a hint that the function is * already unusable. */ pdev->error_state = pci_channel_io_perm_failure; - pci_stop_and_remove_bus_device(pdev); + pci_stop_and_remove_bus_device_locked(pdev); } zdev->fh = ccdf->fh; diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index 85267c058af8..dcc2634ccbe2 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -8,10 +8,23 @@ #include <linux/errno.h> #include <linux/delay.h> #include <asm/pci_insn.h> +#include <asm/pci_debug.h> #include <asm/processor.h> #define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ +static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset) +{ + struct { + u8 cc; + u8 status; + u64 req; + u64 offset; + } data = {cc, status, req, offset}; + + zpci_err_hex(&data, sizeof(data)); +} + /* Modify PCI Function Controls */ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) { @@ -38,8 +51,8 @@ int zpci_mod_fc(u64 req, struct zpci_fib *fib) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d\n", - __func__, cc, status); + zpci_err_insn(cc, status, req, 0); + return (cc) ? -EIO : 0; } @@ -72,8 +85,8 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", - __func__, cc, status, addr, range); + zpci_err_insn(cc, status, addr, range); + return (cc) ? 
-EIO : 0; } @@ -121,8 +134,8 @@ int zpci_load(u64 *data, u64 req, u64 offset) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); + zpci_err_insn(cc, status, req, offset); + return (cc > 0) ? -EIO : cc; } EXPORT_SYMBOL_GPL(zpci_load); @@ -159,8 +172,8 @@ int zpci_store(u64 data, u64 req, u64 offset) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); + zpci_err_insn(cc, status, req, offset); + return (cc > 0) ? -EIO : cc; } EXPORT_SYMBOL_GPL(zpci_store); @@ -195,8 +208,8 @@ int zpci_store_block(const u64 *data, u64 req, u64 offset) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); + zpci_err_insn(cc, status, req, offset); + return (cc > 0) ? -EIO : cc; } EXPORT_SYMBOL_GPL(zpci_store_block); diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index fa3ce891e597..f37a5808883d 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c @@ -16,7 +16,7 @@ static ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); \ + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); \ \ return sprintf(buf, fmt, zdev->member); \ } \ @@ -38,23 +38,30 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); int ret; if (!device_remove_file_self(dev, attr)) return count; + pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pdev); ret = zpci_disable_device(zdev); if (ret) - return ret; + goto error; ret = zpci_enable_device(zdev); if (ret) - return ret; + goto error; pci_rescan_bus(zdev->bus); + pci_unlock_rescan_remove(); + return count; + +error: + pci_unlock_rescan_remove(); + return ret; } static DEVICE_ATTR_WO(recover); @@ -64,7 +71,7 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj, { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); return memory_read_from_buffer(buf, count, &off, zdev->util_str, sizeof(zdev->util_str)); diff --git a/arch/score/include/asm/switch_to.h b/arch/score/include/asm/switch_to.h index 031756b59ece..fda3f83308d2 100644 --- a/arch/score/include/asm/switch_to.h +++ b/arch/score/include/asm/switch_to.h @@ -8,6 +8,4 @@ do { \ (last) = resume(prev, next, task_thread_info(next)); \ } while (0) -#define finish_arch_switch(prev) do {} while (0) - #endif /* _ASM_SCORE_SWITCH_TO_H */ diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h index cbf763b3015e..0288efc17ff3 100644 --- a/arch/sh/drivers/pci/pci-sh4.h +++ b/arch/sh/drivers/pci/pci-sh4.h @@ -11,14 +11,6 @@ #include <asm/io.h> -/* startup values */ -#define PCI_PROBE_BIOS 1 -#define PCI_PROBE_CONF1 2 -#define PCI_PROBE_CONF2 4 -#define PCI_NO_CHECKS 0x400 -#define PCI_ASSIGN_ROMS 0x1000 -#define PCI_BIOS_IRQ_SCAN 0x2000 - #define SH4_PCICR 0x100 /* PCI Control Register */ #define SH4_PCICR_PREFIX 0xA5000000 /* CR prefix for write */ #define SH4_PCICR_FTO 0x00000400 /* TRDY/IRDY Enable */ diff --git a/arch/sh/include/asm/switch_to_32.h b/arch/sh/include/asm/switch_to_32.h index 0c065513e7ac..7661b4ba8259 100644 --- 
a/arch/sh/include/asm/switch_to_32.h +++ b/arch/sh/include/asm/switch_to_32.h @@ -78,6 +78,8 @@ do { \ \ if (is_dsp_enabled(prev)) \ __save_dsp(prev); \ + if (is_dsp_enabled(next)) \ + __restore_dsp(next); \ \ __ts1 = (u32 *)&prev->thread.sp; \ __ts2 = (u32 *)&prev->thread.pc; \ @@ -125,10 +127,4 @@ do { \ last = __last; \ } while (0) -#define finish_arch_switch(prev) \ -do { \ - if (is_dsp_enabled(prev)) \ - __restore_dsp(prev); \ -} while (0) - #endif /* __ASM_SH_SWITCH_TO_32_H */ diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 0a47bd3e7bee..4ca78ed71ad2 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -355,13 +355,12 @@ static int sq_dev_add(struct device *dev, struct subsys_interface *sif) return error; } -static int sq_dev_remove(struct device *dev, struct subsys_interface *sif) +static void sq_dev_remove(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; struct kobject *kobj = sq_kobject[cpu]; kobject_put(kobj); - return 0; } static struct subsys_interface sq_interface = { diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index c928bc64b4ba..3a0e1a986bfe 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -249,7 +249,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, struct pci_bus *bus, int devfn) { struct dev_archdata *sd; - struct pci_slot *slot; struct platform_device *op; struct pci_dev *dev; const char *type; @@ -290,10 +289,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->multifunction = 0; /* maybe a lie? */ set_pcie_port_type(dev); - list_for_each_entry(slot, &dev->bus->slots, list) - if (PCI_SLOT(dev->devfn) == slot->number) - dev->slot = slot; - + pci_dev_assign_slot(dev); dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); dev->device = of_getintprop_default(node, "device-id", 0xffff); dev->subsystem_vendor = diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 50e7b626afe8..c5113c7ce2fd 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -333,11 +333,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); /* - * A new process must start with interrupts closed in 2.5, - * because this is how Mingo's scheduler works (see schedule_tail - * and finish_arch_switch). If we do not do it, a timer interrupt hits - * before we unlock, attempts to re-take the rq->lock, and then we die. - * Thus, kpsr|=PSR_PIL. + * A new process must start with interrupts disabled, see schedule_tail() + * and finish_task_switch(). (If we do not do it and if a timer interrupt + * hits before we unlock and attempts to take the rq->lock, we deadlock.) + * + * Thus, kpsr |= PSR_PIL. */ ti->ksp = (unsigned long) new_stack; p->thread.kregs = childregs; diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h index b8f888cbe6b0..34ee72705521 100644 --- a/arch/tile/include/asm/switch_to.h +++ b/arch/tile/include/asm/switch_to.h @@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void); * Kernel threads can check to see if they need to migrate their * stack whenever they return from a context switch; for user * threads, we defer until they are returning to user-space. + * We defer homecache migration until the runqueue lock is released. 
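+ * This is why the hook used below is finish_arch_post_lock_switch() rather + * than the old finish_arch_switch().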
*/ -#define finish_arch_switch(prev) do { \ - if (unlikely((prev)->state == TASK_DEAD)) \ - __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ - ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ +#define finish_arch_post_lock_switch() do { \ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ if (current->mm == NULL && !kstack_hash && \ - current_thread_info()->homecache_cpu != smp_processor_id()) \ + current_thread_info()->homecache_cpu != raw_smp_processor_id()) \ homecache_migrate_kthread(); \ } while (0) diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index a45213781ad0..7d5769310bef 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -446,6 +446,11 @@ struct task_struct *__sched _switch_to(struct task_struct *prev, hardwall_switch_tasks(prev, next); #endif + /* Notify the simulator of task exit. */ + if (unlikely(prev->state == TASK_DEAD)) + __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | + (prev->pid << _SIM_CONTROL_OPERATOR_BITS)); + /* * Switch kernel SP, PC, and callee-saved registers. * In the context of the new task, return the old task pointer diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c index a3ed12f8f83b..825867c53853 100644 --- a/arch/tile/kernel/sysfs.c +++ b/arch/tile/kernel/sysfs.c @@ -198,16 +198,13 @@ static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif) return err; } -static int hv_stats_device_remove(struct device *dev, - struct subsys_interface *sif) +static void hv_stats_device_remove(struct device *dev, + struct subsys_interface *sif) { int cpu = dev->id; - if (!cpu_online(cpu)) - return 0; - - sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr); - return 0; + if (cpu_online(cpu)) + sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr); } diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h index 83a91f976330..35ab97e4bb9b 100644 --- a/arch/um/include/shared/kern_util.h +++ b/arch/um/include/shared/kern_util.h @@ -22,7 +22,8 @@ extern int kmalloc_ok; extern unsigned long alloc_stack(int order, int atomic); extern void free_stack(unsigned long stack, int order); -extern int do_signal(void); +struct pt_regs; +extern void do_signal(struct pt_regs *regs); extern void interrupt_end(void); extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 68b9119841cd..a6d922672b9f 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -90,12 +90,14 @@ void *__switch_to(struct task_struct *from, struct task_struct *to) void interrupt_end(void) { + struct pt_regs *regs = ¤t->thread.regs; + if (need_resched()) schedule(); if (test_thread_flag(TIF_SIGPENDING)) - do_signal(); + do_signal(regs); if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(¤t->thread.regs); + tracehook_notify_resume(regs); } void exit_thread(void) diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 4f60e4aad790..57acbd67d85d 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -64,7 +64,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) signal_setup_done(err, ksig, singlestep); } -static int kern_do_signal(struct pt_regs *regs) +void do_signal(struct pt_regs *regs) { struct ksignal ksig; int handled_sig = 0; @@ -110,10 +110,4 @@ static int kern_do_signal(struct pt_regs *regs) */ if (!handled_sig) restore_saved_sigmask(); - return 
handled_sig; -} - -int do_signal(void) -{ - return kern_do_signal(¤t->thread.regs); } diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index f1b3eb14b855..2077248e8a72 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -291,7 +291,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, /* We are under mmap_sem, release it such that current can terminate */ up_write(¤t->mm->mmap_sem); force_sig(SIGKILL, current); - do_signal(); + do_signal(¤t->thread.regs); } } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 557232f758b6..d8a9fce6ee2e 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -173,7 +173,7 @@ static void bad_segv(struct faultinfo fi, unsigned long ip) void fatal_sigsegv(void) { force_sigsegv(SIGSEGV, current); - do_signal(); + do_signal(¤t->thread.regs); /* * This is to tell gcc that we're not returning - do_signal * can, in general, return, but in this case, it's not, since diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b3a1a5d77d92..48f7433dac6f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -133,7 +133,7 @@ config X86 select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS - select HAVE_UID16 if X86_32 + select HAVE_UID16 if X86_32 || IA32_EMULATION select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_USER_RETURN_NOTIFIER select IRQ_FORCED_THREADING @@ -955,6 +955,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS config X86_MCE bool "Machine Check / overheating reporting" + select GENERIC_ALLOCATOR default y ---help--- Machine Check support allows the processor to notify the @@ -1002,19 +1003,41 @@ config X86_THERMAL_VECTOR def_bool y depends on X86_MCE_INTEL -config VM86 - bool "Enable VM86 support" if EXPERT - default y +config X86_LEGACY_VM86 + bool "Legacy VM86 support (obsolete)" + default n depends on X86_32 ---help--- - This option is required by programs like DOSEMU to run - 16-bit real mode legacy code on x86 processors. It also may - be needed by software like XFree86 to initialize some video - cards via BIOS. Disabling this option saves about 6K. + This option allows user programs to put the CPU into V8086 + mode, which is an 80286-era approximation of 16-bit real mode. + + Some very old versions of X and/or vbetool require this option + for user mode setting. Similarly, DOSEMU will use it if + available to accelerate real mode DOS programs. However, any + recent version of DOSEMU, X, or vbetool should be fully + functional even without kernel VM86 support, as they will all + fall back to (pretty well performing) software emulation. + + Anything that works on a 64-bit kernel is unlikely to need + this option, as 64-bit kernels don't, and can't, support V8086 + mode. This option is also unrelated to 16-bit protected mode + and is not needed to run most 16-bit programs under Wine. + + Enabling this option adds considerable attack surface to the + kernel and slows down system calls and exception handling. + + Unless you use very old userspace or need the last drop of + performance in your real mode DOS games and can't use KVM, + say N here. + +config VM86 + bool + default X86_LEGACY_VM86 config X86_16BIT bool "Enable support for 16-bit segments" if EXPERT default y + depends on MODIFY_LDT_SYSCALL ---help--- This option is required by programs like Wine to run 16-bit protected mode legacy code on x86 processors. 
Disabling @@ -1509,6 +1532,7 @@ config X86_RESERVE_LOW config MATH_EMULATION bool + depends on MODIFY_LDT_SYSCALL prompt "Math emulation" if X86_32 ---help--- Linux can emulate a math coprocessor (used for floating point @@ -2053,6 +2077,22 @@ config CMDLINE_OVERRIDE This is used to work around broken boot loaders. This should be set to 'N' under normal conditions. +config MODIFY_LDT_SYSCALL + bool "Enable the LDT (local descriptor table)" if EXPERT + default y + ---help--- + Linux can allow user programs to install a per-process x86 + Local Descriptor Table (LDT) using the modify_ldt(2) system + call. This is required to run 16-bit or segmented code such as + DOSEMU or some Wine programs. It is also used by some very old + threading libraries. + + Enabling this feature adds a small amount of overhead to + context switches and increases the low-level kernel attack + surface. Disabling it removes the modify_ldt(2) system call. + + Saying 'N' here may make sense for embedded or server kernels. + source "kernel/livepatch/Kconfig" endmenu @@ -2522,7 +2562,7 @@ config IA32_EMULATION depends on X86_64 select BINFMT_ELF select COMPAT_BINFMT_ELF - select HAVE_UID16 + select ARCH_WANT_OLD_COMPAT_IPC ---help--- Include code to run legacy 32-bit programs under a 64-bit kernel. You should likely turn this on, unless you're @@ -2536,7 +2576,7 @@ config IA32_AOUT config X86_X32 bool "x32 ABI for 64-bit mode" - depends on X86_64 && IA32_EMULATION + depends on X86_64 ---help--- Include code to run binaries for the x32 native 32-bit ABI for 64-bit processors. An x32 process gets access to the @@ -2550,7 +2590,6 @@ config X86_X32 config COMPAT def_bool y depends on IA32_EMULATION || X86_X32 - select ARCH_WANT_OLD_COMPAT_IPC if COMPAT config COMPAT_FOR_U64_ALIGNMENT diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 118e6debc483..747860c696e1 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -39,6 +39,16 @@ ifdef CONFIG_X86_NEED_RELOCS LDFLAGS_vmlinux := --emit-relocs endif +# +# Prevent GCC from generating any FP code by mistake. +# +# This must happen before we try the -mpreferred-stack-boundary, see: +# +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383 +# +KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow +KBUILD_CFLAGS += $(call cc-option,-mno-avx,) + ifeq ($(CONFIG_X86_32),y) BITS := 32 UTS_MACHINE := i386 @@ -167,9 +177,6 @@ KBUILD_CFLAGS += -pipe KBUILD_CFLAGS += -Wno-sign-compare # KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -# prevent gcc from generating any FP code by mistake -KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -KBUILD_CFLAGS += $(call cc-option,-mno-avx,) KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) @@ -212,6 +219,8 @@ drivers-$(CONFIG_PM) += arch/x86/power/ drivers-$(CONFIG_FB) += arch/x86/video/ +drivers-$(CONFIG_RAS) += arch/x86/ras/ + #### # boot loader support. 
Several targets are kept for legacy purposes diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 57bbf2fb21f6..0d553e54171b 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -23,7 +23,7 @@ targets += fdimage fdimage144 fdimage288 image.iso mtools.conf subdir- := compressed setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o -setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o +setup-y += early_serial_console.o edd.o header.o main.o memory.o setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o setup-y += video-mode.o version.o setup-$(CONFIG_X86_APM_BOOT) += apm.o diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index bd49ec61255c..0033e96c3f09 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -307,9 +307,6 @@ void query_edd(void); /* header.S */ void __attribute__((noreturn)) die(void); -/* mca.c */ -int query_mca(void); - /* memory.c */ int detect_memory(void); diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index d7b1f655b3ef..6a9b96b4624d 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -82,7 +82,7 @@ static unsigned long get_random_long(void) if (has_cpuflag(X86_FEATURE_TSC)) { debug_putstr(" RDTSC"); - rdtscll(raw); + raw = rdtsc(); random ^= raw; use_i8254 = false; diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 7d69afd8b6fa..ee1b6d346b98 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1041,7 +1041,6 @@ void setup_graphics(struct boot_params *boot_params) struct boot_params *make_boot_params(struct efi_config *c) { struct boot_params *boot_params; - struct sys_desc_table *sdt; struct apm_bios_info *bi; struct setup_header *hdr; struct efi_info *efi; @@ -1089,7 +1088,6 @@ struct boot_params *make_boot_params(struct efi_config *c) hdr = &boot_params->hdr; efi = &boot_params->efi_info; bi = &boot_params->apm_bios_info; - sdt = &boot_params->sys_desc_table; /* Copy the second sector to boot_params */ memcpy(&hdr->jump, image->image_base + 512, 512); @@ -1118,8 +1116,6 @@ struct boot_params *make_boot_params(struct efi_config *c) /* Clear APM BIOS info */ memset(bi, 0, sizeof(*bi)); - memset(sdt, 0, sizeof(*sdt)); - status = efi_parse_options(cmdline_ptr); if (status != EFI_SUCCESS) goto fail2; diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index a107b935e22f..f63797942bb5 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -220,6 +220,23 @@ void __putstr(const char *s) outb(0xff & (pos >> 1), vidport+1); } +void __puthex(unsigned long value) +{ + char alpha[2] = "0"; + int bits; + + for (bits = sizeof(value) * 8 - 4; bits >= 0; bits -= 4) { + unsigned long digit = (value >> bits) & 0xf; + + if (digit < 0xA) + alpha[0] = '0' + digit; + else + alpha[0] = 'a' + (digit - 0xA); + + __putstr(alpha); + } +} + static void error(char *x) { error_putstr("\n\n"); @@ -399,6 +416,13 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; + /* Report initial kernel position details. 
*/ + debug_putaddr(input_data); + debug_putaddr(input_len); + debug_putaddr(output); + debug_putaddr(output_len); + debug_putaddr(run_size); + /* * The memory hole needed for the kernel is the larger of either * the entire decompressed kernel plus relocation table, or the diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 805d25ca5f1d..3783dc3e10b3 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -34,16 +34,27 @@ extern memptr free_mem_ptr; extern memptr free_mem_end_ptr; extern struct boot_params *real_mode; /* Pointer to real-mode data */ void __putstr(const char *s); +void __puthex(unsigned long value); #define error_putstr(__x) __putstr(__x) +#define error_puthex(__x) __puthex(__x) #ifdef CONFIG_X86_VERBOSE_BOOTUP #define debug_putstr(__x) __putstr(__x) +#define debug_puthex(__x) __puthex(__x) +#define debug_putaddr(__x) { \ + debug_putstr(#__x ": 0x"); \ + debug_puthex((unsigned long)(__x)); \ + debug_putstr("\n"); \ + } #else static inline void debug_putstr(const char *s) { } +static inline void debug_puthex(const char *s) +{ } +#define debug_putaddr(x) /* */ #endif diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c index fd6c9f236996..9bcea386db65 100644 --- a/arch/x86/boot/main.c +++ b/arch/x86/boot/main.c @@ -161,9 +161,6 @@ void main(void) /* Set keyboard repeat rate (why?) and query the lock flags */ keyboard_init(); - /* Query MCA information */ - query_mca(); - /* Query Intel SpeedStep (IST) information */ query_ist(); diff --git a/arch/x86/boot/mca.c b/arch/x86/boot/mca.c deleted file mode 100644 index a95a531148ef..000000000000 --- a/arch/x86/boot/mca.c +++ /dev/null @@ -1,38 +0,0 @@ -/* -*- linux-c -*- ------------------------------------------------------- * - * - * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. 
- * - * ----------------------------------------------------------------------- */ - -/* - * Get the MCA system description table - */ - -#include "boot.h" - -int query_mca(void) -{ - struct biosregs ireg, oreg; - u16 len; - - initregs(&ireg); - ireg.ah = 0xc0; - intcall(0x15, &ireg, &oreg); - - if (oreg.eflags & X86_EFLAGS_CF) - return -1; /* No MCA present */ - - set_fs(oreg.es); - len = rdfs16(oreg.bx); - - if (len > sizeof(boot_params.sys_desc_table)) - len = sizeof(boot_params.sys_desc_table); - - copy_from_fs(&boot_params.sys_desc_table, oreg.bx, len); - return 0; -} diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index aaa1118bf01e..028be48c8839 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -23,6 +23,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 315b86106572..962297d244b3 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -22,6 +22,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 5a4a089e8b1f..9a2838cf0591 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o +obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o @@ -30,6 +31,7 @@ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o +obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o # These modules require assembler to support AVX. 
ifeq ($(avx_supported),yes) @@ -60,6 +62,7 @@ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o +chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o ifeq ($(avx_supported),yes) @@ -75,6 +78,7 @@ endif ifeq ($(avx2_supported),yes) camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o + chacha20-x86_64-y += chacha20-avx2-x86_64.o serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o endif @@ -82,8 +86,10 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o +poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o ifeq ($(avx2_supported),yes) sha1-ssse3-y += sha1_avx2_x86_64_asm.o +poly1305-x86_64-y += poly1305-avx2-x86_64.o endif crc32c-intel-y := crc32c-intel_glue.o crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index dccad38b59a8..3633ad6145c5 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -803,10 +803,7 @@ static int rfc4106_init(struct crypto_aead *aead) return PTR_ERR(cryptd_tfm); *ctx = cryptd_tfm; - crypto_aead_set_reqsize( - aead, - sizeof(struct aead_request) + - crypto_aead_reqsize(&cryptd_tfm->base)); + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); return 0; } @@ -955,8 +952,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req) /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length equal */ - /* to 8 or 12 bytes */ - if (unlikely(req->assoclen != 8 && req->assoclen != 12)) + /* to 16 or 20 bytes */ + if (unlikely(req->assoclen != 16 && req->assoclen != 20)) return -EINVAL; /* IV below built */ @@ -992,9 +989,9 @@ static int helper_rfc4106_encrypt(struct aead_request *req) } kernel_fpu_begin(); - aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, - ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst - + ((unsigned long)req->cryptlen), auth_tag_len); + aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv, + ctx->hash_subkey, assoc, req->assoclen - 8, + dst + req->cryptlen, auth_tag_len); kernel_fpu_end(); /* The authTag (aka the Integrity Check Value) needs to be written @@ -1033,12 +1030,12 @@ static int helper_rfc4106_decrypt(struct aead_request *req) struct scatter_walk dst_sg_walk; unsigned int i; - if (unlikely(req->assoclen != 8 && req->assoclen != 12)) + if (unlikely(req->assoclen != 16 && req->assoclen != 20)) return -EINVAL; /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length */ - /* equal to 8 or 12 bytes */ + /* equal to 16 or 20 bytes */ tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); /* IV below built */ @@ -1075,8 +1072,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req) kernel_fpu_begin(); aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, - ctx->hash_subkey, assoc, (unsigned long)req->assoclen, - authTag, auth_tag_len); + ctx->hash_subkey, assoc, req->assoclen - 8, + authTag, auth_tag_len); 
kernel_fpu_end(); /* Compare generated tag with passed in tag. */ @@ -1105,19 +1102,12 @@ static int rfc4106_encrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); struct cryptd_aead *cryptd_tfm = *ctx; - struct aead_request *subreq = aead_request_ctx(req); - aead_request_set_tfm(subreq, irq_fpu_usable() ? - cryptd_aead_child(cryptd_tfm) : - &cryptd_tfm->base); + aead_request_set_tfm(req, irq_fpu_usable() ? + cryptd_aead_child(cryptd_tfm) : + &cryptd_tfm->base); - aead_request_set_callback(subreq, req->base.flags, - req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - aead_request_set_ad(subreq, req->assoclen); - - return crypto_aead_encrypt(subreq); + return crypto_aead_encrypt(req); } static int rfc4106_decrypt(struct aead_request *req) @@ -1125,19 +1115,12 @@ static int rfc4106_decrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); struct cryptd_aead *cryptd_tfm = *ctx; - struct aead_request *subreq = aead_request_ctx(req); - - aead_request_set_tfm(subreq, irq_fpu_usable() ? - cryptd_aead_child(cryptd_tfm) : - &cryptd_tfm->base); - aead_request_set_callback(subreq, req->base.flags, - req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - aead_request_set_ad(subreq, req->assoclen); + aead_request_set_tfm(req, irq_fpu_usable() ? + cryptd_aead_child(cryptd_tfm) : + &cryptd_tfm->base); - return crypto_aead_decrypt(subreq); + return crypto_aead_decrypt(req); } #endif diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S new file mode 100644 index 000000000000..16694e625f77 --- /dev/null +++ b/arch/x86/crypto/chacha20-avx2-x86_64.S @@ -0,0 +1,443 @@ +/* + * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 32 + +ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 + .octa 0x0e0d0c0f0a09080b0605040702010003 +ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 + .octa 0x0d0c0f0e09080b0a0504070601000302 +CTRINC: .octa 0x00000003000000020000000100000000 + .octa 0x00000007000000060000000500000004 + +.text + +ENTRY(chacha20_8block_xor_avx2) + # %rdi: Input state matrix, s + # %rsi: 8 data blocks output, o + # %rdx: 8 data blocks input, i + + # This function encrypts eight consecutive ChaCha20 blocks by loading + # the state matrix in AVX registers eight times. As we need some + # scratch registers, we save the first four registers on the stack. The + # algorithm performs each operation on the corresponding word of each + # state matrix, hence requires no word shuffling. For final XORing step + # we transpose the matrix by interleaving 32-, 64- and then 128-bit + # words, which allows us to do XOR in AVX registers. 8/16-bit word + # rotation is done with the slightly better performing byte shuffling, + # 7/12-bit word rotation uses traditional shift+OR. 
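+ # + # For reference (RFC7539), each quarter-round interleaved below is, in C + # terms: a += b; d = rol32(d ^ a, 16); c += d; b = rol32(b ^ c, 12); + # a += b; d = rol32(d ^ a, 8); c += d; b = rol32(b ^ c, 7).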
+ + vzeroupper + # 4 * 32 byte stack, 32-byte aligned + mov %rsp, %r8 + and $~31, %rsp + sub $0x80, %rsp + + # x0..15[0-7] = s[0..15] + vpbroadcastd 0x00(%rdi),%ymm0 + vpbroadcastd 0x04(%rdi),%ymm1 + vpbroadcastd 0x08(%rdi),%ymm2 + vpbroadcastd 0x0c(%rdi),%ymm3 + vpbroadcastd 0x10(%rdi),%ymm4 + vpbroadcastd 0x14(%rdi),%ymm5 + vpbroadcastd 0x18(%rdi),%ymm6 + vpbroadcastd 0x1c(%rdi),%ymm7 + vpbroadcastd 0x20(%rdi),%ymm8 + vpbroadcastd 0x24(%rdi),%ymm9 + vpbroadcastd 0x28(%rdi),%ymm10 + vpbroadcastd 0x2c(%rdi),%ymm11 + vpbroadcastd 0x30(%rdi),%ymm12 + vpbroadcastd 0x34(%rdi),%ymm13 + vpbroadcastd 0x38(%rdi),%ymm14 + vpbroadcastd 0x3c(%rdi),%ymm15 + # x0..3 on stack + vmovdqa %ymm0,0x00(%rsp) + vmovdqa %ymm1,0x20(%rsp) + vmovdqa %ymm2,0x40(%rsp) + vmovdqa %ymm3,0x60(%rsp) + + vmovdqa CTRINC(%rip),%ymm1 + vmovdqa ROT8(%rip),%ymm2 + vmovdqa ROT16(%rip),%ymm3 + + # x12 += counter values 0-3 + vpaddd %ymm1,%ymm12,%ymm12 + + mov $10,%ecx + +.Ldoubleround8: + # x0 += x4, x12 = rotl32(x12 ^ x0, 16) + vpaddd 0x00(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm3,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 16) + vpaddd 0x20(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm3,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 16) + vpaddd 0x40(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm3,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 16) + vpaddd 0x60(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm3,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 12) + vpaddd %ymm12,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpslld $12,%ymm4,%ymm0 + vpsrld $20,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 12) + vpaddd %ymm13,%ymm9,%ymm9 + vpxor %ymm9,%ymm5,%ymm5 + vpslld $12,%ymm5,%ymm0 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 12) + vpaddd %ymm14,%ymm10,%ymm10 + vpxor %ymm10,%ymm6,%ymm6 + vpslld $12,%ymm6,%ymm0 + vpsrld $20,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 12) + vpaddd %ymm15,%ymm11,%ymm11 + vpxor %ymm11,%ymm7,%ymm7 + vpslld $12,%ymm7,%ymm0 + vpsrld $20,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + + # x0 += x4, x12 = rotl32(x12 ^ x0, 8) + vpaddd 0x00(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm2,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 8) + vpaddd 0x20(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm2,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 8) + vpaddd 0x40(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm2,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 8) + vpaddd 0x60(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm2,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 7) + vpaddd %ymm12,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpslld $7,%ymm4,%ymm0 + vpsrld $25,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 7) + vpaddd %ymm13,%ymm9,%ymm9 + vpxor %ymm9,%ymm5,%ymm5 + vpslld $7,%ymm5,%ymm0 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 7) + vpaddd %ymm14,%ymm10,%ymm10 + vpxor %ymm10,%ymm6,%ymm6 + vpslld $7,%ymm6,%ymm0 + vpsrld $25,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 7) + vpaddd %ymm15,%ymm11,%ymm11 + vpxor %ymm11,%ymm7,%ymm7 + vpslld $7,%ymm7,%ymm0 + vpsrld 
$25,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 16) + vpaddd 0x00(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm3,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 16)%ymm0 + vpaddd 0x20(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm3,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 16) + vpaddd 0x40(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm3,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 16) + vpaddd 0x60(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm3,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 12) + vpaddd %ymm15,%ymm10,%ymm10 + vpxor %ymm10,%ymm5,%ymm5 + vpslld $12,%ymm5,%ymm0 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 12) + vpaddd %ymm12,%ymm11,%ymm11 + vpxor %ymm11,%ymm6,%ymm6 + vpslld $12,%ymm6,%ymm0 + vpsrld $20,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 12) + vpaddd %ymm13,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpslld $12,%ymm7,%ymm0 + vpsrld $20,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 12) + vpaddd %ymm14,%ymm9,%ymm9 + vpxor %ymm9,%ymm4,%ymm4 + vpslld $12,%ymm4,%ymm0 + vpsrld $20,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 8) + vpaddd 0x00(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm2,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 8) + vpaddd 0x20(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm2,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 8) + vpaddd 0x40(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm2,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 8) + vpaddd 0x60(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm2,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 7) + vpaddd %ymm15,%ymm10,%ymm10 + vpxor %ymm10,%ymm5,%ymm5 + vpslld $7,%ymm5,%ymm0 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 7) + vpaddd %ymm12,%ymm11,%ymm11 + vpxor %ymm11,%ymm6,%ymm6 + vpslld $7,%ymm6,%ymm0 + vpsrld $25,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 7) + vpaddd %ymm13,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpslld $7,%ymm7,%ymm0 + vpsrld $25,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 7) + vpaddd %ymm14,%ymm9,%ymm9 + vpxor %ymm9,%ymm4,%ymm4 + vpslld $7,%ymm4,%ymm0 + vpsrld $25,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + + dec %ecx + jnz .Ldoubleround8 + + # x0..15[0-3] += s[0..15] + vpbroadcastd 0x00(%rdi),%ymm0 + vpaddd 0x00(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpbroadcastd 0x04(%rdi),%ymm0 + vpaddd 0x20(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpbroadcastd 0x08(%rdi),%ymm0 + vpaddd 0x40(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpbroadcastd 0x0c(%rdi),%ymm0 + vpaddd 0x60(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpbroadcastd 0x10(%rdi),%ymm0 + vpaddd %ymm0,%ymm4,%ymm4 + vpbroadcastd 0x14(%rdi),%ymm0 + vpaddd %ymm0,%ymm5,%ymm5 + vpbroadcastd 0x18(%rdi),%ymm0 + vpaddd %ymm0,%ymm6,%ymm6 + vpbroadcastd 0x1c(%rdi),%ymm0 + vpaddd %ymm0,%ymm7,%ymm7 + vpbroadcastd 0x20(%rdi),%ymm0 + vpaddd %ymm0,%ymm8,%ymm8 + vpbroadcastd 0x24(%rdi),%ymm0 + vpaddd %ymm0,%ymm9,%ymm9 + vpbroadcastd 0x28(%rdi),%ymm0 + vpaddd %ymm0,%ymm10,%ymm10 + vpbroadcastd 0x2c(%rdi),%ymm0 + 
vpaddd %ymm0,%ymm11,%ymm11 + vpbroadcastd 0x30(%rdi),%ymm0 + vpaddd %ymm0,%ymm12,%ymm12 + vpbroadcastd 0x34(%rdi),%ymm0 + vpaddd %ymm0,%ymm13,%ymm13 + vpbroadcastd 0x38(%rdi),%ymm0 + vpaddd %ymm0,%ymm14,%ymm14 + vpbroadcastd 0x3c(%rdi),%ymm0 + vpaddd %ymm0,%ymm15,%ymm15 + + # x12 += counter values 0-3 + vpaddd %ymm1,%ymm12,%ymm12 + + # interleave 32-bit words in state n, n+1 + vmovdqa 0x00(%rsp),%ymm0 + vmovdqa 0x20(%rsp),%ymm1 + vpunpckldq %ymm1,%ymm0,%ymm2 + vpunpckhdq %ymm1,%ymm0,%ymm1 + vmovdqa %ymm2,0x00(%rsp) + vmovdqa %ymm1,0x20(%rsp) + vmovdqa 0x40(%rsp),%ymm0 + vmovdqa 0x60(%rsp),%ymm1 + vpunpckldq %ymm1,%ymm0,%ymm2 + vpunpckhdq %ymm1,%ymm0,%ymm1 + vmovdqa %ymm2,0x40(%rsp) + vmovdqa %ymm1,0x60(%rsp) + vmovdqa %ymm4,%ymm0 + vpunpckldq %ymm5,%ymm0,%ymm4 + vpunpckhdq %ymm5,%ymm0,%ymm5 + vmovdqa %ymm6,%ymm0 + vpunpckldq %ymm7,%ymm0,%ymm6 + vpunpckhdq %ymm7,%ymm0,%ymm7 + vmovdqa %ymm8,%ymm0 + vpunpckldq %ymm9,%ymm0,%ymm8 + vpunpckhdq %ymm9,%ymm0,%ymm9 + vmovdqa %ymm10,%ymm0 + vpunpckldq %ymm11,%ymm0,%ymm10 + vpunpckhdq %ymm11,%ymm0,%ymm11 + vmovdqa %ymm12,%ymm0 + vpunpckldq %ymm13,%ymm0,%ymm12 + vpunpckhdq %ymm13,%ymm0,%ymm13 + vmovdqa %ymm14,%ymm0 + vpunpckldq %ymm15,%ymm0,%ymm14 + vpunpckhdq %ymm15,%ymm0,%ymm15 + + # interleave 64-bit words in state n, n+2 + vmovdqa 0x00(%rsp),%ymm0 + vmovdqa 0x40(%rsp),%ymm2 + vpunpcklqdq %ymm2,%ymm0,%ymm1 + vpunpckhqdq %ymm2,%ymm0,%ymm2 + vmovdqa %ymm1,0x00(%rsp) + vmovdqa %ymm2,0x40(%rsp) + vmovdqa 0x20(%rsp),%ymm0 + vmovdqa 0x60(%rsp),%ymm2 + vpunpcklqdq %ymm2,%ymm0,%ymm1 + vpunpckhqdq %ymm2,%ymm0,%ymm2 + vmovdqa %ymm1,0x20(%rsp) + vmovdqa %ymm2,0x60(%rsp) + vmovdqa %ymm4,%ymm0 + vpunpcklqdq %ymm6,%ymm0,%ymm4 + vpunpckhqdq %ymm6,%ymm0,%ymm6 + vmovdqa %ymm5,%ymm0 + vpunpcklqdq %ymm7,%ymm0,%ymm5 + vpunpckhqdq %ymm7,%ymm0,%ymm7 + vmovdqa %ymm8,%ymm0 + vpunpcklqdq %ymm10,%ymm0,%ymm8 + vpunpckhqdq %ymm10,%ymm0,%ymm10 + vmovdqa %ymm9,%ymm0 + vpunpcklqdq %ymm11,%ymm0,%ymm9 + vpunpckhqdq %ymm11,%ymm0,%ymm11 + vmovdqa %ymm12,%ymm0 + vpunpcklqdq %ymm14,%ymm0,%ymm12 + vpunpckhqdq %ymm14,%ymm0,%ymm14 + vmovdqa %ymm13,%ymm0 + vpunpcklqdq %ymm15,%ymm0,%ymm13 + vpunpckhqdq %ymm15,%ymm0,%ymm15 + + # interleave 128-bit words in state n, n+4 + vmovdqa 0x00(%rsp),%ymm0 + vperm2i128 $0x20,%ymm4,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm4,%ymm0,%ymm4 + vmovdqa %ymm1,0x00(%rsp) + vmovdqa 0x20(%rsp),%ymm0 + vperm2i128 $0x20,%ymm5,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm5,%ymm0,%ymm5 + vmovdqa %ymm1,0x20(%rsp) + vmovdqa 0x40(%rsp),%ymm0 + vperm2i128 $0x20,%ymm6,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm6,%ymm0,%ymm6 + vmovdqa %ymm1,0x40(%rsp) + vmovdqa 0x60(%rsp),%ymm0 + vperm2i128 $0x20,%ymm7,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm7,%ymm0,%ymm7 + vmovdqa %ymm1,0x60(%rsp) + vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 + vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 + vmovdqa %ymm0,%ymm8 + vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 + vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 + vmovdqa %ymm0,%ymm9 + vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 + vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 + vmovdqa %ymm0,%ymm10 + vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 + vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 + vmovdqa %ymm0,%ymm11 + + # xor with corresponding input, write to output + vmovdqa 0x00(%rsp),%ymm0 + vpxor 0x0000(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0000(%rsi) + vmovdqa 0x20(%rsp),%ymm0 + vpxor 0x0080(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0080(%rsi) + vmovdqa 0x40(%rsp),%ymm0 + vpxor 0x0040(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0040(%rsi) + vmovdqa 0x60(%rsp),%ymm0 + vpxor 0x00c0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x00c0(%rsi) + vpxor 
0x0100(%rdx),%ymm4,%ymm4 + vmovdqu %ymm4,0x0100(%rsi) + vpxor 0x0180(%rdx),%ymm5,%ymm5 + vmovdqu %ymm5,0x0180(%rsi) + vpxor 0x0140(%rdx),%ymm6,%ymm6 + vmovdqu %ymm6,0x0140(%rsi) + vpxor 0x01c0(%rdx),%ymm7,%ymm7 + vmovdqu %ymm7,0x01c0(%rsi) + vpxor 0x0020(%rdx),%ymm8,%ymm8 + vmovdqu %ymm8,0x0020(%rsi) + vpxor 0x00a0(%rdx),%ymm9,%ymm9 + vmovdqu %ymm9,0x00a0(%rsi) + vpxor 0x0060(%rdx),%ymm10,%ymm10 + vmovdqu %ymm10,0x0060(%rsi) + vpxor 0x00e0(%rdx),%ymm11,%ymm11 + vmovdqu %ymm11,0x00e0(%rsi) + vpxor 0x0120(%rdx),%ymm12,%ymm12 + vmovdqu %ymm12,0x0120(%rsi) + vpxor 0x01a0(%rdx),%ymm13,%ymm13 + vmovdqu %ymm13,0x01a0(%rsi) + vpxor 0x0160(%rdx),%ymm14,%ymm14 + vmovdqu %ymm14,0x0160(%rsi) + vpxor 0x01e0(%rdx),%ymm15,%ymm15 + vmovdqu %ymm15,0x01e0(%rsi) + + vzeroupper + mov %r8,%rsp + ret +ENDPROC(chacha20_8block_xor_avx2) diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S new file mode 100644 index 000000000000..712b13047b41 --- /dev/null +++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S @@ -0,0 +1,625 @@ +/* + * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 16 + +ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 +ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 +CTRINC: .octa 0x00000003000000020000000100000000 + +.text + +ENTRY(chacha20_block_xor_ssse3) + # %rdi: Input state matrix, s + # %rsi: 1 data block output, o + # %rdx: 1 data block input, i + + # This function encrypts one ChaCha20 block by loading the state matrix + # in four SSE registers. It performs matrix operations on four words in + # parallel, but requires shuffling to rearrange the words after each + # round. 8/16-bit word rotation is done with the slightly better + # performing SSSE3 byte shuffling, 7/12-bit word rotation uses + # traditional shift+OR.
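For reference, the operation this routine vectorizes is the standard RFC 7539 ChaCha20 double round. The following scalar C sketch is illustrative only and not part of the patch; it assumes the usual 16-word little-endian state x[16] and shows the 16/12/8/7 rotation pattern that the pshufb (16- and 8-bit rotates) and shift+OR (12- and 7-bit rotates) instructions implement:

    /* Illustrative reference sketch, not part of this patch. */
    #include <stdint.h>

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    /* One quarter round on state words a, b, c, d. */
    #define QUARTERROUND(a, b, c, d) do {                         \
            x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 16);         \
            x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 12);         \
            x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a],  8);         \
            x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c],  7);         \
    } while (0)

    static void chacha20_doubleround(uint32_t x[16])
    {
            /* column round: one quarter round per column of the 4x4 matrix */
            QUARTERROUND(0, 4,  8, 12);
            QUARTERROUND(1, 5,  9, 13);
            QUARTERROUND(2, 6, 10, 14);
            QUARTERROUND(3, 7, 11, 15);
            /* diagonal round: reached in the asm by pshufd-rotating rows 1-3 */
            QUARTERROUND(0, 5, 10, 15);
            QUARTERROUND(1, 6, 11, 12);
            QUARTERROUND(2, 7,  8, 13);
            QUARTERROUND(3, 4,  9, 14);
    }

The assembly keeps each matrix row in one XMM register, so a single paddd/pxor/pshufb sequence performs a quarter round on all four columns at once; ten iterations of .Ldoubleround give the 20 rounds of ChaCha20.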
+ + # x0..3 = s0..3 + movdqa 0x00(%rdi),%xmm0 + movdqa 0x10(%rdi),%xmm1 + movdqa 0x20(%rdi),%xmm2 + movdqa 0x30(%rdi),%xmm3 + movdqa %xmm0,%xmm8 + movdqa %xmm1,%xmm9 + movdqa %xmm2,%xmm10 + movdqa %xmm3,%xmm11 + + movdqa ROT8(%rip),%xmm4 + movdqa ROT16(%rip),%xmm5 + + mov $10,%ecx + +.Ldoubleround: + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm5,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm6 + pslld $12,%xmm6 + psrld $20,%xmm1 + por %xmm6,%xmm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm4,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm7 + pslld $7,%xmm7 + psrld $25,%xmm1 + por %xmm7,%xmm1 + + # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) + pshufd $0x39,%xmm1,%xmm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + pshufd $0x4e,%xmm2,%xmm2 + # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) + pshufd $0x93,%xmm3,%xmm3 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm5,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm6 + pslld $12,%xmm6 + psrld $20,%xmm1 + por %xmm6,%xmm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm4,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm7 + pslld $7,%xmm7 + psrld $25,%xmm1 + por %xmm7,%xmm1 + + # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) + pshufd $0x93,%xmm1,%xmm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + pshufd $0x4e,%xmm2,%xmm2 + # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) + pshufd $0x39,%xmm3,%xmm3 + + dec %ecx + jnz .Ldoubleround + + # o0 = i0 ^ (x0 + s0) + movdqu 0x00(%rdx),%xmm4 + paddd %xmm8,%xmm0 + pxor %xmm4,%xmm0 + movdqu %xmm0,0x00(%rsi) + # o1 = i1 ^ (x1 + s1) + movdqu 0x10(%rdx),%xmm5 + paddd %xmm9,%xmm1 + pxor %xmm5,%xmm1 + movdqu %xmm1,0x10(%rsi) + # o2 = i2 ^ (x2 + s2) + movdqu 0x20(%rdx),%xmm6 + paddd %xmm10,%xmm2 + pxor %xmm6,%xmm2 + movdqu %xmm2,0x20(%rsi) + # o3 = i3 ^ (x3 + s3) + movdqu 0x30(%rdx),%xmm7 + paddd %xmm11,%xmm3 + pxor %xmm7,%xmm3 + movdqu %xmm3,0x30(%rsi) + + ret +ENDPROC(chacha20_block_xor_ssse3) + +ENTRY(chacha20_4block_xor_ssse3) + # %rdi: Input state matrix, s + # %rsi: 4 data blocks output, o + # %rdx: 4 data blocks input, i + + # This function encrypts four consecutive ChaCha20 blocks by loading the + # the state matrix in SSE registers four times. As we need some scratch + # registers, we save the first four registers on the stack. The + # algorithm performs each operation on the corresponding word of each + # state matrix, hence requires no word shuffling. For final XORing step + # we transpose the matrix by interleaving 32- and then 64-bit words, + # which allows us to do XOR in SSE registers. 8/16-bit word rotation is + # done with the slightly better performing SSSE3 byte shuffling, + # 7/12-bit word rotation uses traditional shift+OR. 
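The four-block routine below avoids the word shuffling entirely by switching to a lane-parallel layout: each XMM register (or stack slot for x0..x3) holds the same state word of four consecutive blocks, and CTRINC gives each lane its own block counter. A rough C sketch of that layout, illustrative only and not taken from the patch:

    /* Illustrative sketch: x[w][lane] is word w of block `lane`, so each
     * x[w][0..3] corresponds to one 128-bit register or stack slot. */
    #include <stdint.h>

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    static void qr_lanes(uint32_t x[16][4], int a, int b, int c, int d)
    {
            for (int l = 0; l < 4; l++) {   /* one SIMD instruction per asm line */
                    x[a][l] += x[b][l]; x[d][l] = ROTL32(x[d][l] ^ x[a][l], 16);
                    x[c][l] += x[d][l]; x[b][l] = ROTL32(x[b][l] ^ x[c][l], 12);
                    x[a][l] += x[b][l]; x[d][l] = ROTL32(x[d][l] ^ x[a][l],  8);
                    x[c][l] += x[d][l]; x[b][l] = ROTL32(x[b][l] ^ x[c][l],  7);
            }
    }

    static void chacha20_4block_keystream(const uint32_t s[16], uint32_t out[4][16])
    {
            uint32_t x[16][4];

            for (int w = 0; w < 16; w++)
                    for (int l = 0; l < 4; l++)
                            x[w][l] = s[w];
            for (int l = 0; l < 4; l++)
                    x[12][l] += l;                  /* per-lane counter, like CTRINC */

            for (int i = 0; i < 10; i++) {
                    qr_lanes(x, 0, 4,  8, 12);      /* column rounds */
                    qr_lanes(x, 1, 5,  9, 13);
                    qr_lanes(x, 2, 6, 10, 14);
                    qr_lanes(x, 3, 7, 11, 15);
                    qr_lanes(x, 0, 5, 10, 15);      /* diagonal rounds */
                    qr_lanes(x, 1, 6, 11, 12);
                    qr_lanes(x, 2, 7,  8, 13);
                    qr_lanes(x, 3, 4,  9, 14);
            }

            /* add the per-block input state and transpose lanes back into four
             * consecutive blocks, as the punpckldq/punpcklqdq interleaving does
             * before the final XOR with the input data */
            for (int w = 0; w < 16; w++)
                    for (int l = 0; l < 4; l++)
                            out[l][w] = x[w][l] + s[w] + (w == 12 ? (uint32_t)l : 0);
    }

The keystream produced this way is XORed against 256 bytes of input; the AVX2 routine above applies the same idea with eight 32-bit lanes per YMM register, covering 512 bytes per call.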
+ + sub $0x40,%rsp + + # x0..15[0-3] = s0..3[0..3] + movq 0x00(%rdi),%xmm1 + pshufd $0x00,%xmm1,%xmm0 + pshufd $0x55,%xmm1,%xmm1 + movq 0x08(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + movq 0x10(%rdi),%xmm5 + pshufd $0x00,%xmm5,%xmm4 + pshufd $0x55,%xmm5,%xmm5 + movq 0x18(%rdi),%xmm7 + pshufd $0x00,%xmm7,%xmm6 + pshufd $0x55,%xmm7,%xmm7 + movq 0x20(%rdi),%xmm9 + pshufd $0x00,%xmm9,%xmm8 + pshufd $0x55,%xmm9,%xmm9 + movq 0x28(%rdi),%xmm11 + pshufd $0x00,%xmm11,%xmm10 + pshufd $0x55,%xmm11,%xmm11 + movq 0x30(%rdi),%xmm13 + pshufd $0x00,%xmm13,%xmm12 + pshufd $0x55,%xmm13,%xmm13 + movq 0x38(%rdi),%xmm15 + pshufd $0x00,%xmm15,%xmm14 + pshufd $0x55,%xmm15,%xmm15 + # x0..3 on stack + movdqa %xmm0,0x00(%rsp) + movdqa %xmm1,0x10(%rsp) + movdqa %xmm2,0x20(%rsp) + movdqa %xmm3,0x30(%rsp) + + movdqa CTRINC(%rip),%xmm1 + movdqa ROT8(%rip),%xmm2 + movdqa ROT16(%rip),%xmm3 + + # x12 += counter values 0-3 + paddd %xmm1,%xmm12 + + mov $10,%ecx + +.Ldoubleround4: + # x0 += x4, x12 = rotl32(x12 ^ x0, 16) + movdqa 0x00(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm3,%xmm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 16) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm5,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm3,%xmm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 16) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm3,%xmm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 16) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm3,%xmm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 12) + paddd %xmm12,%xmm8 + pxor %xmm8,%xmm4 + movdqa %xmm4,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm4 + por %xmm0,%xmm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 12) + paddd %xmm13,%xmm9 + pxor %xmm9,%xmm5 + movdqa %xmm5,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm5 + por %xmm0,%xmm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 12) + paddd %xmm14,%xmm10 + pxor %xmm10,%xmm6 + movdqa %xmm6,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm6 + por %xmm0,%xmm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 12) + paddd %xmm15,%xmm11 + pxor %xmm11,%xmm7 + movdqa %xmm7,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm7 + por %xmm0,%xmm7 + + # x0 += x4, x12 = rotl32(x12 ^ x0, 8) + movdqa 0x00(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm2,%xmm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 8) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm5,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm2,%xmm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 8) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm2,%xmm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 8) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm2,%xmm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 7) + paddd %xmm12,%xmm8 + pxor %xmm8,%xmm4 + movdqa %xmm4,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm4 + por %xmm0,%xmm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 7) + paddd %xmm13,%xmm9 + pxor %xmm9,%xmm5 + movdqa %xmm5,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm5 + por %xmm0,%xmm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 7) + paddd %xmm14,%xmm10 + pxor %xmm10,%xmm6 + movdqa %xmm6,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm6 + por %xmm0,%xmm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 7) + paddd %xmm15,%xmm11 + pxor %xmm11,%xmm7 + movdqa %xmm7,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm7 + por %xmm0,%xmm7 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 16) + movdqa 0x00(%rsp),%xmm0 + paddd 
%xmm5,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm3,%xmm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 16) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm3,%xmm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 16) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm3,%xmm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 16) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm3,%xmm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 12) + paddd %xmm15,%xmm10 + pxor %xmm10,%xmm5 + movdqa %xmm5,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm5 + por %xmm0,%xmm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 12) + paddd %xmm12,%xmm11 + pxor %xmm11,%xmm6 + movdqa %xmm6,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm6 + por %xmm0,%xmm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 12) + paddd %xmm13,%xmm8 + pxor %xmm8,%xmm7 + movdqa %xmm7,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm7 + por %xmm0,%xmm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 12) + paddd %xmm14,%xmm9 + pxor %xmm9,%xmm4 + movdqa %xmm4,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm4 + por %xmm0,%xmm4 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 8) + movdqa 0x00(%rsp),%xmm0 + paddd %xmm5,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm2,%xmm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 8) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm2,%xmm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 8) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm2,%xmm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 8) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm2,%xmm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 7) + paddd %xmm15,%xmm10 + pxor %xmm10,%xmm5 + movdqa %xmm5,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm5 + por %xmm0,%xmm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 7) + paddd %xmm12,%xmm11 + pxor %xmm11,%xmm6 + movdqa %xmm6,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm6 + por %xmm0,%xmm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 7) + paddd %xmm13,%xmm8 + pxor %xmm8,%xmm7 + movdqa %xmm7,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm7 + por %xmm0,%xmm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 7) + paddd %xmm14,%xmm9 + pxor %xmm9,%xmm4 + movdqa %xmm4,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm4 + por %xmm0,%xmm4 + + dec %ecx + jnz .Ldoubleround4 + + # x0[0-3] += s0[0] + # x1[0-3] += s0[1] + movq 0x00(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd 0x00(%rsp),%xmm2 + movdqa %xmm2,0x00(%rsp) + paddd 0x10(%rsp),%xmm3 + movdqa %xmm3,0x10(%rsp) + # x2[0-3] += s0[2] + # x3[0-3] += s0[3] + movq 0x08(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd 0x20(%rsp),%xmm2 + movdqa %xmm2,0x20(%rsp) + paddd 0x30(%rsp),%xmm3 + movdqa %xmm3,0x30(%rsp) + + # x4[0-3] += s1[0] + # x5[0-3] += s1[1] + movq 0x10(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm4 + paddd %xmm3,%xmm5 + # x6[0-3] += s1[2] + # x7[0-3] += s1[3] + movq 0x18(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm6 + paddd %xmm3,%xmm7 + + # x8[0-3] += s2[0] + # x9[0-3] += s2[1] + movq 0x20(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm8 + paddd %xmm3,%xmm9 + # x10[0-3] += s2[2] + # x11[0-3] += s2[3] + movq 0x28(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm10 + paddd %xmm3,%xmm11 + + # 
x12[0-3] += s3[0] + # x13[0-3] += s3[1] + movq 0x30(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm12 + paddd %xmm3,%xmm13 + # x14[0-3] += s3[2] + # x15[0-3] += s3[3] + movq 0x38(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm14 + paddd %xmm3,%xmm15 + + # x12 += counter values 0-3 + paddd %xmm1,%xmm12 + + # interleave 32-bit words in state n, n+1 + movdqa 0x00(%rsp),%xmm0 + movdqa 0x10(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpckldq %xmm1,%xmm2 + punpckhdq %xmm1,%xmm0 + movdqa %xmm2,0x00(%rsp) + movdqa %xmm0,0x10(%rsp) + movdqa 0x20(%rsp),%xmm0 + movdqa 0x30(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpckldq %xmm1,%xmm2 + punpckhdq %xmm1,%xmm0 + movdqa %xmm2,0x20(%rsp) + movdqa %xmm0,0x30(%rsp) + movdqa %xmm4,%xmm0 + punpckldq %xmm5,%xmm4 + punpckhdq %xmm5,%xmm0 + movdqa %xmm0,%xmm5 + movdqa %xmm6,%xmm0 + punpckldq %xmm7,%xmm6 + punpckhdq %xmm7,%xmm0 + movdqa %xmm0,%xmm7 + movdqa %xmm8,%xmm0 + punpckldq %xmm9,%xmm8 + punpckhdq %xmm9,%xmm0 + movdqa %xmm0,%xmm9 + movdqa %xmm10,%xmm0 + punpckldq %xmm11,%xmm10 + punpckhdq %xmm11,%xmm0 + movdqa %xmm0,%xmm11 + movdqa %xmm12,%xmm0 + punpckldq %xmm13,%xmm12 + punpckhdq %xmm13,%xmm0 + movdqa %xmm0,%xmm13 + movdqa %xmm14,%xmm0 + punpckldq %xmm15,%xmm14 + punpckhdq %xmm15,%xmm0 + movdqa %xmm0,%xmm15 + + # interleave 64-bit words in state n, n+2 + movdqa 0x00(%rsp),%xmm0 + movdqa 0x20(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpcklqdq %xmm1,%xmm2 + punpckhqdq %xmm1,%xmm0 + movdqa %xmm2,0x00(%rsp) + movdqa %xmm0,0x20(%rsp) + movdqa 0x10(%rsp),%xmm0 + movdqa 0x30(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpcklqdq %xmm1,%xmm2 + punpckhqdq %xmm1,%xmm0 + movdqa %xmm2,0x10(%rsp) + movdqa %xmm0,0x30(%rsp) + movdqa %xmm4,%xmm0 + punpcklqdq %xmm6,%xmm4 + punpckhqdq %xmm6,%xmm0 + movdqa %xmm0,%xmm6 + movdqa %xmm5,%xmm0 + punpcklqdq %xmm7,%xmm5 + punpckhqdq %xmm7,%xmm0 + movdqa %xmm0,%xmm7 + movdqa %xmm8,%xmm0 + punpcklqdq %xmm10,%xmm8 + punpckhqdq %xmm10,%xmm0 + movdqa %xmm0,%xmm10 + movdqa %xmm9,%xmm0 + punpcklqdq %xmm11,%xmm9 + punpckhqdq %xmm11,%xmm0 + movdqa %xmm0,%xmm11 + movdqa %xmm12,%xmm0 + punpcklqdq %xmm14,%xmm12 + punpckhqdq %xmm14,%xmm0 + movdqa %xmm0,%xmm14 + movdqa %xmm13,%xmm0 + punpcklqdq %xmm15,%xmm13 + punpckhqdq %xmm15,%xmm0 + movdqa %xmm0,%xmm15 + + # xor with corresponding input, write to output + movdqa 0x00(%rsp),%xmm0 + movdqu 0x00(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x00(%rsi) + movdqa 0x10(%rsp),%xmm0 + movdqu 0x80(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x80(%rsi) + movdqa 0x20(%rsp),%xmm0 + movdqu 0x40(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x40(%rsi) + movdqa 0x30(%rsp),%xmm0 + movdqu 0xc0(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0xc0(%rsi) + movdqu 0x10(%rdx),%xmm1 + pxor %xmm1,%xmm4 + movdqu %xmm4,0x10(%rsi) + movdqu 0x90(%rdx),%xmm1 + pxor %xmm1,%xmm5 + movdqu %xmm5,0x90(%rsi) + movdqu 0x50(%rdx),%xmm1 + pxor %xmm1,%xmm6 + movdqu %xmm6,0x50(%rsi) + movdqu 0xd0(%rdx),%xmm1 + pxor %xmm1,%xmm7 + movdqu %xmm7,0xd0(%rsi) + movdqu 0x20(%rdx),%xmm1 + pxor %xmm1,%xmm8 + movdqu %xmm8,0x20(%rsi) + movdqu 0xa0(%rdx),%xmm1 + pxor %xmm1,%xmm9 + movdqu %xmm9,0xa0(%rsi) + movdqu 0x60(%rdx),%xmm1 + pxor %xmm1,%xmm10 + movdqu %xmm10,0x60(%rsi) + movdqu 0xe0(%rdx),%xmm1 + pxor %xmm1,%xmm11 + movdqu %xmm11,0xe0(%rsi) + movdqu 0x30(%rdx),%xmm1 + pxor %xmm1,%xmm12 + movdqu %xmm12,0x30(%rsi) + movdqu 0xb0(%rdx),%xmm1 + pxor %xmm1,%xmm13 + movdqu %xmm13,0xb0(%rsi) + movdqu 0x70(%rdx),%xmm1 + pxor %xmm1,%xmm14 + movdqu %xmm14,0x70(%rsi) + movdqu 0xf0(%rdx),%xmm1 + pxor 
%xmm1,%xmm15 + movdqu %xmm15,0xf0(%rsi) + + add $0x40,%rsp + ret +ENDPROC(chacha20_4block_xor_ssse3) diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c new file mode 100644 index 000000000000..effe2160b7c5 --- /dev/null +++ b/arch/x86/crypto/chacha20_glue.c @@ -0,0 +1,150 @@ +/* + * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <crypto/algapi.h> +#include <crypto/chacha20.h> +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/simd.h> + +#define CHACHA20_STATE_ALIGN 16 + +asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src); +asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src); +#ifdef CONFIG_AS_AVX2 +asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src); +static bool chacha20_use_avx2; +#endif + +static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes) +{ + u8 buf[CHACHA20_BLOCK_SIZE]; + +#ifdef CONFIG_AS_AVX2 + if (chacha20_use_avx2) { + while (bytes >= CHACHA20_BLOCK_SIZE * 8) { + chacha20_8block_xor_avx2(state, dst, src); + bytes -= CHACHA20_BLOCK_SIZE * 8; + src += CHACHA20_BLOCK_SIZE * 8; + dst += CHACHA20_BLOCK_SIZE * 8; + state[12] += 8; + } + } +#endif + while (bytes >= CHACHA20_BLOCK_SIZE * 4) { + chacha20_4block_xor_ssse3(state, dst, src); + bytes -= CHACHA20_BLOCK_SIZE * 4; + src += CHACHA20_BLOCK_SIZE * 4; + dst += CHACHA20_BLOCK_SIZE * 4; + state[12] += 4; + } + while (bytes >= CHACHA20_BLOCK_SIZE) { + chacha20_block_xor_ssse3(state, dst, src); + bytes -= CHACHA20_BLOCK_SIZE; + src += CHACHA20_BLOCK_SIZE; + dst += CHACHA20_BLOCK_SIZE; + state[12]++; + } + if (bytes) { + memcpy(buf, src, bytes); + chacha20_block_xor_ssse3(state, buf, buf); + memcpy(dst, buf, bytes); + } +} + +static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1]; + struct blkcipher_walk walk; + int err; + + if (!may_use_simd()) + return crypto_chacha20_crypt(desc, dst, src, nbytes); + + state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); + + crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); + + kernel_fpu_begin(); + + while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { + chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, + rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); + err = blkcipher_walk_done(desc, &walk, + walk.nbytes % CHACHA20_BLOCK_SIZE); + } + + if (walk.nbytes) { + chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); + err = blkcipher_walk_done(desc, &walk, 0); + } + + kernel_fpu_end(); + + return err; +} + +static struct crypto_alg alg = { + .cra_name = "chacha20", + .cra_driver_name = "chacha20-simd", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_type = &crypto_blkcipher_type, + .cra_ctxsize = sizeof(struct chacha20_ctx), + .cra_alignmask = sizeof(u32) - 1, + .cra_module = THIS_MODULE, + .cra_u = { + 
.blkcipher = { + .min_keysize = CHACHA20_KEY_SIZE, + .max_keysize = CHACHA20_KEY_SIZE, + .ivsize = CHACHA20_IV_SIZE, + .geniv = "seqiv", + .setkey = crypto_chacha20_setkey, + .encrypt = chacha20_simd, + .decrypt = chacha20_simd, + }, + }, +}; + +static int __init chacha20_simd_mod_init(void) +{ + if (!cpu_has_ssse3) + return -ENODEV; + +#ifdef CONFIG_AS_AVX2 + chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 && + cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); +#endif + return crypto_register_alg(&alg); +} + +static void __exit chacha20_simd_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(chacha20_simd_mod_init); +module_exit(chacha20_simd_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-simd"); diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S new file mode 100644 index 000000000000..eff2f414e22b --- /dev/null +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S @@ -0,0 +1,386 @@ +/* + * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 32 + +ANMASK: .octa 0x0000000003ffffff0000000003ffffff + .octa 0x0000000003ffffff0000000003ffffff +ORMASK: .octa 0x00000000010000000000000001000000 + .octa 0x00000000010000000000000001000000 + +.text + +#define h0 0x00(%rdi) +#define h1 0x04(%rdi) +#define h2 0x08(%rdi) +#define h3 0x0c(%rdi) +#define h4 0x10(%rdi) +#define r0 0x00(%rdx) +#define r1 0x04(%rdx) +#define r2 0x08(%rdx) +#define r3 0x0c(%rdx) +#define r4 0x10(%rdx) +#define u0 0x00(%r8) +#define u1 0x04(%r8) +#define u2 0x08(%r8) +#define u3 0x0c(%r8) +#define u4 0x10(%r8) +#define w0 0x14(%r8) +#define w1 0x18(%r8) +#define w2 0x1c(%r8) +#define w3 0x20(%r8) +#define w4 0x24(%r8) +#define y0 0x28(%r8) +#define y1 0x2c(%r8) +#define y2 0x30(%r8) +#define y3 0x34(%r8) +#define y4 0x38(%r8) +#define m %rsi +#define hc0 %ymm0 +#define hc1 %ymm1 +#define hc2 %ymm2 +#define hc3 %ymm3 +#define hc4 %ymm4 +#define hc0x %xmm0 +#define hc1x %xmm1 +#define hc2x %xmm2 +#define hc3x %xmm3 +#define hc4x %xmm4 +#define t1 %ymm5 +#define t2 %ymm6 +#define t1x %xmm5 +#define t2x %xmm6 +#define ruwy0 %ymm7 +#define ruwy1 %ymm8 +#define ruwy2 %ymm9 +#define ruwy3 %ymm10 +#define ruwy4 %ymm11 +#define ruwy0x %xmm7 +#define ruwy1x %xmm8 +#define ruwy2x %xmm9 +#define ruwy3x %xmm10 +#define ruwy4x %xmm11 +#define svxz1 %ymm12 +#define svxz2 %ymm13 +#define svxz3 %ymm14 +#define svxz4 %ymm15 +#define d0 %r9 +#define d1 %r10 +#define d2 %r11 +#define d3 %r12 +#define d4 %r13 + +ENTRY(poly1305_4block_avx2) + # %rdi: Accumulator h[5] + # %rsi: 64 byte input block m + # %rdx: Poly1305 key r[5] + # %rcx: Quadblock count + # %r8: Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5], + + # This four-block variant uses loop unrolled block processing. 
It + # requires 4 Poly1305 keys: r, r^2, r^3 and r^4: + # h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r + + vzeroupper + push %rbx + push %r12 + push %r13 + + # combine r0,u0,w0,y0 + vmovd y0,ruwy0x + vmovd w0,t1x + vpunpcklqdq t1,ruwy0,ruwy0 + vmovd u0,t1x + vmovd r0,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy0,ruwy0 + + # combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5 + vmovd y1,ruwy1x + vmovd w1,t1x + vpunpcklqdq t1,ruwy1,ruwy1 + vmovd u1,t1x + vmovd r1,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy1,ruwy1 + vpslld $2,ruwy1,svxz1 + vpaddd ruwy1,svxz1,svxz1 + + # combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5 + vmovd y2,ruwy2x + vmovd w2,t1x + vpunpcklqdq t1,ruwy2,ruwy2 + vmovd u2,t1x + vmovd r2,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy2,ruwy2 + vpslld $2,ruwy2,svxz2 + vpaddd ruwy2,svxz2,svxz2 + + # combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5 + vmovd y3,ruwy3x + vmovd w3,t1x + vpunpcklqdq t1,ruwy3,ruwy3 + vmovd u3,t1x + vmovd r3,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy3,ruwy3 + vpslld $2,ruwy3,svxz3 + vpaddd ruwy3,svxz3,svxz3 + + # combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5 + vmovd y4,ruwy4x + vmovd w4,t1x + vpunpcklqdq t1,ruwy4,ruwy4 + vmovd u4,t1x + vmovd r4,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy4,ruwy4 + vpslld $2,ruwy4,svxz4 + vpaddd ruwy4,svxz4,svxz4 + +.Ldoblock4: + # hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff, + # m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0] + vmovd 0x00(m),hc0x + vmovd 0x10(m),t1x + vpunpcklqdq t1,hc0,hc0 + vmovd 0x20(m),t1x + vmovd 0x30(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc0,hc0 + vpand ANMASK(%rip),hc0,hc0 + vmovd h0,t1x + vpaddd t1,hc0,hc0 + # hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff, + # (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1] + vmovd 0x03(m),hc1x + vmovd 0x13(m),t1x + vpunpcklqdq t1,hc1,hc1 + vmovd 0x23(m),t1x + vmovd 0x33(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc1,hc1 + vpsrld $2,hc1,hc1 + vpand ANMASK(%rip),hc1,hc1 + vmovd h1,t1x + vpaddd t1,hc1,hc1 + # hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff, + # (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2] + vmovd 0x06(m),hc2x + vmovd 0x16(m),t1x + vpunpcklqdq t1,hc2,hc2 + vmovd 0x26(m),t1x + vmovd 0x36(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc2,hc2 + vpsrld $4,hc2,hc2 + vpand ANMASK(%rip),hc2,hc2 + vmovd h2,t1x + vpaddd t1,hc2,hc2 + # hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff, + # (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3] + vmovd 0x09(m),hc3x + vmovd 0x19(m),t1x + vpunpcklqdq t1,hc3,hc3 + vmovd 0x29(m),t1x + vmovd 0x39(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc3,hc3 + vpsrld $6,hc3,hc3 + vpand ANMASK(%rip),hc3,hc3 + vmovd h3,t1x + vpaddd t1,hc3,hc3 + # hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24), + # (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4] + vmovd 0x0c(m),hc4x + vmovd 0x1c(m),t1x + vpunpcklqdq t1,hc4,hc4 + vmovd 0x2c(m),t1x + vmovd 0x3c(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc4,hc4 + vpsrld $8,hc4,hc4 + vpor ORMASK(%rip),hc4,hc4 + vmovd h4,t1x + vpaddd t1,hc4,hc4 + + # t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ] + vpmuludq hc0,ruwy0,t1 + # t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ] + vpmuludq hc1,svxz4,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ] + vpmuludq hc2,svxz3,t2 + vpaddq 
t2,t1,t1 + # t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ] + vpmuludq hc3,svxz2,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ] + vpmuludq hc4,svxz1,t2 + vpaddq t2,t1,t1 + # d0 = t1[0] + t1[1] + t[2] + t[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d0 + + # t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ] + vpmuludq hc0,ruwy1,t1 + # t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ] + vpmuludq hc1,ruwy0,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ] + vpmuludq hc2,svxz4,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ] + vpmuludq hc3,svxz3,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ] + vpmuludq hc4,svxz2,t2 + vpaddq t2,t1,t1 + # d1 = t1[0] + t1[1] + t1[3] + t1[4] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d1 + + # t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ] + vpmuludq hc0,ruwy2,t1 + # t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ] + vpmuludq hc1,ruwy1,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ] + vpmuludq hc2,ruwy0,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ] + vpmuludq hc3,svxz4,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ] + vpmuludq hc4,svxz3,t2 + vpaddq t2,t1,t1 + # d2 = t1[0] + t1[1] + t1[2] + t1[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d2 + + # t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ] + vpmuludq hc0,ruwy3,t1 + # t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ] + vpmuludq hc1,ruwy2,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ] + vpmuludq hc2,ruwy1,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ] + vpmuludq hc3,ruwy0,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ] + vpmuludq hc4,svxz4,t2 + vpaddq t2,t1,t1 + # d3 = t1[0] + t1[1] + t1[2] + t1[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d3 + + # t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ] + vpmuludq hc0,ruwy4,t1 + # t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ] + vpmuludq hc1,ruwy3,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ] + vpmuludq hc2,ruwy2,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ] + vpmuludq hc3,ruwy1,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ] + vpmuludq hc4,ruwy0,t2 + vpaddq t2,t1,t1 + # d4 = t1[0] + t1[1] + t1[2] + t1[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d4 + + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax + add %rax,d1 + # h0 = d0 & 0x3ffffff + mov d0,%rbx + and $0x3ffffff,%ebx + + # d2 += d1 >> 26 + mov d1,%rax + shr $26,%rax + add %rax,d2 + # h1 = d1 & 0x3ffffff + mov d1,%rax + and $0x3ffffff,%eax + mov %eax,h1 + + # d3 += d2 >> 26 + mov d2,%rax + shr $26,%rax + add %rax,d3 + # h2 = d2 & 0x3ffffff + mov d2,%rax + and $0x3ffffff,%eax + mov %eax,h2 + + # d4 += d3 >> 26 + mov d3,%rax + shr $26,%rax + add %rax,d4 + # h3 = d3 & 0x3ffffff + mov d3,%rax + and $0x3ffffff,%eax + mov %eax,h3 + + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax + lea 
(%eax,%eax,4),%eax + add %eax,%ebx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 + mov %ebx,%eax + shr $26,%eax + add %eax,h1 + # h0 = h0 & 0x3ffffff + andl $0x3ffffff,%ebx + mov %ebx,h0 + + add $0x40,m + dec %rcx + jnz .Ldoblock4 + + vzeroupper + pop %r13 + pop %r12 + pop %rbx + ret +ENDPROC(poly1305_4block_avx2) diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S new file mode 100644 index 000000000000..338c748054ed --- /dev/null +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -0,0 +1,582 @@ +/* + * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 16 + +ANMASK: .octa 0x0000000003ffffff0000000003ffffff +ORMASK: .octa 0x00000000010000000000000001000000 + +.text + +#define h0 0x00(%rdi) +#define h1 0x04(%rdi) +#define h2 0x08(%rdi) +#define h3 0x0c(%rdi) +#define h4 0x10(%rdi) +#define r0 0x00(%rdx) +#define r1 0x04(%rdx) +#define r2 0x08(%rdx) +#define r3 0x0c(%rdx) +#define r4 0x10(%rdx) +#define s1 0x00(%rsp) +#define s2 0x04(%rsp) +#define s3 0x08(%rsp) +#define s4 0x0c(%rsp) +#define m %rsi +#define h01 %xmm0 +#define h23 %xmm1 +#define h44 %xmm2 +#define t1 %xmm3 +#define t2 %xmm4 +#define t3 %xmm5 +#define t4 %xmm6 +#define mask %xmm7 +#define d0 %r8 +#define d1 %r9 +#define d2 %r10 +#define d3 %r11 +#define d4 %r12 + +ENTRY(poly1305_block_sse2) + # %rdi: Accumulator h[5] + # %rsi: 16 byte input block m + # %rdx: Poly1305 key r[5] + # %rcx: Block count + + # This single block variant tries to improve performance by doing two + # multiplications in parallel using SSE instructions. There is quite + # some quardword packing involved, hence the speedup is marginal. 
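Both Poly1305 variants in this file (and the AVX2 variant above) evaluate the same scalar step, h = (h + m) * r mod 2^130 - 5, on five 26-bit limbs; the multi-block entry points merely unroll several such steps per loop iteration using precomputed powers of r. The following C sketch is illustrative only, not part of the patch, and assumes the message block m has already been split into 26-bit limbs with the high bit (1 << 24 in m[4]) set:

    /* Illustrative sketch of one Poly1305 block step on 26-bit limbs. */
    #include <stdint.h>

    static void poly1305_block_ref(uint32_t h[5], const uint32_t m[5],
                                   const uint32_t r[5])
    {
            uint32_t s[5];
            uint64_t d[5];
            int i;

            for (i = 1; i < 5; i++)
                    s[i] = r[i] * 5;        /* 2^130 == 5 (mod 2^130 - 5) */

            for (i = 0; i < 5; i++)
                    h[i] += m[i];           /* h += m */

            /* d = h * r, folding limbs above 2^130 back in via s[] */
            d[0] = (uint64_t)h[0] * r[0] + (uint64_t)h[1] * s[4] +
                   (uint64_t)h[2] * s[3] + (uint64_t)h[3] * s[2] +
                   (uint64_t)h[4] * s[1];
            d[1] = (uint64_t)h[0] * r[1] + (uint64_t)h[1] * r[0] +
                   (uint64_t)h[2] * s[4] + (uint64_t)h[3] * s[3] +
                   (uint64_t)h[4] * s[2];
            d[2] = (uint64_t)h[0] * r[2] + (uint64_t)h[1] * r[1] +
                   (uint64_t)h[2] * r[0] + (uint64_t)h[3] * s[4] +
                   (uint64_t)h[4] * s[3];
            d[3] = (uint64_t)h[0] * r[3] + (uint64_t)h[1] * r[2] +
                   (uint64_t)h[2] * r[1] + (uint64_t)h[3] * r[0] +
                   (uint64_t)h[4] * s[4];
            d[4] = (uint64_t)h[0] * r[4] + (uint64_t)h[1] * r[3] +
                   (uint64_t)h[2] * r[2] + (uint64_t)h[3] * r[1] +
                   (uint64_t)h[4] * r[0];

            /* partial reduction; matches the shr/lea/and chain in the asm */
            for (i = 0; i < 4; i++) {
                    d[i + 1] += d[i] >> 26;
                    h[i] = (uint32_t)d[i] & 0x3ffffff;
            }
            h[0] += (uint32_t)(d[4] >> 26) * 5;
            h[4] = (uint32_t)d[4] & 0x3ffffff;
            h[1] += h[0] >> 26;
            h[0] &= 0x3ffffff;
    }

The two-block and four-block routines unroll this step: with u = r^2 they compute h = (h + m1) * r^2 + m2 * r in a single pass, which is why the glue code derives and caches the extra key powers before calling them.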
+ + push %rbx + push %r12 + sub $0x10,%rsp + + # s1..s4 = r1..r4 * 5 + mov r1,%eax + lea (%eax,%eax,4),%eax + mov %eax,s1 + mov r2,%eax + lea (%eax,%eax,4),%eax + mov %eax,s2 + mov r3,%eax + lea (%eax,%eax,4),%eax + mov %eax,s3 + mov r4,%eax + lea (%eax,%eax,4),%eax + mov %eax,s4 + + movdqa ANMASK(%rip),mask + +.Ldoblock: + # h01 = [0, h1, 0, h0] + # h23 = [0, h3, 0, h2] + # h44 = [0, h4, 0, h4] + movd h0,h01 + movd h1,t1 + movd h2,h23 + movd h3,t2 + movd h4,h44 + punpcklqdq t1,h01 + punpcklqdq t2,h23 + punpcklqdq h44,h44 + + # h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ] + movd 0x00(m),t1 + movd 0x03(m),t2 + psrld $2,t2 + punpcklqdq t2,t1 + pand mask,t1 + paddd t1,h01 + # h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ] + movd 0x06(m),t1 + movd 0x09(m),t2 + psrld $4,t1 + psrld $6,t2 + punpcklqdq t2,t1 + pand mask,t1 + paddd t1,h23 + # h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ] + mov 0x0c(m),%eax + shr $8,%eax + or $0x01000000,%eax + movd %eax,t1 + pshufd $0xc4,t1,t1 + paddd t1,h44 + + # t1[0] = h0 * r0 + h2 * s3 + # t1[1] = h1 * s4 + h3 * s2 + movd r0,t1 + movd s4,t2 + punpcklqdq t2,t1 + pmuludq h01,t1 + movd s3,t2 + movd s2,t3 + punpcklqdq t3,t2 + pmuludq h23,t2 + paddq t2,t1 + # t2[0] = h0 * r1 + h2 * s4 + # t2[1] = h1 * r0 + h3 * s3 + movd r1,t2 + movd r0,t3 + punpcklqdq t3,t2 + pmuludq h01,t2 + movd s4,t3 + movd s3,t4 + punpcklqdq t4,t3 + pmuludq h23,t3 + paddq t3,t2 + # t3[0] = h4 * s1 + # t3[1] = h4 * s2 + movd s1,t3 + movd s2,t4 + punpcklqdq t4,t3 + pmuludq h44,t3 + # d0 = t1[0] + t1[1] + t3[0] + # d1 = t2[0] + t2[1] + t3[1] + movdqa t1,t4 + punpcklqdq t2,t4 + punpckhqdq t2,t1 + paddq t4,t1 + paddq t3,t1 + movq t1,d0 + psrldq $8,t1 + movq t1,d1 + + # t1[0] = h0 * r2 + h2 * r0 + # t1[1] = h1 * r1 + h3 * s4 + movd r2,t1 + movd r1,t2 + punpcklqdq t2,t1 + pmuludq h01,t1 + movd r0,t2 + movd s4,t3 + punpcklqdq t3,t2 + pmuludq h23,t2 + paddq t2,t1 + # t2[0] = h0 * r3 + h2 * r1 + # t2[1] = h1 * r2 + h3 * r0 + movd r3,t2 + movd r2,t3 + punpcklqdq t3,t2 + pmuludq h01,t2 + movd r1,t3 + movd r0,t4 + punpcklqdq t4,t3 + pmuludq h23,t3 + paddq t3,t2 + # t3[0] = h4 * s3 + # t3[1] = h4 * s4 + movd s3,t3 + movd s4,t4 + punpcklqdq t4,t3 + pmuludq h44,t3 + # d2 = t1[0] + t1[1] + t3[0] + # d3 = t2[0] + t2[1] + t3[1] + movdqa t1,t4 + punpcklqdq t2,t4 + punpckhqdq t2,t1 + paddq t4,t1 + paddq t3,t1 + movq t1,d2 + psrldq $8,t1 + movq t1,d3 + + # t1[0] = h0 * r4 + h2 * r2 + # t1[1] = h1 * r3 + h3 * r1 + movd r4,t1 + movd r3,t2 + punpcklqdq t2,t1 + pmuludq h01,t1 + movd r2,t2 + movd r1,t3 + punpcklqdq t3,t2 + pmuludq h23,t2 + paddq t2,t1 + # t3[0] = h4 * r0 + movd r0,t3 + pmuludq h44,t3 + # d4 = t1[0] + t1[1] + t3[0] + movdqa t1,t4 + psrldq $8,t4 + paddq t4,t1 + paddq t3,t1 + movq t1,d4 + + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax + add %rax,d1 + # h0 = d0 & 0x3ffffff + mov d0,%rbx + and $0x3ffffff,%ebx + + # d2 += d1 >> 26 + mov d1,%rax + shr $26,%rax + add %rax,d2 + # h1 = d1 & 0x3ffffff + mov d1,%rax + and $0x3ffffff,%eax + mov %eax,h1 + + # d3 += d2 >> 26 + mov d2,%rax + shr $26,%rax + add %rax,d3 + # h2 = d2 & 0x3ffffff + mov d2,%rax + and $0x3ffffff,%eax + mov %eax,h2 + + # d4 += d3 >> 26 + mov d3,%rax + shr $26,%rax + add %rax,d4 + # h3 = d3 & 0x3ffffff + mov d3,%rax + and $0x3ffffff,%eax + mov %eax,h3 + + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax + lea (%eax,%eax,4),%eax + add %eax,%ebx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 + mov %ebx,%eax + shr $26,%eax + add %eax,h1 + # h0 = 
h0 & 0x3ffffff + andl $0x3ffffff,%ebx + mov %ebx,h0 + + add $0x10,m + dec %rcx + jnz .Ldoblock + + add $0x10,%rsp + pop %r12 + pop %rbx + ret +ENDPROC(poly1305_block_sse2) + + +#define u0 0x00(%r8) +#define u1 0x04(%r8) +#define u2 0x08(%r8) +#define u3 0x0c(%r8) +#define u4 0x10(%r8) +#define hc0 %xmm0 +#define hc1 %xmm1 +#define hc2 %xmm2 +#define hc3 %xmm5 +#define hc4 %xmm6 +#define ru0 %xmm7 +#define ru1 %xmm8 +#define ru2 %xmm9 +#define ru3 %xmm10 +#define ru4 %xmm11 +#define sv1 %xmm12 +#define sv2 %xmm13 +#define sv3 %xmm14 +#define sv4 %xmm15 +#undef d0 +#define d0 %r13 + +ENTRY(poly1305_2block_sse2) + # %rdi: Accumulator h[5] + # %rsi: 16 byte input block m + # %rdx: Poly1305 key r[5] + # %rcx: Doubleblock count + # %r8: Poly1305 derived key r^2 u[5] + + # This two-block variant further improves performance by using loop + # unrolled block processing. This is more straight forward and does + # less byte shuffling, but requires a second Poly1305 key r^2: + # h = (h + m) * r => h = (h + m1) * r^2 + m2 * r + + push %rbx + push %r12 + push %r13 + + # combine r0,u0 + movd u0,ru0 + movd r0,t1 + punpcklqdq t1,ru0 + + # combine r1,u1 and s1=r1*5,v1=u1*5 + movd u1,ru1 + movd r1,t1 + punpcklqdq t1,ru1 + movdqa ru1,sv1 + pslld $2,sv1 + paddd ru1,sv1 + + # combine r2,u2 and s2=r2*5,v2=u2*5 + movd u2,ru2 + movd r2,t1 + punpcklqdq t1,ru2 + movdqa ru2,sv2 + pslld $2,sv2 + paddd ru2,sv2 + + # combine r3,u3 and s3=r3*5,v3=u3*5 + movd u3,ru3 + movd r3,t1 + punpcklqdq t1,ru3 + movdqa ru3,sv3 + pslld $2,sv3 + paddd ru3,sv3 + + # combine r4,u4 and s4=r4*5,v4=u4*5 + movd u4,ru4 + movd r4,t1 + punpcklqdq t1,ru4 + movdqa ru4,sv4 + pslld $2,sv4 + paddd ru4,sv4 + +.Ldoblock2: + # hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ] + movd 0x00(m),hc0 + movd 0x10(m),t1 + punpcklqdq t1,hc0 + pand ANMASK(%rip),hc0 + movd h0,t1 + paddd t1,hc0 + # hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ] + movd 0x03(m),hc1 + movd 0x13(m),t1 + punpcklqdq t1,hc1 + psrld $2,hc1 + pand ANMASK(%rip),hc1 + movd h1,t1 + paddd t1,hc1 + # hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ] + movd 0x06(m),hc2 + movd 0x16(m),t1 + punpcklqdq t1,hc2 + psrld $4,hc2 + pand ANMASK(%rip),hc2 + movd h2,t1 + paddd t1,hc2 + # hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ] + movd 0x09(m),hc3 + movd 0x19(m),t1 + punpcklqdq t1,hc3 + psrld $6,hc3 + pand ANMASK(%rip),hc3 + movd h3,t1 + paddd t1,hc3 + # hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ] + movd 0x0c(m),hc4 + movd 0x1c(m),t1 + punpcklqdq t1,hc4 + psrld $8,hc4 + por ORMASK(%rip),hc4 + movd h4,t1 + paddd t1,hc4 + + # t1 = [ hc0[1] * r0, hc0[0] * u0 ] + movdqa ru0,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * s4, hc1[0] * v4 ] + movdqa sv4,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * s3, hc2[0] * v3 ] + movdqa sv3,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * s2, hc3[0] * v2 ] + movdqa sv2,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s1, hc4[0] * v1 ] + movdqa sv1,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d0 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d0 + + # t1 = [ hc0[1] * r1, hc0[0] * u1 ] + movdqa ru1,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r0, hc1[0] * u0 ] + movdqa ru0,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * s4, hc2[0] * v4 ] + movdqa sv4,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * s3, hc3[0] * v3 ] + movdqa sv3,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s2, hc4[0] * v2 ] + movdqa sv2,t2 + pmuludq hc4,t2 + 
paddq t2,t1 + # d1 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d1 + + # t1 = [ hc0[1] * r2, hc0[0] * u2 ] + movdqa ru2,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r1, hc1[0] * u1 ] + movdqa ru1,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * r0, hc2[0] * u0 ] + movdqa ru0,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * s4, hc3[0] * v4 ] + movdqa sv4,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s3, hc4[0] * v3 ] + movdqa sv3,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d2 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d2 + + # t1 = [ hc0[1] * r3, hc0[0] * u3 ] + movdqa ru3,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r2, hc1[0] * u2 ] + movdqa ru2,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * r1, hc2[0] * u1 ] + movdqa ru1,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * r0, hc3[0] * u0 ] + movdqa ru0,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s4, hc4[0] * v4 ] + movdqa sv4,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d3 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d3 + + # t1 = [ hc0[1] * r4, hc0[0] * u4 ] + movdqa ru4,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r3, hc1[0] * u3 ] + movdqa ru3,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * r2, hc2[0] * u2 ] + movdqa ru2,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * r1, hc3[0] * u1 ] + movdqa ru1,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * r0, hc4[0] * u0 ] + movdqa ru0,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d4 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d4 + + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax + add %rax,d1 + # h0 = d0 & 0x3ffffff + mov d0,%rbx + and $0x3ffffff,%ebx + + # d2 += d1 >> 26 + mov d1,%rax + shr $26,%rax + add %rax,d2 + # h1 = d1 & 0x3ffffff + mov d1,%rax + and $0x3ffffff,%eax + mov %eax,h1 + + # d3 += d2 >> 26 + mov d2,%rax + shr $26,%rax + add %rax,d3 + # h2 = d2 & 0x3ffffff + mov d2,%rax + and $0x3ffffff,%eax + mov %eax,h2 + + # d4 += d3 >> 26 + mov d3,%rax + shr $26,%rax + add %rax,d4 + # h3 = d3 & 0x3ffffff + mov d3,%rax + and $0x3ffffff,%eax + mov %eax,h3 + + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax + lea (%eax,%eax,4),%eax + add %eax,%ebx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 + mov %ebx,%eax + shr $26,%eax + add %eax,h1 + # h0 = h0 & 0x3ffffff + andl $0x3ffffff,%ebx + mov %ebx,h0 + + add $0x20,m + dec %rcx + jnz .Ldoblock2 + + pop %r13 + pop %r12 + pop %rbx + ret +ENDPROC(poly1305_2block_sse2) diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c new file mode 100644 index 000000000000..f7170d764f32 --- /dev/null +++ b/arch/x86/crypto/poly1305_glue.c @@ -0,0 +1,207 @@ +/* + * Poly1305 authenticator algorithm, RFC7539, SIMD glue code + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <crypto/algapi.h> +#include <crypto/internal/hash.h> +#include <crypto/poly1305.h> +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/simd.h> + +struct poly1305_simd_desc_ctx { + struct poly1305_desc_ctx base; + /* derived key u set? */ + bool uset; +#ifdef CONFIG_AS_AVX2 + /* derived keys r^3, r^4 set? 
*/ + bool wset; +#endif + /* derived Poly1305 key r^2 */ + u32 u[5]; + /* ... silently appended r^3 and r^4 when using AVX2 */ +}; + +asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src, + const u32 *r, unsigned int blocks); +asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r, + unsigned int blocks, const u32 *u); +#ifdef CONFIG_AS_AVX2 +asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, + unsigned int blocks, const u32 *u); +static bool poly1305_use_avx2; +#endif + +static int poly1305_simd_init(struct shash_desc *desc) +{ + struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc); + + sctx->uset = false; +#ifdef CONFIG_AS_AVX2 + sctx->wset = false; +#endif + + return crypto_poly1305_init(desc); +} + +static void poly1305_simd_mult(u32 *a, const u32 *b) +{ + u8 m[POLY1305_BLOCK_SIZE]; + + memset(m, 0, sizeof(m)); + /* The poly1305 block function adds a hi-bit to the accumulator which + * we don't need for key multiplication; compensate for it. */ + a[4] -= 1 << 24; + poly1305_block_sse2(a, m, b, 1); +} + +static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, + const u8 *src, unsigned int srclen) +{ + struct poly1305_simd_desc_ctx *sctx; + unsigned int blocks, datalen; + + BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base)); + sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base); + + if (unlikely(!dctx->sset)) { + datalen = crypto_poly1305_setdesckey(dctx, src, srclen); + src += srclen - datalen; + srclen = datalen; + } + +#ifdef CONFIG_AS_AVX2 + if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) { + if (unlikely(!sctx->wset)) { + if (!sctx->uset) { + memcpy(sctx->u, dctx->r, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u, dctx->r); + sctx->uset = true; + } + memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u + 5, dctx->r); + memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u + 10, dctx->r); + sctx->wset = true; + } + blocks = srclen / (POLY1305_BLOCK_SIZE * 4); + poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u); + src += POLY1305_BLOCK_SIZE * 4 * blocks; + srclen -= POLY1305_BLOCK_SIZE * 4 * blocks; + } +#endif + if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) { + if (unlikely(!sctx->uset)) { + memcpy(sctx->u, dctx->r, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u, dctx->r); + sctx->uset = true; + } + blocks = srclen / (POLY1305_BLOCK_SIZE * 2); + poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u); + src += POLY1305_BLOCK_SIZE * 2 * blocks; + srclen -= POLY1305_BLOCK_SIZE * 2 * blocks; + } + if (srclen >= POLY1305_BLOCK_SIZE) { + poly1305_block_sse2(dctx->h, src, dctx->r, 1); + srclen -= POLY1305_BLOCK_SIZE; + } + return srclen; +} + +static int poly1305_simd_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + unsigned int bytes; + + /* kernel_fpu_begin/end is costly, use fallback for small updates */ + if (srclen <= 288 || !may_use_simd()) + return crypto_poly1305_update(desc, src, srclen); + + kernel_fpu_begin(); + + if (unlikely(dctx->buflen)) { + bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); + memcpy(dctx->buf + dctx->buflen, src, bytes); + src += bytes; + srclen -= bytes; + dctx->buflen += bytes; + + if (dctx->buflen == POLY1305_BLOCK_SIZE) { + poly1305_simd_blocks(dctx, dctx->buf, + POLY1305_BLOCK_SIZE); + dctx->buflen = 0; + } + } + + if (likely(srclen >= POLY1305_BLOCK_SIZE)) { + bytes = poly1305_simd_blocks(dctx, src, 
srclen); + src += srclen - bytes; + srclen = bytes; + } + + kernel_fpu_end(); + + if (unlikely(srclen)) { + dctx->buflen = srclen; + memcpy(dctx->buf, src, srclen); + } + + return 0; +} + +static struct shash_alg alg = { + .digestsize = POLY1305_DIGEST_SIZE, + .init = poly1305_simd_init, + .update = poly1305_simd_update, + .final = crypto_poly1305_final, + .setkey = crypto_poly1305_setkey, + .descsize = sizeof(struct poly1305_simd_desc_ctx), + .base = { + .cra_name = "poly1305", + .cra_driver_name = "poly1305-simd", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_alignmask = sizeof(u32) - 1, + .cra_blocksize = POLY1305_BLOCK_SIZE, + .cra_module = THIS_MODULE, + }, +}; + +static int __init poly1305_simd_mod_init(void) +{ + if (!cpu_has_xmm2) + return -ENODEV; + +#ifdef CONFIG_AS_AVX2 + poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 && + cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); + alg.descsize = sizeof(struct poly1305_simd_desc_ctx); + if (poly1305_use_avx2) + alg.descsize += 10 * sizeof(u32); +#endif + return crypto_register_shash(&alg); +} + +static void __exit poly1305_simd_mod_exit(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(poly1305_simd_mod_init); +module_exit(poly1305_simd_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("Poly1305 authenticator"); +MODULE_ALIAS_CRYPTO("poly1305"); +MODULE_ALIAS_CRYPTO("poly1305-simd"); diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index 7a144971db79..bd55dedd7614 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -2,6 +2,7 @@ # Makefile for the x86 low level entry code # obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o +obj-y += common.o obj-y += vdso/ obj-y += vsyscall/ diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index f4e6308c4200..3c71dd947c7b 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -135,9 +135,6 @@ For 32-bit we have the following conventions - kernel is built with movq %rbp, 4*8+\offset(%rsp) movq %rbx, 5*8+\offset(%rsp) .endm - .macro SAVE_EXTRA_REGS_RBP offset=0 - movq %rbp, 4*8+\offset(%rsp) - .endm .macro RESTORE_EXTRA_REGS offset=0 movq 0*8+\offset(%rsp), %r15 @@ -193,12 +190,6 @@ For 32-bit we have the following conventions - kernel is built with .macro RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_HELPER 1,0,0,1,1 .endm - .macro RESTORE_RSI_RDI - RESTORE_C_REGS_HELPER 0,0,0,0,0 - .endm - .macro RESTORE_RSI_RDI_RDX - RESTORE_C_REGS_HELPER 0,0,0,0,1 - .endm .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 subq $-(15*8+\addskip), %rsp diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c new file mode 100644 index 000000000000..80dcc9261ca3 --- /dev/null +++ b/arch/x86/entry/common.c @@ -0,0 +1,318 @@ +/* + * common.c - C code for kernel entry and exit + * Copyright (c) 2015 Andrew Lutomirski + * GPL v2 + * + * Based on asm and ptrace code by many authors. The code here originated + * in ptrace.c and signal.c. 
+ */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/tracehook.h> +#include <linux/audit.h> +#include <linux/seccomp.h> +#include <linux/signal.h> +#include <linux/export.h> +#include <linux/context_tracking.h> +#include <linux/user-return-notifier.h> +#include <linux/uprobes.h> + +#include <asm/desc.h> +#include <asm/traps.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/syscalls.h> + +#ifdef CONFIG_CONTEXT_TRACKING +/* Called on entry from user mode with IRQs off. */ +__visible void enter_from_user_mode(void) +{ + CT_WARN_ON(ct_state() != CONTEXT_USER); + user_exit(); +} +#endif + +static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) +{ +#ifdef CONFIG_X86_64 + if (arch == AUDIT_ARCH_X86_64) { + audit_syscall_entry(regs->orig_ax, regs->di, + regs->si, regs->dx, regs->r10); + } else +#endif + { + audit_syscall_entry(regs->orig_ax, regs->bx, + regs->cx, regs->dx, regs->si); + } +} + +/* + * We can return 0 to resume the syscall or anything else to go to phase + * 2. If we resume the syscall, we need to put something appropriate in + * regs->orig_ax. + * + * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax + * are fully functional. + * + * For phase 2's benefit, our return value is: + * 0: resume the syscall + * 1: go to phase 2; no seccomp phase 2 needed + * anything else: go to phase 2; pass return value to seccomp + */ +unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) +{ + unsigned long ret = 0; + u32 work; + + BUG_ON(regs != task_pt_regs(current)); + + work = ACCESS_ONCE(current_thread_info()->flags) & + _TIF_WORK_SYSCALL_ENTRY; + +#ifdef CONFIG_CONTEXT_TRACKING + /* + * If TIF_NOHZ is set, we are required to call user_exit() before + * doing anything that could touch RCU. + */ + if (work & _TIF_NOHZ) { + enter_from_user_mode(); + work &= ~_TIF_NOHZ; + } +#endif + +#ifdef CONFIG_SECCOMP + /* + * Do seccomp first -- it should minimize exposure of other + * code, and keeping seccomp fast is probably more valuable + * than the rest of this. + */ + if (work & _TIF_SECCOMP) { + struct seccomp_data sd; + + sd.arch = arch; + sd.nr = regs->orig_ax; + sd.instruction_pointer = regs->ip; +#ifdef CONFIG_X86_64 + if (arch == AUDIT_ARCH_X86_64) { + sd.args[0] = regs->di; + sd.args[1] = regs->si; + sd.args[2] = regs->dx; + sd.args[3] = regs->r10; + sd.args[4] = regs->r8; + sd.args[5] = regs->r9; + } else +#endif + { + sd.args[0] = regs->bx; + sd.args[1] = regs->cx; + sd.args[2] = regs->dx; + sd.args[3] = regs->si; + sd.args[4] = regs->di; + sd.args[5] = regs->bp; + } + + BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0); + BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1); + + ret = seccomp_phase1(&sd); + if (ret == SECCOMP_PHASE1_SKIP) { + regs->orig_ax = -1; + ret = 0; + } else if (ret != SECCOMP_PHASE1_OK) { + return ret; /* Go directly to phase 2 */ + } + + work &= ~_TIF_SECCOMP; + } +#endif + + /* Do our best to finish without phase 2. */ + if (work == 0) + return ret; /* seccomp and/or nohz only (ret == 0 here) */ + +#ifdef CONFIG_AUDITSYSCALL + if (work == _TIF_SYSCALL_AUDIT) { + /* + * If there is no more work to be done except auditing, + * then audit in phase 1. Phase 2 always audits, so, if + * we audit here, then we can't go on to phase 2. 
+ */ + do_audit_syscall_entry(regs, arch); + return 0; + } +#endif + + return 1; /* Something is enabled that we can't handle in phase 1 */ +} + +/* Returns the syscall nr to run (which should match regs->orig_ax). */ +long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch, + unsigned long phase1_result) +{ + long ret = 0; + u32 work = ACCESS_ONCE(current_thread_info()->flags) & + _TIF_WORK_SYSCALL_ENTRY; + + BUG_ON(regs != task_pt_regs(current)); + + /* + * If we stepped into a sysenter/syscall insn, it trapped in + * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. + * If user-mode had set TF itself, then it's still clear from + * do_debug() and we need to set it again to restore the user + * state. If we entered on the slow path, TF was already set. + */ + if (work & _TIF_SINGLESTEP) + regs->flags |= X86_EFLAGS_TF; + +#ifdef CONFIG_SECCOMP + /* + * Call seccomp_phase2 before running the other hooks so that + * they can see any changes made by a seccomp tracer. + */ + if (phase1_result > 1 && seccomp_phase2(phase1_result)) { + /* seccomp failures shouldn't expose any additional code. */ + return -1; + } +#endif + + if (unlikely(work & _TIF_SYSCALL_EMU)) + ret = -1L; + + if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && + tracehook_report_syscall_entry(regs)) + ret = -1L; + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->orig_ax); + + do_audit_syscall_entry(regs, arch); + + return ret ?: regs->orig_ax; +} + +long syscall_trace_enter(struct pt_regs *regs) +{ + u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64; + unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch); + + if (phase1_result == 0) + return regs->orig_ax; + else + return syscall_trace_enter_phase2(regs, arch, phase1_result); +} + +static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs) +{ + unsigned long top_of_stack = + (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING; + return (struct thread_info *)(top_of_stack - THREAD_SIZE); +} + +/* Called with IRQs disabled. */ +__visible void prepare_exit_to_usermode(struct pt_regs *regs) +{ + if (WARN_ON(!irqs_disabled())) + local_irq_disable(); + + /* + * In order to return to user mode, we need to have IRQs off with + * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY, + * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags + * can be set at any time on preemptable kernels if we have IRQs on, + * so we need to loop. Disabling preemption wouldn't help: doing the + * work to clear some of the flags can sleep. + */ + while (true) { + u32 cached_flags = + READ_ONCE(pt_regs_to_thread_info(regs)->flags); + + if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | + _TIF_UPROBE | _TIF_NEED_RESCHED | + _TIF_USER_RETURN_NOTIFY))) + break; + + /* We have work to do. */ + local_irq_enable(); + + if (cached_flags & _TIF_NEED_RESCHED) + schedule(); + + if (cached_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + + /* deal with pending signal delivery */ + if (cached_flags & _TIF_SIGPENDING) + do_signal(regs); + + if (cached_flags & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + + if (cached_flags & _TIF_USER_RETURN_NOTIFY) + fire_user_return_notifiers(); + + /* Disable IRQs and retry */ + local_irq_disable(); + } + + user_enter(); +} + +/* + * Called with IRQs on and fully valid regs. Returns with IRQs off in a + * state such that we can immediately switch to user mode. 
+ */ +__visible void syscall_return_slowpath(struct pt_regs *regs) +{ + struct thread_info *ti = pt_regs_to_thread_info(regs); + u32 cached_flags = READ_ONCE(ti->flags); + bool step; + + CT_WARN_ON(ct_state() != CONTEXT_KERNEL); + + if (WARN(irqs_disabled(), "syscall %ld left IRQs disabled", + regs->orig_ax)) + local_irq_enable(); + + /* + * First do one-time work. If these work items are enabled, we + * want to run them exactly once per syscall exit with IRQs on. + */ + if (cached_flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | + _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)) { + audit_syscall_exit(regs); + + if (cached_flags & _TIF_SYSCALL_TRACEPOINT) + trace_sys_exit(regs, regs->ax); + + /* + * If TIF_SYSCALL_EMU is set, we only get here because of + * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). + * We already reported this syscall instruction in + * syscall_trace_enter(). + */ + step = unlikely( + (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)) + == _TIF_SINGLESTEP); + if (step || cached_flags & _TIF_SYSCALL_TRACE) + tracehook_report_syscall_exit(regs, step); + } + +#ifdef CONFIG_COMPAT + /* + * Compat syscalls set TS_COMPAT. Make sure we clear it before + * returning to user mode. + */ + ti->status &= ~TS_COMPAT; +#endif + + local_irq_disable(); + prepare_exit_to_usermode(regs); +} diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 21dc60a60b5f..b2909bf8cf70 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -45,16 +45,6 @@ #include <asm/asm.h> #include <asm/smap.h> -/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ -#include <linux/elf-em.h> -#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) -#define __AUDIT_ARCH_LE 0x40000000 - -#ifndef CONFIG_AUDITSYSCALL -# define sysenter_audit syscall_trace_entry -# define sysexit_audit syscall_exit_work -#endif - .section .entry.text, "ax" /* @@ -266,14 +256,10 @@ ret_from_intr: ENTRY(resume_userspace) LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret + DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - andl $_TIF_WORK_MASK, %ecx # is there any work to be done on - # int/exception return? 
- jne work_pending + movl %esp, %eax + call prepare_exit_to_usermode jmp restore_all END(ret_from_exception) @@ -339,7 +325,7 @@ sysenter_past_esp: GET_THREAD_INFO(%ebp) testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp) - jnz sysenter_audit + jnz syscall_trace_entry sysenter_do_call: cmpl $(NR_syscalls), %eax jae sysenter_badsys @@ -351,7 +337,7 @@ sysenter_after_call: TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx testl $_TIF_ALLWORK_MASK, %ecx - jnz sysexit_audit + jnz syscall_exit_work_irqs_off sysenter_exit: /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx @@ -362,40 +348,6 @@ sysenter_exit: PTGS_TO_GS ENABLE_INTERRUPTS_SYSEXIT -#ifdef CONFIG_AUDITSYSCALL -sysenter_audit: - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp) - jnz syscall_trace_entry - /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ - movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ - /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ - pushl PT_ESI(%esp) /* a3: 5th arg */ - pushl PT_EDX+4(%esp) /* a2: 4th arg */ - call __audit_syscall_entry - popl %ecx /* get that remapped edx off the stack */ - popl %ecx /* get that remapped esi off the stack */ - movl PT_EAX(%esp), %eax /* reload syscall number */ - jmp sysenter_do_call - -sysexit_audit: - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx - jnz syscall_exit_work - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_ANY) - movl %eax, %edx /* second arg, syscall return value */ - cmpl $-MAX_ERRNO, %eax /* is it an error ? */ - setbe %al /* 1 if so, 0 if not */ - movzbl %al, %eax /* zero-extend that */ - call __audit_syscall_exit - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx - jnz syscall_exit_work - movl PT_EAX(%esp), %eax /* reload syscall return value */ - jmp sysenter_exit -#endif - .pushsection .fixup, "ax" 2: movl $0, PT_FS(%esp) jmp 1b @@ -421,13 +373,7 @@ syscall_after_call: movl %eax, PT_EAX(%esp) # store the return value syscall_exit: LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret - TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - testl $_TIF_ALLWORK_MASK, %ecx # current->work - jnz syscall_exit_work + jmp syscall_exit_work restore_all: TRACE_IRQS_IRET @@ -504,57 +450,6 @@ ldt_ss: #endif ENDPROC(entry_INT80_32) - # perform work that needs to be done immediately before resumption - ALIGN -work_pending: - testb $_TIF_NEED_RESCHED, %cl - jz work_notifysig -work_resched: - call schedule - LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret - TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - andl $_TIF_WORK_MASK, %ecx # is there any work to be done other - # than syscall tracing? 
- jz restore_all - testb $_TIF_NEED_RESCHED, %cl - jnz work_resched - -work_notifysig: # deal with pending signals and - # notify-resume requests -#ifdef CONFIG_VM86 - testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) - movl %esp, %eax - jnz work_notifysig_v86 # returning to kernel-space or - # vm86-space -1: -#else - movl %esp, %eax -#endif - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - movb PT_CS(%esp), %bl - andb $SEGMENT_RPL_MASK, %bl - cmpb $USER_RPL, %bl - jb resume_kernel - xorl %edx, %edx - call do_notify_resume - jmp resume_userspace - -#ifdef CONFIG_VM86 - ALIGN -work_notifysig_v86: - pushl %ecx # save ti_flags for do_notify_resume - call save_v86_state # %eax contains pt_regs pointer - popl %ecx - movl %eax, %esp - jmp 1b -#endif -END(work_pending) - # perform syscall exit tracing ALIGN syscall_trace_entry: @@ -569,15 +464,14 @@ END(syscall_trace_entry) # perform syscall exit tracing ALIGN -syscall_exit_work: - testl $_TIF_WORK_SYSCALL_EXIT, %ecx - jz work_pending +syscall_exit_work_irqs_off: TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call - # schedule() instead + ENABLE_INTERRUPTS(CLBR_ANY) + +syscall_exit_work: movl %esp, %eax - call syscall_trace_leave - jmp resume_userspace + call syscall_return_slowpath + jmp restore_all END(syscall_exit_work) syscall_fault: diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 8cb3e438f21e..d3033183ed70 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -33,7 +33,6 @@ #include <asm/paravirt.h> #include <asm/percpu.h> #include <asm/asm.h> -#include <asm/context_tracking.h> #include <asm/smap.h> #include <asm/pgtable_types.h> #include <linux/err.h> @@ -229,6 +228,11 @@ entry_SYSCALL_64_fastpath: */ USERGS_SYSRET64 +GLOBAL(int_ret_from_sys_call_irqs_off) + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + jmp int_ret_from_sys_call + /* Do syscall entry tracing */ tracesys: movq %rsp, %rdi @@ -272,69 +276,11 @@ tracesys_phase2: * Has correct iret frame. */ GLOBAL(int_ret_from_sys_call) - DISABLE_INTERRUPTS(CLBR_NONE) -int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */ - TRACE_IRQS_OFF - movl $_TIF_ALLWORK_MASK, %edi - /* edi: mask to check */ -GLOBAL(int_with_check) - LOCKDEP_SYS_EXIT_IRQ - GET_THREAD_INFO(%rcx) - movl TI_flags(%rcx), %edx - andl %edi, %edx - jnz int_careful - andl $~TS_COMPAT, TI_status(%rcx) - jmp syscall_return - - /* - * Either reschedule or signal or syscall exit tracking needed. - * First do a reschedule test. 
- * edx: work, edi: workmask - */ -int_careful: - bt $TIF_NEED_RESCHED, %edx - jnc int_very_careful - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - pushq %rdi - SCHEDULE_USER - popq %rdi - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - jmp int_with_check - - /* handle signals and tracing -- both require a full pt_regs */ -int_very_careful: - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) SAVE_EXTRA_REGS - /* Check for syscall exit trace */ - testl $_TIF_WORK_SYSCALL_EXIT, %edx - jz int_signal - pushq %rdi - leaq 8(%rsp), %rdi /* &ptregs -> arg1 */ - call syscall_trace_leave - popq %rdi - andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU), %edi - jmp int_restore_rest - -int_signal: - testl $_TIF_DO_NOTIFY_MASK, %edx - jz 1f - movq %rsp, %rdi /* &ptregs -> arg1 */ - xorl %esi, %esi /* oldset -> arg2 */ - call do_notify_resume -1: movl $_TIF_WORK_MASK, %edi -int_restore_rest: + movq %rsp, %rdi + call syscall_return_slowpath /* returns with IRQs disabled */ RESTORE_EXTRA_REGS - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - jmp int_with_check - -syscall_return: - /* The IRETQ could re-enable interrupts: */ - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_IRETQ + TRACE_IRQS_IRETQ /* we're about to change IF */ /* * Try to use SYSRET instead of IRET if we're returning to @@ -555,23 +501,22 @@ END(irq_entries_start) /* 0(%rsp): ~(interrupt number) */ .macro interrupt func cld - /* - * Since nothing in interrupt handling code touches r12...r15 members - * of "struct pt_regs", and since interrupts can nest, we can save - * four stack slots and simultaneously provide - * an unwind-friendly stack layout by saving "truncated" pt_regs - * exactly up to rbp slot, without these members. - */ - ALLOC_PT_GPREGS_ON_STACK -RBP - SAVE_C_REGS -RBP - /* this goes to 0(%rsp) for unwinder, not for saving the value: */ - SAVE_EXTRA_REGS_RBP -RBP - - leaq -RBP(%rsp), %rdi /* arg1 for \func (pointer to pt_regs) */ + ALLOC_PT_GPREGS_ON_STACK + SAVE_C_REGS + SAVE_EXTRA_REGS - testb $3, CS-RBP(%rsp) + testb $3, CS(%rsp) jz 1f + + /* + * IRQ from user mode. Switch to kernel gsbase and inform context + * tracking that we're in kernel mode. + */ SWAPGS +#ifdef CONFIG_CONTEXT_TRACKING + call enter_from_user_mode +#endif + 1: /* * Save previous stack pointer, optionally switch to interrupt stack. @@ -580,14 +525,14 @@ END(irq_entries_start) * a little cheaper to use a separate counter in the PDA (short of * moving irq_enter into assembly, which would be too much work) */ - movq %rsp, %rsi + movq %rsp, %rdi incl PER_CPU_VAR(irq_count) cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp - pushq %rsi + pushq %rdi /* We entered an interrupt context - irqs are off: */ TRACE_IRQS_OFF - call \func + call \func /* rdi points to pt_regs */ .endm /* @@ -606,34 +551,19 @@ ret_from_intr: decl PER_CPU_VAR(irq_count) /* Restore saved previous stack */ - popq %rsi - /* return code expects complete pt_regs - adjust rsp accordingly: */ - leaq -RBP(%rsi), %rsp + popq %rsp testb $3, CS(%rsp) jz retint_kernel - /* Interrupt came from user space */ -retint_user: - GET_THREAD_INFO(%rcx) - /* %rcx: thread info. Interrupts are off. 
*/ -retint_with_reschedule: - movl $_TIF_WORK_MASK, %edi -retint_check: + /* Interrupt came from user space */ LOCKDEP_SYS_EXIT_IRQ - movl TI_flags(%rcx), %edx - andl %edi, %edx - jnz retint_careful - -retint_swapgs: /* return to user-space */ - /* - * The iretq could re-enable interrupts: - */ - DISABLE_INTERRUPTS(CLBR_ANY) +GLOBAL(retint_user) + mov %rsp,%rdi + call prepare_exit_to_usermode TRACE_IRQS_IRETQ - SWAPGS - jmp restore_c_regs_and_iret + jmp restore_regs_and_iret /* Returning to kernel space */ retint_kernel: @@ -657,6 +587,8 @@ retint_kernel: * At this label, code paths which return to kernel and to user, * which come from interrupts/exception and from syscalls, merge. */ +restore_regs_and_iret: + RESTORE_EXTRA_REGS restore_c_regs_and_iret: RESTORE_C_REGS REMOVE_PT_GPREGS_FROM_STACK 8 @@ -707,37 +639,6 @@ native_irq_return_ldt: popq %rax jmp native_irq_return_iret #endif - - /* edi: workmask, edx: work */ -retint_careful: - bt $TIF_NEED_RESCHED, %edx - jnc retint_signal - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - pushq %rdi - SCHEDULE_USER - popq %rdi - GET_THREAD_INFO(%rcx) - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - jmp retint_check - -retint_signal: - testl $_TIF_DO_NOTIFY_MASK, %edx - jz retint_swapgs - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - SAVE_EXTRA_REGS - movq $-1, ORIG_RAX(%rsp) - xorl %esi, %esi /* oldset */ - movq %rsp, %rdi /* &pt_regs */ - call do_notify_resume - RESTORE_EXTRA_REGS - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - GET_THREAD_INFO(%rcx) - jmp retint_with_reschedule - END(common_interrupt) /* @@ -1143,12 +1044,22 @@ ENTRY(error_entry) SAVE_EXTRA_REGS 8 xorl %ebx, %ebx testb $3, CS+8(%rsp) - jz error_kernelspace + jz .Lerror_kernelspace - /* We entered from user mode */ +.Lerror_entry_from_usermode_swapgs: + /* + * We entered from user mode or we're pretending to have entered + * from user mode due to an IRET fault. + */ SWAPGS -error_entry_done: +.Lerror_entry_from_usermode_after_swapgs: +#ifdef CONFIG_CONTEXT_TRACKING + call enter_from_user_mode +#endif + +.Lerror_entry_done: + TRACE_IRQS_OFF ret @@ -1158,31 +1069,30 @@ error_entry_done: * truncated RIP for IRET exceptions returning to compat mode. Check * for these here too. */ -error_kernelspace: +.Lerror_kernelspace: incl %ebx leaq native_irq_return_iret(%rip), %rcx cmpq %rcx, RIP+8(%rsp) - je error_bad_iret + je .Lerror_bad_iret movl %ecx, %eax /* zero extend */ cmpq %rax, RIP+8(%rsp) - je bstep_iret + je .Lbstep_iret cmpq $gs_change, RIP+8(%rsp) - jne error_entry_done + jne .Lerror_entry_done /* * hack: gs_change can fail with user gsbase. If this happens, fix up * gsbase and proceed. We'll fix up the exception and land in * gs_change's error handler with kernel gsbase. */ - SWAPGS - jmp error_entry_done + jmp .Lerror_entry_from_usermode_swapgs -bstep_iret: +.Lbstep_iret: /* Fix truncated RIP */ movq %rcx, RIP+8(%rsp) /* fall through */ -error_bad_iret: +.Lerror_bad_iret: /* * We came from an IRET to user mode, so we have user gsbase. 
* Switch to kernel gsbase: @@ -1198,7 +1108,7 @@ error_bad_iret: call fixup_bad_iret mov %rax, %rsp decl %ebx - jmp error_entry_done + jmp .Lerror_entry_from_usermode_after_swapgs END(error_entry) @@ -1209,7 +1119,6 @@ END(error_entry) */ ENTRY(error_exit) movl %ebx, %eax - RESTORE_EXTRA_REGS DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl %eax, %eax diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index a7e257d9cb90..a9360d40fb7f 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -22,8 +22,8 @@ #define __AUDIT_ARCH_LE 0x40000000 #ifndef CONFIG_AUDITSYSCALL -# define sysexit_audit ia32_ret_from_sys_call -# define sysretl_audit ia32_ret_from_sys_call +# define sysexit_audit ia32_ret_from_sys_call_irqs_off +# define sysretl_audit ia32_ret_from_sys_call_irqs_off #endif .section .entry.text, "ax" @@ -141,7 +141,8 @@ sysexit_from_sys_call: andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) movl RIP(%rsp), %ecx /* User %eip */ movq RAX(%rsp), %rax - RESTORE_RSI_RDI + movl RSI(%rsp), %esi + movl RDI(%rsp), %edi xorl %edx, %edx /* Do not leak kernel information */ xorq %r8, %r8 xorq %r9, %r9 @@ -209,10 +210,10 @@ sysexit_from_sys_call: .endm .macro auditsys_exit exit - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) - jnz ia32_ret_from_sys_call TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) + jnz ia32_ret_from_sys_call movl %eax, %esi /* second arg, syscall return value */ cmpl $-MAX_ERRNO, %eax /* is it an error ? */ jbe 1f @@ -230,7 +231,7 @@ sysexit_from_sys_call: movq %rax, R10(%rsp) movq %rax, R9(%rsp) movq %rax, R8(%rsp) - jmp int_with_check + jmp int_ret_from_sys_call_irqs_off .endm sysenter_auditsys: @@ -365,7 +366,9 @@ cstar_dispatch: sysretl_from_sys_call: andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) - RESTORE_RSI_RDI_RDX + movl RDX(%rsp), %edx + movl RSI(%rsp), %esi + movl RDI(%rsp), %edi movl RIP(%rsp), %ecx movl EFLAGS(%rsp), %r11d movq RAX(%rsp), %rax @@ -430,8 +433,48 @@ cstar_tracesys: END(entry_SYSCALL_compat) ia32_badarg: - ASM_CLAC - movq $-EFAULT, RAX(%rsp) + /* + * So far, we've entered kernel mode, set AC, turned on IRQs, and + * saved C regs except r8-r11. We haven't done any of the other + * standard entry work, though. We want to bail, but we shouldn't + * treat this as a syscall entry since we don't even know what the + * args are. Instead, treat this as a non-syscall entry, finish + * the entry work, and immediately exit after setting AX = -EFAULT. + * + * We're really just being polite here. Killing the task outright + * would be a reasonable action, too. Given that the only valid + * way to have gotten here is through the vDSO, and we already know + * that the stack pointer is bad, the task isn't going to survive + * for long no matter what we do. + */ + + ASM_CLAC /* undo STAC */ + movq $-EFAULT, RAX(%rsp) /* return -EFAULT if possible */ + + /* Fill in the rest of pt_regs */ + xorl %eax, %eax + movq %rax, R11(%rsp) + movq %rax, R10(%rsp) + movq %rax, R9(%rsp) + movq %rax, R8(%rsp) + SAVE_EXTRA_REGS + + /* Turn IRQs back off. */ + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + + /* Now finish entering normal kernel mode. */ +#ifdef CONFIG_CONTEXT_TRACKING + call enter_from_user_mode +#endif + + /* And exit again. 
*/ + jmp retint_user + +ia32_ret_from_sys_call_irqs_off: + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + ia32_ret_from_sys_call: xorl %eax, %eax /* Do not leak kernel information */ movq %rax, R11(%rsp) diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index ef8187f9d28d..25e3cf1cd8fd 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -365,3 +365,18 @@ 356 i386 memfd_create sys_memfd_create 357 i386 bpf sys_bpf 358 i386 execveat sys_execveat stub32_execveat +359 i386 socket sys_socket +360 i386 socketpair sys_socketpair +361 i386 bind sys_bind +362 i386 connect sys_connect +363 i386 listen sys_listen +364 i386 accept4 sys_accept4 +365 i386 getsockopt sys_getsockopt compat_sys_getsockopt +366 i386 setsockopt sys_setsockopt compat_sys_setsockopt +367 i386 getsockname sys_getsockname +368 i386 getpeername sys_getpeername +369 i386 sendto sys_sendto +370 i386 sendmsg sys_sendmsg compat_sys_sendmsg +371 i386 recvfrom sys_recvfrom compat_sys_recvfrom +372 i386 recvmsg sys_recvmsg compat_sys_recvmsg +373 i386 shutdown sys_shutdown diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index e97032069f88..a3d0767a6b29 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -8,7 +8,7 @@ KASAN_SANITIZE := n VDSO64-$(CONFIG_X86_64) := y VDSOX32-$(CONFIG_X86_X32_ABI) := y VDSO32-$(CONFIG_X86_32) := y -VDSO32-$(CONFIG_COMPAT) := y +VDSO32-$(CONFIG_IA32_EMULATION) := y # files to link into the vdso vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o @@ -20,7 +20,7 @@ obj-y += vma.o vdso_img-$(VDSO64-y) += 64 vdso_img-$(VDSOX32-y) += x32 vdso_img-$(VDSO32-y) += 32-int80 -vdso_img-$(CONFIG_COMPAT) += 32-syscall +vdso_img-$(CONFIG_IA32_EMULATION) += 32-syscall vdso_img-$(VDSO32-y) += 32-sysenter obj-$(VDSO32-y) += vdso32-setup.o @@ -126,7 +126,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE # Build multiple 32-bit vDSO images to choose from at boot time. # vdso32.so-$(VDSO32-y) += int80 -vdso32.so-$(CONFIG_COMPAT) += syscall +vdso32.so-$(CONFIG_IA32_EMULATION) += syscall vdso32.so-$(VDSO32-y) += sysenter vdso32-images = $(vdso32.so-y:%=vdso32-%.so) @@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ +VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \ $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) GCOV_PROFILE := n diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index 9793322751e0..ca94fa649251 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c @@ -175,20 +175,8 @@ static notrace cycle_t vread_pvclock(int *mode) notrace static cycle_t vread_tsc(void) { - cycle_t ret; - u64 last; - - /* - * Empirically, a fence (of type that depends on the CPU) - * before rdtsc is enough to ensure that rdtsc is ordered - * with respect to loads. The various CPU manuals are unclear - * as to whether rdtsc can be reordered with later loads, - * but no one has ever seen it happen. 
- */ - rdtsc_barrier(); - ret = (cycle_t)__native_read_tsc(); - - last = gtod->cycle_last; + cycle_t ret = (cycle_t)rdtsc_ordered(); + u64 last = gtod->cycle_last; if (likely(ret >= last)) return ret; diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 1c9f750c3859..434543145d78 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -177,7 +177,7 @@ up_fail: return ret; } -#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT) +#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static int load_vdso32(void) { int ret; @@ -219,8 +219,11 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm, return map_vdso(&vdso_image_x32, true); } #endif - +#ifdef CONFIG_IA32_EMULATION return load_vdso32(); +#else + return 0; +#endif } #endif #else diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 2dcc6ff6fdcc..26a46f44e298 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -290,7 +290,7 @@ static struct vm_area_struct gate_vma = { struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { -#ifdef CONFIG_IA32_EMULATION +#ifdef CONFIG_COMPAT if (!mm || mm->context.ia32_compat) return NULL; #endif diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index ae3a29ae875b..a0a19b7ba22d 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -34,99 +34,6 @@ #include <asm/sys_ia32.h> #include <asm/smap.h> -int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) -{ - int err = 0; - bool ia32 = test_thread_flag(TIF_IA32); - - if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) - return -EFAULT; - - put_user_try { - /* If you change siginfo_t structure, please make sure that - this code is fixed accordingly. - It should never copy any pad contained in the structure - to avoid security leaks, but must copy the generic - 3 ints plus the relevant union member. */ - put_user_ex(from->si_signo, &to->si_signo); - put_user_ex(from->si_errno, &to->si_errno); - put_user_ex((short)from->si_code, &to->si_code); - - if (from->si_code < 0) { - put_user_ex(from->si_pid, &to->si_pid); - put_user_ex(from->si_uid, &to->si_uid); - put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); - } else { - /* - * First 32bits of unions are always present: - * si_pid === si_band === si_tid === si_addr(LS half) - */ - put_user_ex(from->_sifields._pad[0], - &to->_sifields._pad[0]); - switch (from->si_code >> 16) { - case __SI_FAULT >> 16: - break; - case __SI_SYS >> 16: - put_user_ex(from->si_syscall, &to->si_syscall); - put_user_ex(from->si_arch, &to->si_arch); - break; - case __SI_CHLD >> 16: - if (ia32) { - put_user_ex(from->si_utime, &to->si_utime); - put_user_ex(from->si_stime, &to->si_stime); - } else { - put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime); - put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime); - } - put_user_ex(from->si_status, &to->si_status); - /* FALL THROUGH */ - default: - case __SI_KILL >> 16: - put_user_ex(from->si_uid, &to->si_uid); - break; - case __SI_POLL >> 16: - put_user_ex(from->si_fd, &to->si_fd); - break; - case __SI_TIMER >> 16: - put_user_ex(from->si_overrun, &to->si_overrun); - put_user_ex(ptr_to_compat(from->si_ptr), - &to->si_ptr); - break; - /* This is not generated by the kernel as of now. 
*/ - case __SI_RT >> 16: - case __SI_MESGQ >> 16: - put_user_ex(from->si_uid, &to->si_uid); - put_user_ex(from->si_int, &to->si_int); - break; - } - } - } put_user_catch(err); - - return err; -} - -int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) -{ - int err = 0; - u32 ptr32; - - if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) - return -EFAULT; - - get_user_try { - get_user_ex(to->si_signo, &from->si_signo); - get_user_ex(to->si_errno, &from->si_errno); - get_user_ex(to->si_code, &from->si_code); - - get_user_ex(to->si_pid, &from->si_pid); - get_user_ex(to->si_uid, &from->si_uid); - get_user_ex(ptr32, &from->si_ptr); - to->si_ptr = compat_ptr(ptr32); - } get_user_catch(err); - - return err; -} - /* * Do a signal return; undo the signal stack. */ diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index c8393634ca0c..ebf6d5e5668c 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -313,7 +313,6 @@ struct apic { /* wakeup_secondary_cpu */ int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); - bool wait_for_init_deassert; void (*inquire_remote_apic)(int apicid); /* apic ops */ @@ -378,7 +377,6 @@ extern struct apic *__apicdrivers[], *__apicdrivers_end[]; * APIC functionality to boot other CPUs - only used on SMP: */ #ifdef CONFIG_SMP -extern atomic_t init_deasserted; extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); #endif diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h index 9686c3d9ff73..259a7c1ef709 100644 --- a/arch/x86/include/asm/arch_hweight.h +++ b/arch/x86/include/asm/arch_hweight.h @@ -21,7 +21,7 @@ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective * compiler switches. */ -static inline unsigned int __arch_hweight32(unsigned int w) +static __always_inline unsigned int __arch_hweight32(unsigned int w) { unsigned int res = 0; @@ -42,20 +42,23 @@ static inline unsigned int __arch_hweight8(unsigned int w) return __arch_hweight32(w & 0xff); } +#ifdef CONFIG_X86_32 static inline unsigned long __arch_hweight64(__u64 w) { - unsigned long res = 0; - -#ifdef CONFIG_X86_32 return __arch_hweight32((u32)w) + __arch_hweight32((u32)(w >> 32)); +} #else +static __always_inline unsigned long __arch_hweight64(__u64 w) +{ + unsigned long res = 0; + asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); -#endif /* CONFIG_X86_32 */ return res; } +#endif /* CONFIG_X86_32 */ #endif diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index e51a8f803f55..818cb8788225 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -91,15 +91,4 @@ do { \ #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() -/* - * Stop RDTSC speculation. This is needed when you need to use RDTSC - * (or get_cycles or vread that possibly accesses the TSC) in a defined - * code region. 
- */ -static __always_inline void rdtsc_barrier(void) -{ - alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, - "lfence", X86_FEATURE_LFENCE_RDTSC); -} - #endif /* _ASM_X86_BARRIER_H */ diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h deleted file mode 100644 index 1fe49704b146..000000000000 --- a/arch/x86/include/asm/context_tracking.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _ASM_X86_CONTEXT_TRACKING_H -#define _ASM_X86_CONTEXT_TRACKING_H - -#ifdef CONFIG_CONTEXT_TRACKING -# define SCHEDULE_USER call schedule_user -#else -# define SCHEDULE_USER call schedule -#endif - -#endif diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 3d6606fb97d0..477fc28050e4 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -119,6 +119,7 @@ #define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ #define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ #define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ #define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ @@ -176,6 +177,7 @@ #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ #define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ #define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ /* * Auxiliary flags: Linux defined - For features scattered in various diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 9b3b4f2754c7..36a760bda462 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -4,5 +4,6 @@ #include <asm-generic/delay.h> void use_tsc_delay(void); +void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index f161c189c27b..141c561f4664 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -78,7 +78,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t; #ifdef CONFIG_X86_64 extern unsigned int vdso64_enabled; #endif -#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT) +#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) extern unsigned int vdso32_enabled; #endif @@ -187,8 +187,8 @@ static inline void elf_common_init(struct thread_struct *t, #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ elf_common_init(¤t->thread, regs, __USER_DS) -void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp); -#define compat_start_thread start_thread_ia32 +void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp); +#define compat_start_thread compat_start_thread void set_personality_ia32(bool); #define COMPAT_SET_PERSONALITY(ex) \ @@ -344,14 +344,9 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm, */ static inline int mmap_is_ia32(void) { -#ifdef CONFIG_X86_32 - return 1; -#endif -#ifdef CONFIG_IA32_EMULATION - if (test_thread_flag(TIF_ADDR32)) - return 1; -#endif - return 0; + return config_enabled(CONFIG_X86_32) || + (config_enabled(CONFIG_COMPAT) && + test_thread_flag(TIF_ADDR32)); } /* Do not change the values. 
See get_align_mask() */ diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index d0e8e0141041..28019765442e 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h @@ -22,15 +22,6 @@ struct ucontext_ia32 { compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; -struct ucontext_x32 { - unsigned int uc_flags; - unsigned int uc_link; - compat_stack_t uc_stack; - unsigned int uc__pad0; /* needed for alignment */ - struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */ - compat_sigset_t uc_sigmask; /* mask last for extensibility */ -}; - /* This matches struct stat64 in glibc2.2, hence the absolutely * insane amounts of padding around dev_t's. */ diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 4c2d2eb2060a..6ca9fd6234e1 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -117,16 +117,6 @@ #define FPU_IRQ 13 -#define FIRST_VM86_IRQ 3 -#define LAST_VM86_IRQ 15 - -#ifndef __ASSEMBLY__ -static inline int invalid_vm86_irq(int irq) -{ - return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ; -} -#endif - /* * Size the maximum number of interrupts. * diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h index 74a2a8dc9908..1410b567ecde 100644 --- a/arch/x86/include/asm/kasan.h +++ b/arch/x86/include/asm/kasan.h @@ -1,6 +1,9 @@ #ifndef _ASM_X86_KASAN_H #define _ASM_X86_KASAN_H +#include <linux/const.h> +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) + /* * Compiler uses shadow offset assuming that addresses start * from 0. Kernel addresses don't start from 0, so shadow diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 49ec9038ec14..c12e845f59e6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -252,6 +252,11 @@ struct kvm_pio_request { int size; }; +struct rsvd_bits_validate { + u64 rsvd_bits_mask[2][4]; + u64 bad_mt_xwr; +}; + /* * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level * 32-bit). The kvm_mmu structure abstracts the details of the current mmu @@ -289,8 +294,15 @@ struct kvm_mmu { u64 *pae_root; u64 *lm_root; - u64 rsvd_bits_mask[2][4]; - u64 bad_mt_xwr; + + /* + * check zero bits on shadow page table entries, these + * bits include not only hardware reserved bits but also + * the bits spte never used. 
+ */ + struct rsvd_bits_validate shadow_zero_check; + + struct rsvd_bits_validate guest_rsvd_check; /* * Bitmap: bit set = last pte in walk @@ -358,6 +370,11 @@ struct kvm_mtrr { struct list_head head; }; +/* Hyper-V per vcpu emulation context */ +struct kvm_vcpu_hv { + u64 hv_vapic; +}; + struct kvm_vcpu_arch { /* * rip and regs accesses must go through @@ -514,8 +531,7 @@ struct kvm_vcpu_arch { /* used for guest single stepping over the given code position */ unsigned long singlestep_rip; - /* fields used by HYPER-V emulation */ - u64 hv_vapic; + struct kvm_vcpu_hv hyperv; cpumask_var_t wbinvd_dirty_mask; @@ -586,6 +602,17 @@ struct kvm_apic_map { struct kvm_lapic *logical_map[16][16]; }; +/* Hyper-V emulation context */ +struct kvm_hv { + u64 hv_guest_os_id; + u64 hv_hypercall; + u64 hv_tsc_page; + + /* Hyper-v based guest crash (NT kernel bugcheck) parameters */ + u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS]; + u64 hv_crash_ctl; +}; + struct kvm_arch { unsigned int n_used_mmu_pages; unsigned int n_requested_mmu_pages; @@ -645,16 +672,14 @@ struct kvm_arch { /* reads protected by irq_srcu, writes by irq_lock */ struct hlist_head mask_notifier_list; - /* fields used by HYPER-V emulation */ - u64 hv_guest_os_id; - u64 hv_hypercall; - u64 hv_tsc_page; + struct kvm_hv hyperv; #ifdef CONFIG_KVM_MMU_AUDIT int audit_point; #endif bool boot_vcpu_runs_old_kvmclock; + u32 bsp_vcpu_id; u64 disabled_quirks; }; @@ -1203,5 +1228,7 @@ int __x86_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); int x86_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); +bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); +bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/include/asm/math_emu.h b/arch/x86/include/asm/math_emu.h index 031f6266f425..0d9b14f60d2c 100644 --- a/arch/x86/include/asm/math_emu.h +++ b/arch/x86/include/asm/math_emu.h @@ -2,7 +2,6 @@ #define _ASM_X86_MATH_EMU_H #include <asm/ptrace.h> -#include <asm/vm86.h> /* This structure matches the layout of the data saved to the stack following a device-not-present interrupt, part of it saved @@ -10,9 +9,6 @@ */ struct math_emu_info { long ___orig_eip; - union { - struct pt_regs *regs; - struct kernel_vm86_regs *vm86; - }; + struct pt_regs *regs; }; #endif /* _ASM_X86_MATH_EMU_H */ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 982dfc3679ad..2dbc0bf2b9f3 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -151,10 +151,12 @@ extern int mce_p5_enabled; #ifdef CONFIG_X86_MCE int mcheck_init(void); void mcheck_cpu_init(struct cpuinfo_x86 *c); +void mcheck_cpu_clear(struct cpuinfo_x86 *c); void mcheck_vendor_init_severity(void); #else static inline int mcheck_init(void) { return 0; } static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} +static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {} static inline void mcheck_vendor_init_severity(void) {} #endif @@ -181,20 +183,18 @@ DECLARE_PER_CPU(struct device *, mce_device); #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); +void mce_intel_feature_clear(struct cpuinfo_x86 *c); void cmci_clear(void); void cmci_reenable(void); void cmci_rediscover(void); void cmci_recheck(void); -void lmce_clear(void); -void lmce_enable(void); #else static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } +static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { } static inline void cmci_clear(void) {} 
static inline void cmci_reenable(void) {} static inline void cmci_rediscover(void) {} static inline void cmci_recheck(void) {} -static inline void lmce_clear(void) {} -static inline void lmce_enable(void) {} #endif #ifdef CONFIG_X86_MCE_AMD diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 364d27481a52..55234d5e7160 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -9,7 +9,9 @@ * we put the segment information here. */ typedef struct { +#ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; +#endif #ifdef CONFIG_X86_64 /* True if mm supports a task running in 32 bit compatibility mode. */ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 984abfe47edc..379cd3658799 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -33,6 +33,7 @@ static inline void load_mm_cr4(struct mm_struct *mm) static inline void load_mm_cr4(struct mm_struct *mm) {} #endif +#ifdef CONFIG_MODIFY_LDT_SYSCALL /* * ldt_structs can be allocated, used, and freed, but they are never * modified while live. @@ -48,8 +49,23 @@ struct ldt_struct { int size; }; +/* + * Used for LDT copy/destruction. + */ +int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +void destroy_context(struct mm_struct *mm); +#else /* CONFIG_MODIFY_LDT_SYSCALL */ +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + return 0; +} +static inline void destroy_context(struct mm_struct *mm) {} +#endif + static inline void load_mm_ldt(struct mm_struct *mm) { +#ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; /* lockless_dereference synchronizes with smp_store_release */ @@ -73,17 +89,13 @@ static inline void load_mm_ldt(struct mm_struct *mm) set_ldt(ldt->entries, ldt->size); else clear_LDT(); +#else + clear_LDT(); +#endif DEBUG_LOCKS_WARN_ON(preemptible()); } -/* - * Used for LDT copy/destruction. - */ -int init_new_context(struct task_struct *tsk, struct mm_struct *mm); -void destroy_context(struct mm_struct *mm); - - static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP @@ -114,6 +126,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, /* Load per-mm CR4 state */ load_mm_cr4(next); +#ifdef CONFIG_MODIFY_LDT_SYSCALL /* * Load the LDT, if the LDT is different. 
* @@ -128,6 +141,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ if (unlikely(prev->context.ldt != next->context.ldt)) load_mm_ldt(next); +#endif } #ifdef CONFIG_SMP else { diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index c163215abb9a..aaf59b7da98a 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -7,6 +7,7 @@ struct ms_hyperv_info { u32 features; + u32 misc_features; u32 hints; }; @@ -20,4 +21,8 @@ void hyperv_vector_handler(struct pt_regs *regs); void hv_setup_vmbus_irq(void (*handler)(void)); void hv_remove_vmbus_irq(void); +void hv_setup_kexec_handler(void (*handler)(void)); +void hv_remove_kexec_handler(void); +void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); +void hv_remove_crash_handler(void); #endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 9ebc3d009373..fcd17c1fc0c6 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -73,6 +73,12 @@ #define MSR_LBR_CORE_FROM 0x00000040 #define MSR_LBR_CORE_TO 0x00000060 +#define MSR_LBR_INFO_0 0x00000dc0 /* ... 0xddf for _31 */ +#define LBR_INFO_MISPRED BIT_ULL(63) +#define LBR_INFO_IN_TX BIT_ULL(62) +#define LBR_INFO_ABORT BIT_ULL(61) +#define LBR_INFO_CYCLES 0xffff + #define MSR_IA32_PEBS_ENABLE 0x000003f1 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 @@ -80,13 +86,21 @@ #define MSR_IA32_RTIT_CTL 0x00000570 #define RTIT_CTL_TRACEEN BIT(0) +#define RTIT_CTL_CYCLEACC BIT(1) #define RTIT_CTL_OS BIT(2) #define RTIT_CTL_USR BIT(3) #define RTIT_CTL_CR3EN BIT(7) #define RTIT_CTL_TOPA BIT(8) +#define RTIT_CTL_MTC_EN BIT(9) #define RTIT_CTL_TSC_EN BIT(10) #define RTIT_CTL_DISRETC BIT(11) #define RTIT_CTL_BRANCH_EN BIT(13) +#define RTIT_CTL_MTC_RANGE_OFFSET 14 +#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET) +#define RTIT_CTL_CYC_THRESH_OFFSET 19 +#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET) +#define RTIT_CTL_PSB_FREQ_OFFSET 24 +#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET) #define MSR_IA32_RTIT_STATUS 0x00000571 #define RTIT_STATUS_CONTEXTEN BIT(1) #define RTIT_STATUS_TRIGGEREN BIT(2) diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index e6a707eb5081..77d8b284e4a7 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -47,14 +47,13 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) * it means rax *or* rdx. 
*/ #ifdef CONFIG_X86_64 -#define DECLARE_ARGS(val, low, high) unsigned low, high -#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) -#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) +/* Using 64-bit values saves one instruction clearing the high half of low */ +#define DECLARE_ARGS(val, low, high) unsigned long low, high +#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32) #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) #else #define DECLARE_ARGS(val, low, high) unsigned long long val #define EAX_EDX_VAL(val, low, high) (val) -#define EAX_EDX_ARGS(val, low, high) "A" (val) #define EAX_EDX_RET(val, low, high) "=A" (val) #endif @@ -106,12 +105,19 @@ notrace static inline int native_write_msr_safe(unsigned int msr, return err; } -extern unsigned long long native_read_tsc(void); - extern int rdmsr_safe_regs(u32 regs[8]); extern int wrmsr_safe_regs(u32 regs[8]); -static __always_inline unsigned long long __native_read_tsc(void) +/** + * rdtsc() - returns the current TSC without ordering constraints + * + * rdtsc() returns the result of RDTSC as a 64-bit integer. The + * only ordering constraint it supplies is the ordering implied by + * "asm volatile": it will put the RDTSC in the place you expect. The + * CPU can and will speculatively execute that RDTSC, though, so the + * results can be non-monotonic if compared on different CPUs. + */ +static __always_inline unsigned long long rdtsc(void) { DECLARE_ARGS(val, low, high); @@ -120,6 +126,35 @@ static __always_inline unsigned long long __native_read_tsc(void) return EAX_EDX_VAL(val, low, high); } +/** + * rdtsc_ordered() - read the current TSC in program order + * + * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer. + * It is ordered like a load to a global in-memory counter. It should + * be impossible to observe non-monotonic rdtsc_unordered() behavior + * across multiple CPUs as long as the TSC is synced. + */ +static __always_inline unsigned long long rdtsc_ordered(void) +{ + /* + * The RDTSC instruction is not ordered relative to memory + * access. The Intel SDM and the AMD APM are both vague on this + * point, but empirically an RDTSC instruction can be + * speculatively executed before prior loads. An RDTSC + * immediately after an appropriate barrier appears to be + * ordered as a normal load, that is, it provides the same + * ordering guarantees as reading from a global memory location + * that some other imaginary CPU is updating continuously with a + * time stamp. 
+ */ + alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, + "lfence", X86_FEATURE_LFENCE_RDTSC); + return rdtsc(); +} + +/* Deprecated, keep it for a cycle for easier merging: */ +#define rdtscll(now) do { (now) = rdtsc_ordered(); } while (0) + static inline unsigned long long native_read_pmc(int counter) { DECLARE_ARGS(val, low, high); @@ -153,8 +188,10 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high) #define rdmsrl(msr, val) \ ((val) = native_read_msr((msr))) -#define wrmsrl(msr, val) \ - native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) +static inline void wrmsrl(unsigned msr, u64 val) +{ + native_write_msr(msr, (u32)val, (u32)(val >> 32)); +} /* wrmsr with exception handling */ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) @@ -180,12 +217,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) return err; } -#define rdtscl(low) \ - ((low) = (u32)__native_read_tsc()) - -#define rdtscll(val) \ - ((val) = __native_read_tsc()) - #define rdpmc(counter, low, high) \ do { \ u64 _l = native_read_pmc((counter)); \ @@ -195,15 +226,6 @@ do { \ #define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) -#define rdtscp(low, high, aux) \ -do { \ - unsigned long long _val = native_read_tscp(&(aux)); \ - (low) = (u32)_val; \ - (high) = (u32)(_val >> 32); \ -} while (0) - -#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) - #endif /* !CONFIG_PARAVIRT */ /* diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 653dfa7662e1..c70689b5e5aa 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -14,6 +14,9 @@ #define CPUID5_ECX_INTERRUPT_BREAK 0x2 #define MWAIT_ECX_INTERRUPT_BREAK 0x1 +#define MWAITX_ECX_TIMER_ENABLE BIT(1) +#define MWAITX_MAX_LOOPS ((u32)-1) +#define MWAITX_DISABLE_CSTATES 0xf static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) @@ -23,6 +26,14 @@ static inline void __monitor(const void *eax, unsigned long ecx, :: "a" (eax), "c" (ecx), "d"(edx)); } +static inline void __monitorx(const void *eax, unsigned long ecx, + unsigned long edx) +{ + /* "monitorx %eax, %ecx, %edx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfa;" + :: "a" (eax), "c" (ecx), "d"(edx)); +} + static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax, %ecx;" */ @@ -30,6 +41,40 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) :: "a" (eax), "c" (ecx)); } +/* + * MWAITX allows for a timer expiration to get the core out a wait state in + * addition to the default MWAIT exit condition of a store appearing at a + * monitored virtual address. + * + * Registers: + * + * MWAITX ECX[1]: enable timer if set + * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0 + * frequency is the same as the TSC frequency. 
+ * + * Below is a comparison between MWAIT and MWAITX on AMD processors: + * + * MWAIT MWAITX + * opcode 0f 01 c9 | 0f 01 fb + * ECX[0] value of RFLAGS.IF seen by instruction + * ECX[1] unused/#GP if set | enable timer if set + * ECX[31:2] unused/#GP if set + * EAX unused (reserve for hint) + * EBX[31:0] unused | max wait time (P0 clocks) + * + * MONITOR MONITORX + * opcode 0f 01 c8 | 0f 01 fa + * EAX (logical) address to monitor + * ECX #GP if not zero + */ +static inline void __mwaitx(unsigned long eax, unsigned long ebx, + unsigned long ecx) +{ + /* "mwaitx %eax, %ebx, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfb;" + :: "a" (eax), "b" (ebx), "c" (ecx)); +} + static inline void __sti_mwait(unsigned long eax, unsigned long ecx) { trace_hardirqs_on(); diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index d143bfad45d7..10d0596433f8 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -153,7 +153,11 @@ do { \ val = paravirt_read_msr(msr, &_err); \ } while (0) -#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) +static inline void wrmsrl(unsigned msr, u64 val) +{ + wrmsr(msr, (u32)val, (u32)(val>>32)); +} + #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b) /* rdmsr with exception handling */ @@ -174,19 +178,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) return err; } -static inline u64 paravirt_read_tsc(void) -{ - return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); -} - -#define rdtscl(low) \ -do { \ - u64 _l = paravirt_read_tsc(); \ - low = (int)_l; \ -} while (0) - -#define rdtscll(val) (val = paravirt_read_tsc()) - static inline unsigned long long paravirt_sched_clock(void) { return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); @@ -215,27 +206,6 @@ do { \ #define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter)) -static inline unsigned long long paravirt_rdtscp(unsigned int *aux) -{ - return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux); -} - -#define rdtscp(low, high, aux) \ -do { \ - int __aux; \ - unsigned long __val = paravirt_rdtscp(&__aux); \ - (low) = (u32)__val; \ - (high) = (u32)(__val >> 32); \ - (aux) = __aux; \ -} while (0) - -#define rdtscpll(val, aux) \ -do { \ - unsigned long __aux; \ - val = paravirt_rdtscp(&__aux); \ - (aux) = __aux; \ -} while (0) - static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) { PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index a6b8f9fadb06..ce029e4fa7c6 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -156,9 +156,7 @@ struct pv_cpu_ops { u64 (*read_msr)(unsigned int msr, int *err); int (*write_msr)(unsigned int msr, unsigned low, unsigned high); - u64 (*read_tsc)(void); u64 (*read_pmc)(int counter); - unsigned long long (*read_tscp)(unsigned int *aux); #ifdef CONFIG_X86_32 /* diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 164e3f8d3c3d..fa1195dae425 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -93,8 +93,6 @@ extern raw_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); -extern bool mp_should_keep_irq(struct device *dev); - struct pci_raw_ops { int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val); diff --git 
a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index dc0f6ed35b08..7bcb861a04e5 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -159,6 +159,13 @@ struct x86_pmu_capability { */ #define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16) +#define GLOBAL_STATUS_COND_CHG BIT_ULL(63) +#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62) +#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61) +#define GLOBAL_STATUS_ASIF BIT_ULL(60) +#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59) +#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58) + /* * IBS cpuid feature detection */ diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index dca71714f860..b12f81022a6b 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void) /* * Returns true when we need to resched and can (barring IRQ state). */ -static __always_inline bool should_resched(void) +static __always_inline bool should_resched(int preempt_offset) { - return unlikely(!raw_cpu_read_4(__preempt_count)); + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); } #ifdef CONFIG_PREEMPT diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 944f1785ed0d..19577dd325fa 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -6,8 +6,8 @@ /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; +struct vm86; -#include <asm/vm86.h> #include <asm/math_emu.h> #include <asm/segment.h> #include <asm/types.h> @@ -400,15 +400,9 @@ struct thread_struct { unsigned long cr2; unsigned long trap_nr; unsigned long error_code; -#ifdef CONFIG_X86_32 +#ifdef CONFIG_VM86 /* Virtual 86 mode info */ - struct vm86_struct __user *vm86_info; - unsigned long screen_bitmap; - unsigned long v86flags; - unsigned long v86mask; - unsigned long saved_sp0; - unsigned int saved_fs; - unsigned int saved_gs; + struct vm86 *vm86; #endif /* IO permissions: */ unsigned long *io_bitmap_ptr; @@ -651,14 +645,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr) extern void set_task_blockstep(struct task_struct *task, bool on); -/* - * from system description table in BIOS. 
Mostly for MCA use, but - * others may find it useful: - */ -extern unsigned int machine_id; -extern unsigned int machine_submodel_id; -extern unsigned int BIOS_revision; - /* Boot loader type from the setup header: */ extern int bootloader_type; extern int bootloader_version; @@ -720,7 +706,6 @@ static inline void spin_lock_prefetch(const void *x) #define INIT_THREAD { \ .sp0 = TOP_OF_INIT_STACK, \ - .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 5fabf1362942..6271281f947d 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -88,7 +88,6 @@ extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch, unsigned long phase1_result); extern long syscall_trace_enter(struct pt_regs *); -extern void syscall_trace_leave(struct pt_regs *); static inline unsigned long regs_return_value(struct pt_regs *regs) { diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index 628954ceede1..7a6bed5c08bc 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) static __always_inline u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src) { - u64 delta = __native_read_tsc() - src->tsc_timestamp; + u64 delta = rdtsc_ordered() - src->tsc_timestamp; return pvclock_scale_delta(delta, src->tsc_to_system_mul, src->tsc_shift); } @@ -76,13 +76,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u8 ret_flags; version = src->version; - /* Note: emulated platforms which do not advertise SSE2 support - * result in kvmclock not using the necessary RDTSC barriers. - * Without barriers, it is possible that RDTSC instruction reads from - * the time stamp counter outside rdtsc_barrier protected section - * below, resulting in violation of monotonicity. 
- */ - rdtsc_barrier(); + offset = pvclock_get_nsec_offset(src); ret = src->system_time + offset; ret_flags = src->flags; diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h index 7c7c27c97daa..1f3175bb994e 100644 --- a/arch/x86/include/asm/sigframe.h +++ b/arch/x86/include/asm/sigframe.h @@ -4,6 +4,7 @@ #include <asm/sigcontext.h> #include <asm/siginfo.h> #include <asm/ucontext.h> +#include <linux/compat.h> #ifdef CONFIG_X86_32 #define sigframe_ia32 sigframe @@ -69,6 +70,15 @@ struct rt_sigframe { #ifdef CONFIG_X86_X32_ABI +struct ucontext_x32 { + unsigned int uc_flags; + unsigned int uc_link; + compat_stack_t uc_stack; + unsigned int uc__pad0; /* needed for alignment */ + struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */ + compat_sigset_t uc_sigmask; /* mask last for extensibility */ +}; + struct rt_sigframe_x32 { u64 pretcode; struct ucontext_x32 uc; diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 31eab867e6d3..c481be78fcf1 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -30,7 +30,7 @@ typedef sigset_t compat_sigset_t; #endif /* __ASSEMBLY__ */ #include <uapi/asm/signal.h> #ifndef __ASSEMBLY__ -extern void do_notify_resume(struct pt_regs *, void *, __u32); +extern void do_signal(struct pt_regs *regs); #define __ARCH_HAS_SA_RESTORER diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index c2e00bb2a136..58505f01962f 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void) * on during the bootup the random pool has true entropy too. */ get_random_bytes(&canary, sizeof(canary)); - tsc = __native_read_tsc(); + tsc = rdtsc(); canary += tsc + (tsc << 32UL); current->stack_canary = canary; diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 592a6a672e07..91dfcafe27a6 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -37,6 +37,7 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *); asmlinkage unsigned long sys_sigreturn(void); /* kernel/vm86_32.c */ +struct vm86_struct; asmlinkage long sys_vm86old(struct vm86_struct __user *); asmlinkage long sys_vm86(unsigned long, unsigned long); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 225ee545e1a0..8afdc3e44247 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -27,14 +27,17 @@ * Without this offset, that can result in a page fault. (We are * careful that, in this case, the value we read doesn't matter.) * - * In vm86 mode, the hardware frame is much longer still, but we neither - * access the extra members from NMI context, nor do we write such a - * frame at sp0 at all. + * In vm86 mode, the hardware frame is much longer still, so add 16 + * bytes to make room for the real-mode segments. * * x86_64 has a fixed-length stack frame. 
*/ #ifdef CONFIG_X86_32 -# define TOP_OF_KERNEL_STACK_PADDING 8 +# ifdef CONFIG_VM86 +# define TOP_OF_KERNEL_STACK_PADDING 16 +# else +# define TOP_OF_KERNEL_STACK_PADDING 8 +# endif #else # define TOP_OF_KERNEL_STACK_PADDING 0 #endif @@ -140,27 +143,11 @@ struct thread_info { _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \ _TIF_NOHZ) -/* work to do in syscall_trace_leave() */ -#define _TIF_WORK_SYSCALL_EXIT \ - (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ) - -/* work to do on interrupt/exception return */ -#define _TIF_WORK_MASK \ - (0x0000FFFF & \ - ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \ - _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) - /* work to do on any return to user space */ #define _TIF_ALLWORK_MASK \ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ _TIF_NOHZ) -/* Only used for 64 bit */ -#define _TIF_DO_NOTIFY_MASK \ - (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ - _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE) - /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index c5380bea2a36..c3496619740a 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -112,8 +112,8 @@ asmlinkage void smp_threshold_interrupt(void); asmlinkage void smp_deferred_error_interrupt(void); #endif -extern enum ctx_state ist_enter(struct pt_regs *regs); -extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state); +extern void ist_enter(struct pt_regs *regs); +extern void ist_exit(struct pt_regs *regs); extern void ist_begin_non_atomic(struct pt_regs *regs); extern void ist_end_non_atomic(void); diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 94605c0e9cee..6d7c5479bcea 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -21,28 +21,12 @@ extern void disable_TSC(void); static inline cycles_t get_cycles(void) { - unsigned long long ret = 0; - #ifndef CONFIG_X86_TSC if (!cpu_has_tsc) return 0; #endif - rdtscll(ret); - - return ret; -} -static __always_inline cycles_t vget_cycles(void) -{ - /* - * We only do VDSOs on TSC capable CPUs, so this shouldn't - * access boot_cpu_data (which is not VDSO-safe): - */ -#ifndef CONFIG_X86_TSC - if (!cpu_has_tsc) - return 0; -#endif - return (cycles_t)__native_read_tsc(); + return rdtsc(); } extern void tsc_init(void); @@ -51,6 +35,7 @@ extern int unsynchronized_tsc(void); extern int check_tsc_unstable(void); extern int check_tsc_disabled(void); extern unsigned long native_calibrate_tsc(void); +extern unsigned long long native_sched_clock_from_tsc(u64 tsc); extern int tsc_clocksource_reliable; diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h index 1d8de3f3feca..1e491f3af317 100644 --- a/arch/x86/include/asm/vm86.h +++ b/arch/x86/include/asm/vm86.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_VM86_H #define _ASM_X86_VM86_H - #include <asm/ptrace.h> #include <uapi/asm/vm86.h> @@ -28,43 +27,49 @@ struct kernel_vm86_regs { unsigned short gs, __gsh; }; -struct kernel_vm86_struct { - struct kernel_vm86_regs regs; -/* - * the below part remains on the kernel stack while we are in VM86 mode. - * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we - * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above - * 'struct kernel_vm86_regs' with the then actual values. 
- * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct' - * in kernelspace, hence we need not reget the data from userspace. - */ -#define VM86_TSS_ESP0 flags +struct vm86 { + struct vm86plus_struct __user *user_vm86; + struct pt_regs regs32; + unsigned long veflags; + unsigned long veflags_mask; + unsigned long saved_sp0; + unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; struct vm86plus_info_struct vm86plus; - struct pt_regs *regs32; /* here we save the pointer to the old regs */ -/* - * The below is not part of the structure, but the stack layout continues - * this way. In front of 'return-eip' may be some data, depending on - * compilation, so we don't rely on this and save the pointer to 'oldregs' - * in 'regs32' above. - * However, with GCC-2.7.2 and the current CFLAGS you see exactly this: - - long return-eip; from call to vm86() - struct pt_regs oldregs; user space registers as saved by syscall - */ }; #ifdef CONFIG_VM86 void handle_vm86_fault(struct kernel_vm86_regs *, long); int handle_vm86_trap(struct kernel_vm86_regs *, long, int); -struct pt_regs *save_v86_state(struct kernel_vm86_regs *); +void save_v86_state(struct kernel_vm86_regs *, int); struct task_struct; + +#define free_vm86(t) do { \ + struct thread_struct *__t = (t); \ + if (__t->vm86 != NULL) { \ + kfree(__t->vm86); \ + __t->vm86 = NULL; \ + } \ +} while (0) + +/* + * Support for VM86 programs to request interrupts for + * real mode hardware drivers: + */ +#define FIRST_VM86_IRQ 3 +#define LAST_VM86_IRQ 15 + +static inline int invalid_vm86_irq(int irq) +{ + return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ; +} + void release_vm86_irqs(struct task_struct *); #else @@ -77,6 +82,10 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) return 0; } +static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { } + +#define free_vm86(t) do { } while(0) + #endif /* CONFIG_VM86 */ #endif /* _ASM_X86_VM86_H */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index da772edd19ab..448b7ca61aee 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -47,6 +47,7 @@ #define CPU_BASED_MOV_DR_EXITING 0x00800000 #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 #define CPU_BASED_USE_IO_BITMAPS 0x02000000 +#define CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 #define CPU_BASED_USE_MSR_BITMAPS 0x10000000 #define CPU_BASED_MONITOR_EXITING 0x20000000 #define CPU_BASED_PAUSE_EXITING 0x40000000 @@ -367,29 +368,29 @@ enum vmcs_field { #define TYPE_PHYSICAL_APIC_EVENT (10 << 12) #define TYPE_PHYSICAL_APIC_INST (15 << 12) -/* segment AR */ -#define SEGMENT_AR_L_MASK (1 << 13) - -#define AR_TYPE_ACCESSES_MASK 1 -#define AR_TYPE_READABLE_MASK (1 << 1) -#define AR_TYPE_WRITEABLE_MASK (1 << 2) -#define AR_TYPE_CODE_MASK (1 << 3) -#define AR_TYPE_MASK 0x0f -#define AR_TYPE_BUSY_64_TSS 11 -#define AR_TYPE_BUSY_32_TSS 11 -#define AR_TYPE_BUSY_16_TSS 3 -#define AR_TYPE_LDT 2 - -#define AR_UNUSABLE_MASK (1 << 16) -#define AR_S_MASK (1 << 4) -#define AR_P_MASK (1 << 7) -#define AR_L_MASK (1 << 13) -#define AR_DB_MASK (1 << 14) -#define AR_G_MASK (1 << 15) -#define AR_DPL_SHIFT 5 -#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3) - -#define AR_RESERVD_MASK 0xfffe0f00 +/* segment AR in VMCS -- these are different from what LAR reports */ +#define VMX_SEGMENT_AR_L_MASK (1 << 13) + +#define VMX_AR_TYPE_ACCESSES_MASK 1 +#define VMX_AR_TYPE_READABLE_MASK (1 << 1) 
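The VMX_ prefix makes explicit that these masks describe the VMCS access-rights field layout rather than what the LAR instruction reports. A hypothetical consumer of the renamed macros, for orientation (the remaining VMX_AR_* definitions continue below; the helper name is illustrative only):

/* Hypothetical helper: check presence and DPL in a VMCS access-rights word. */
static inline bool sketch_segment_present_at_dpl(u32 ar, unsigned int dpl)
{
	if (ar & VMX_AR_UNUSABLE_MASK)
		return false;
	return (ar & VMX_AR_P_MASK) && VMX_AR_DPL(ar) == dpl;
}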
+#define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2) +#define VMX_AR_TYPE_CODE_MASK (1 << 3) +#define VMX_AR_TYPE_MASK 0x0f +#define VMX_AR_TYPE_BUSY_64_TSS 11 +#define VMX_AR_TYPE_BUSY_32_TSS 11 +#define VMX_AR_TYPE_BUSY_16_TSS 3 +#define VMX_AR_TYPE_LDT 2 + +#define VMX_AR_UNUSABLE_MASK (1 << 16) +#define VMX_AR_S_MASK (1 << 4) +#define VMX_AR_P_MASK (1 << 7) +#define VMX_AR_L_MASK (1 << 13) +#define VMX_AR_DB_MASK (1 << 14) +#define VMX_AR_G_MASK (1 << 15) +#define VMX_AR_DPL_SHIFT 5 +#define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3) + +#define VMX_AR_RESERVD_MASK 0xfffe0f00 #define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0) #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1) diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index ab456dc233b5..329254373479 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -120,7 +120,7 @@ struct boot_params { __u8 _pad3[16]; /* 0x070 */ __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ - struct sys_desc_table sys_desc_table; /* 0x0a0 */ + struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */ struct olpc_ofw_header olpc_ofw_header; /* 0x0b0 */ __u32 ext_ramdisk_image; /* 0x0c0 */ __u32 ext_ramdisk_size; /* 0x0c4 */ diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index f36d56bd7632..f0412c50c47b 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -27,6 +27,8 @@ #define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) +/* Partition reference TSC MSR is available */ +#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE (1 << 9) /* A partition's reference time stamp counter (TSC) page */ #define HV_X64_MSR_REFERENCE_TSC 0x40000021 diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index a0eab85ce7b8..76880ede9a35 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -15,7 +15,8 @@ struct mce { __u64 time; /* wall time_t when error was detected */ __u8 cpuvendor; /* cpu vendor as encoded in system.h */ __u8 inject_flags; /* software inject flags */ - __u16 pad; + __u8 severity; + __u8 usable_addr; __u32 cpuid; /* CPUID 1 EAX */ __u8 cs; /* code segment */ __u8 bank; /* machine check bank */ diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 180a0c3c224d..79887abcb5e1 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -37,8 +37,6 @@ #define X86_EFLAGS_VM _BITUL(X86_EFLAGS_VM_BIT) #define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */ #define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT) -#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */ -#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT) #define X86_EFLAGS_VIF_BIT 19 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIF _BITUL(X86_EFLAGS_VIF_BIT) #define X86_EFLAGS_VIP_BIT 20 /* Virtual Interrupt Pending */ diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index 1fe92181ee9e..37fee272618f 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -58,6 +58,7 @@ #define EXIT_REASON_INVALID_STATE 33 #define EXIT_REASON_MSR_LOAD_FAIL 34 #define EXIT_REASON_MWAIT_INSTRUCTION 36 +#define EXIT_REASON_MONITOR_TRAP_FLAG 37 #define 
EXIT_REASON_MONITOR_INSTRUCTION 39 #define EXIT_REASON_PAUSE_INSTRUCTION 40 #define EXIT_REASON_MCE_DURING_VMENTRY 41 @@ -106,6 +107,7 @@ { EXIT_REASON_MSR_READ, "MSR_READ" }, \ { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ + { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \ { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \ { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0f15af41bd80..514064897d55 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -23,8 +23,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n CFLAGS_irq.o := -I$(src)/../include/asm/trace obj-y := process_$(BITS).o signal.o +obj-$(CONFIG_COMPAT) += signal_compat.o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o -obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o +obj-y += time.o ioport.o dumpstack.o nmi.o +obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-y += probe_roms.o diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e49ee24da85e..75e8bad53798 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -710,7 +710,7 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) #endif } -static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu) { int cpu; @@ -726,12 +726,6 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) *pcpu = cpu; return 0; } - -/* wrapper to silence section mismatch warning */ -int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu) -{ - return _acpi_map_lsapic(handle, physid, pcpu); -} EXPORT_SYMBOL(acpi_map_cpu); int acpi_unmap_cpu(int cpu) diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index ede92c3364d3..222a57076039 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -263,7 +263,7 @@ static int apbt_clocksource_register(void) /* Verify whether apbt counter works */ t1 = dw_apb_clocksource_read(clocksource_apbt); - rdtscll(start); + start = rdtsc(); /* * We don't know the TSC frequency yet, but waiting for @@ -273,7 +273,7 @@ static int apbt_clocksource_register(void) */ do { rep_nop(); - rdtscll(now); + now = rdtsc(); } while ((now - start) < 200000UL); /* APBT is the only always on clocksource, it has to work! 
*/ @@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void) old = dw_apb_clocksource_read(clocksource_apbt); old += loop; - t1 = __native_read_tsc(); + t1 = rdtsc(); do { new = dw_apb_clocksource_read(clocksource_apbt); } while (new < old); - t2 = __native_read_tsc(); + t2 = rdtsc(); shift = 5; if (unlikely(loop >> shift == 0)) { diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index dcb52850a28f..5aba9220a5ac 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta, { u64 tsc; - rdtscll(tsc); + tsc = rdtsc(); wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); return 0; } @@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev) unsigned long pm = acpi_pm_read_early(); if (cpu_has_tsc) - rdtscll(tsc); + tsc = rdtsc(); switch (lapic_cal_loops++) { case 0: @@ -1209,7 +1209,7 @@ void setup_local_APIC(void) long long max_loops = cpu_khz ? cpu_khz : 1000000; if (cpu_has_tsc) - rdtscll(tsc); + tsc = rdtsc(); if (disable_apic) { disable_ioapic_support(); @@ -1293,7 +1293,7 @@ void setup_local_APIC(void) } if (queued) { if (cpu_has_tsc && cpu_khz) { - rdtscll(ntsc); + ntsc = rdtsc(); max_loops = (cpu_khz << 10) - (ntsc - tsc); } else max_loops--; @@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void) { u64 msr; - if (cpu_has_apic) + if (!cpu_has_apic) return; rdmsrl(MSR_IA32_APICBASE, msr); @@ -1483,10 +1483,13 @@ void x2apic_setup(void) static __init void x2apic_disable(void) { - u32 x2apic_id; + u32 x2apic_id, state = x2apic_state; - if (x2apic_state != X2APIC_ON) - goto out; + x2apic_mode = 0; + x2apic_state = X2APIC_DISABLED; + + if (state != X2APIC_ON) + return; x2apic_id = read_apic_id(); if (x2apic_id >= 255) @@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void) __x2apic_disable(); register_lapic_address(mp_lapic_addr); -out: - x2apic_state = X2APIC_DISABLED; - x2apic_mode = 0; } static __init void x2apic_enable(void) diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index de918c410eae..f92ab36979a2 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -191,7 +191,6 @@ static struct apic apic_flat = { .send_IPI_all = flat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, @@ -299,7 +298,6 @@ static struct apic apic_physflat = { .send_IPI_all = physflat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index b205cdbdbe6a..0d96749cfcac 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -152,7 +152,6 @@ struct apic apic_noop = { .wakeup_secondary_cpu = noop_wakeup_secondary_cpu, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = noop_apic_read, diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 017149cded07..b548fd3b764b 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -92,7 +92,6 @@ static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v); - atomic_set(&init_deasserted, 1); return 0; } @@ -235,7 +234,6 @@ static const 
struct apic apic_numachip __refconst = { .send_IPI_self = numachip_send_IPI_self, .wakeup_secondary_cpu = numachip_wakeup_secondary, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, /* REMRD not supported */ .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index c4a8d63f8220..971cf8875939 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -186,7 +186,6 @@ static struct apic apic_bigsmp = { .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wait_for_init_deassert = true, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index bda488680dbc..7694ae6c1199 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -111,7 +111,6 @@ static struct apic apic_default = { .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wait_for_init_deassert = true, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index ab3219b3fbda..cc8311c4d298 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) return notifier_from_errno(err); } -static struct notifier_block __refdata x2apic_cpu_notifier = { +static struct notifier_block x2apic_cpu_notifier = { .notifier_call = update_clusterinfo, }; @@ -272,7 +272,6 @@ static struct apic apic_x2apic_cluster = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 3ffd925655e0..662e9150ea6f 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -128,7 +128,6 @@ static struct apic apic_x2apic_phys = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index c8d92950bc04..4a139465f1d4 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -248,7 +248,6 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) APIC_DM_STARTUP; uv_write_global_mmr64(pnode, UVH_IPI_INT, val); - atomic_set(&init_deasserted, 1); return 0; } @@ -414,7 +413,6 @@ static struct apic __refdata apic_x2apic_uv_x = { .send_IPI_self = uv_send_IPI_self, .wakeup_secondary_cpu = uv_wakeup_secondary, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 927ec9235947..052c9c3026cc 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -919,7 +919,7 @@ recalc: } else if (jiffies_since_last_check > idle_period) { unsigned int idle_percentage; - idle_percentage = stime - last_stime; + idle_percentage = cputime_to_jiffies(stime - last_stime); idle_percentage *= 100; idle_percentage /= jiffies_since_last_check; use_apm_idle = (idle_percentage > idle_threshold); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile 
index 9bff68798836..4eb065c6bed2 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -46,6 +46,8 @@ obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \ perf_event_intel_uncore_snb.o \ perf_event_intel_uncore_snbep.o \ perf_event_intel_uncore_nhmex.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_msr.o +obj-$(CONFIG_CPU_SUP_AMD) += perf_event_msr.o endif diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index dd3a4baffe50..4a70fc6d400a 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -11,6 +11,7 @@ #include <asm/cpu.h> #include <asm/smp.h> #include <asm/pci-direct.h> +#include <asm/delay.h> #ifdef CONFIG_X86_64 # include <asm/mmconfig.h> @@ -114,7 +115,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); - unsigned long d, d2; + u64 d, d2; printk(KERN_INFO "AMD K6 stepping B detected - "); @@ -125,10 +126,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c) n = K6_BUG_LOOP; f_vide = vide; - rdtscl(d); + d = rdtsc(); while (n--) f_vide(); - rdtscl(d2); + d2 = rdtsc(); d = d2-d; if (d > 20*K6_BUG_LOOP) @@ -506,6 +507,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) /* A random value per boot for bit slice [12:upper_bit) */ va_align.bits = get_random_int() & va_align.mask; } + + if (cpu_has(c, X86_FEATURE_MWAITX)) + use_mwaitx_delay(); } static void early_init_amd(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cb9e5df42dd2..07ce52c22ec8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -13,6 +13,7 @@ #include <linux/kgdb.h> #include <linux/smp.h> #include <linux/io.h> +#include <linux/syscore_ops.h> #include <asm/stackprotector.h> #include <asm/perf_event.h> @@ -1185,10 +1186,10 @@ void syscall_init(void) * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. */ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); - wrmsrl(MSR_LSTAR, entry_SYSCALL_64); + wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION - wrmsrl(MSR_CSTAR, entry_SYSCALL_compat); + wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); /* * This only works on Intel CPUs. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. 
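The syscall_init() change only adds explicit casts: wrmsrl() takes a 64-bit value, so the entry-point symbols are converted through unsigned long instead of relying on an implicit function-pointer conversion. A minimal illustration of the pattern (the symbol name here is a hypothetical stand-in, not the real entry stub):

extern void sketch_syscall_entry(void);	/* hypothetical entry stub */

static void sketch_program_lstar(void)
{
	/* wrmsrl() wants a u64; the cast makes the conversion explicit. */
	wrmsrl(MSR_LSTAR, (unsigned long)sketch_syscall_entry);
}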
@@ -1199,7 +1200,7 @@ void syscall_init(void) wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else - wrmsrl(MSR_CSTAR, ignore_sysret); + wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); @@ -1488,3 +1489,20 @@ inline bool __static_cpu_has_safe(u16 bit) return boot_cpu_has(bit); } EXPORT_SYMBOL_GPL(__static_cpu_has_safe); + +static void bsp_resume(void) +{ + if (this_cpu->c_bsp_resume) + this_cpu->c_bsp_resume(&boot_cpu_data); +} + +static struct syscore_ops cpu_syscore_ops = { + .resume = bsp_resume, +}; + +static int __init init_cpu_syscore(void) +{ + register_syscore_ops(&cpu_syscore_ops); + return 0; +} +core_initcall(init_cpu_syscore); diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index c37dc37e8317..2584265d4745 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -13,6 +13,7 @@ struct cpu_dev { void (*c_init)(struct cpuinfo_x86 *); void (*c_identify)(struct cpuinfo_x86 *); void (*c_detect_tlb)(struct cpuinfo_x86 *); + void (*c_bsp_resume)(struct cpuinfo_x86 *); int c_x86_vendor; #ifdef CONFIG_X86_32 /* Optional vendor specific routine to obtain the cache size. */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 50163fa9034f..98a13db5f4be 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -371,6 +371,36 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c) } } +static void init_intel_energy_perf(struct cpuinfo_x86 *c) +{ + u64 epb; + + /* + * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized. + * (x86_energy_perf_policy(8) is available to change it at run-time.) + */ + if (!cpu_has(c, X86_FEATURE_EPB)) + return; + + rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); + if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE) + return; + + pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); + pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); + epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; + wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); +} + +static void intel_bsp_resume(struct cpuinfo_x86 *c) +{ + /* + * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume, + * so reinitialize it properly like during bootup: + */ + init_intel_energy_perf(c); +} + static void init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; @@ -478,21 +508,7 @@ static void init_intel(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); - /* - * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not. 
- * x86_energy_perf_policy(8) is available to change it at run-time - */ - if (cpu_has(c, X86_FEATURE_EPB)) { - u64 epb; - - rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); - if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) { - pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); - pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); - epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; - wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); - } - } + init_intel_energy_perf(c); } #ifdef CONFIG_X86_32 @@ -747,6 +763,7 @@ static const struct cpu_dev intel_cpu_dev = { .c_detect_tlb = intel_detect_tlb, .c_early_init = early_init_intel, .c_init = init_intel, + .c_bsp_resume = intel_bsp_resume, .c_x86_vendor = X86_VENDOR_INTEL, }; diff --git a/arch/x86/kernel/cpu/intel_pt.h b/arch/x86/kernel/cpu/intel_pt.h index 1c338b0eba05..336878a5d205 100644 --- a/arch/x86/kernel/cpu/intel_pt.h +++ b/arch/x86/kernel/cpu/intel_pt.h @@ -25,32 +25,11 @@ */ #define TOPA_PMI_MARGIN 512 -/* - * Table of Physical Addresses bits - */ -enum topa_sz { - TOPA_4K = 0, - TOPA_8K, - TOPA_16K, - TOPA_32K, - TOPA_64K, - TOPA_128K, - TOPA_256K, - TOPA_512K, - TOPA_1MB, - TOPA_2MB, - TOPA_4MB, - TOPA_8MB, - TOPA_16MB, - TOPA_32MB, - TOPA_64MB, - TOPA_128MB, - TOPA_SZ_END, -}; +#define TOPA_SHIFT 12 -static inline unsigned int sizes(enum topa_sz tsz) +static inline unsigned int sizes(unsigned int tsz) { - return 1 << (tsz + 12); + return 1 << (tsz + TOPA_SHIFT); }; struct topa_entry { @@ -66,20 +45,26 @@ struct topa_entry { u64 rsvd4 : 16; }; -#define TOPA_SHIFT 12 -#define PT_CPUID_LEAVES 2 +#define PT_CPUID_LEAVES 2 +#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */ enum pt_capabilities { PT_CAP_max_subleaf = 0, PT_CAP_cr3_filtering, + PT_CAP_psb_cyc, + PT_CAP_mtc, PT_CAP_topa_output, PT_CAP_topa_multiple_entries, + PT_CAP_single_range_output, PT_CAP_payloads_lip, + PT_CAP_mtc_periods, + PT_CAP_cycle_thresholds, + PT_CAP_psb_periods, }; struct pt_pmu { struct pmu pmu; - u32 caps[4 * PT_CPUID_LEAVES]; + u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; }; /** diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index bb34b03af252..a3311c886194 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile @@ -1,4 +1,4 @@ -obj-y = mce.o mce-severity.o +obj-y = mce.o mce-severity.o mce-genpool.o obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c index a1aef9533154..34c89a3e8260 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-apei.c +++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c @@ -57,7 +57,6 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) m.addr = mem_err->physical_addr; mce_log(&m); - mce_notify_irq(); } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c new file mode 100644 index 000000000000..0a850100c594 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c @@ -0,0 +1,99 @@ +/* + * MCE event pool management in MCE context + * + * Copyright (C) 2015 Intel Corp. + * Author: Chen, Gong <gong.chen@linux.intel.com> + * + * This file is licensed under GPLv2. + */ +#include <linux/smp.h> +#include <linux/mm.h> +#include <linux/genalloc.h> +#include <linux/llist.h> +#include "mce-internal.h" + +/* + * printk() is not safe in MCE context. 
This is a lock-less memory allocator + * used to save error information organized in a lock-less list. + * + * This memory pool is only to be used to save MCE records in MCE context. + * MCE events are rare, so a fixed size memory pool should be enough. Use + * 2 pages to save MCE events for now (~80 MCE records at most). + */ +#define MCE_POOLSZ (2 * PAGE_SIZE) + +static struct gen_pool *mce_evt_pool; +static LLIST_HEAD(mce_event_llist); +static char gen_pool_buf[MCE_POOLSZ]; + +void mce_gen_pool_process(void) +{ + struct llist_node *head; + struct mce_evt_llist *node; + struct mce *mce; + + head = llist_del_all(&mce_event_llist); + if (!head) + return; + + head = llist_reverse_order(head); + llist_for_each_entry(node, head, llnode) { + mce = &node->mce; + atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); + gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node)); + } +} + +bool mce_gen_pool_empty(void) +{ + return llist_empty(&mce_event_llist); +} + +int mce_gen_pool_add(struct mce *mce) +{ + struct mce_evt_llist *node; + + if (!mce_evt_pool) + return -EINVAL; + + node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node)); + if (!node) { + pr_warn_ratelimited("MCE records pool full!\n"); + return -ENOMEM; + } + + memcpy(&node->mce, mce, sizeof(*mce)); + llist_add(&node->llnode, &mce_event_llist); + + return 0; +} + +static int mce_gen_pool_create(void) +{ + struct gen_pool *tmpp; + int ret = -ENOMEM; + + tmpp = gen_pool_create(ilog2(sizeof(struct mce_evt_llist)), -1); + if (!tmpp) + goto out; + + ret = gen_pool_add(tmpp, (unsigned long)gen_pool_buf, MCE_POOLSZ, -1); + if (ret) { + gen_pool_destroy(tmpp); + goto out; + } + + mce_evt_pool = tmpp; + +out: + return ret; +} + +int mce_gen_pool_init(void) +{ + /* Just init mce_gen_pool once. 
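Taken together, the new pool gives MCE context a printk-free way to hand records to process context. A sketch of the intended flow, using only the helpers declared in mce-internal.h below (the two call sites here are simplified stand-ins for mce_log()/do_machine_check() and the workqueue callback, not the actual code):

/* MCE context (NMI-like): no locks, no printk; just queue the record. */
static void sketch_log_from_mce_context(struct mce *m)
{
	if (!mce_gen_pool_add(m))		/* lock-less llist push */
		irq_work_queue(&mce_irq_work);	/* kick process context */
}

/* Process context (workqueue): drain the list and run the decoder chain. */
static void sketch_drain(struct work_struct *dummy)
{
	mce_gen_pool_process();
}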
*/ + if (mce_evt_pool) + return 0; + + return mce_gen_pool_create(); +} diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index fe32074b865b..547720efd923 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -13,6 +13,8 @@ enum severity_level { MCE_PANIC_SEVERITY, }; +extern struct atomic_notifier_head x86_mce_decoder_chain; + #define ATTR_LEN 16 #define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */ @@ -24,6 +26,16 @@ struct mce_bank { char attrname[ATTR_LEN]; /* attribute name */ }; +struct mce_evt_llist { + struct llist_node llnode; + struct mce mce; +}; + +void mce_gen_pool_process(void); +bool mce_gen_pool_empty(void); +int mce_gen_pool_add(struct mce *mce); +int mce_gen_pool_init(void); + extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp); struct dentry *mce_get_debugfs_dir(void); @@ -67,3 +79,5 @@ static inline int apei_clear_mce(u64 record_id) return -EINVAL; } #endif + +void mce_inject_log(struct mce *m); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index df919ff103c3..9d014b82a124 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -52,11 +52,11 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); -#define rcu_dereference_check_mce(p) \ +#define mce_log_get_idx_check(p) \ ({ \ - rcu_lockdep_assert(rcu_read_lock_sched_held() || \ - lockdep_is_held(&mce_chrdev_read_mutex), \ - "suspicious rcu_dereference_check_mce() usage"); \ + RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + !lockdep_is_held(&mce_chrdev_read_mutex), \ + "suspicious mce_log_get_idx_check() usage"); \ smp_load_acquire(&(p)); \ }) @@ -110,22 +110,24 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { */ mce_banks_t mce_banks_ce_disabled; -static DEFINE_PER_CPU(struct work_struct, mce_work); +static struct work_struct mce_work; +static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +static int mce_usable_address(struct mce *m); /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
*/ -static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); +ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); /* Do initial initialization of a struct mce */ void mce_setup(struct mce *m) { memset(m, 0, sizeof(struct mce)); m->cpu = m->extcpu = smp_processor_id(); - rdtscll(m->tsc); + m->tsc = rdtsc(); /* We hope get_seconds stays lockless */ m->time = get_seconds(); m->cpuvendor = boot_cpu_data.x86_vendor; @@ -157,12 +159,13 @@ void mce_log(struct mce *mce) /* Emit the trace record: */ trace_mce_record(mce); - atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); + if (!mce_gen_pool_add(mce)) + irq_work_queue(&mce_irq_work); mce->finished = 0; wmb(); for (;;) { - entry = rcu_dereference_check_mce(mcelog.next); + entry = mce_log_get_idx_check(mcelog.next); for (;;) { /* @@ -196,48 +199,23 @@ void mce_log(struct mce *mce) set_bit(0, &mce_need_notify); } -static void drain_mcelog_buffer(void) +void mce_inject_log(struct mce *m) { - unsigned int next, i, prev = 0; - - next = ACCESS_ONCE(mcelog.next); - - do { - struct mce *m; - - /* drain what was logged during boot */ - for (i = prev; i < next; i++) { - unsigned long start = jiffies; - unsigned retries = 1; - - m = &mcelog.entry[i]; - - while (!m->finished) { - if (time_after_eq(jiffies, start + 2*retries)) - retries++; - - cpu_relax(); - - if (!m->finished && retries >= 4) { - pr_err("skipping error being logged currently!\n"); - break; - } - } - smp_rmb(); - atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); - } - - memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m)); - prev = next; - next = cmpxchg(&mcelog.next, prev, 0); - } while (next != prev); + mutex_lock(&mce_chrdev_read_mutex); + mce_log(m); + mutex_unlock(&mce_chrdev_read_mutex); } +EXPORT_SYMBOL_GPL(mce_inject_log); +static struct notifier_block mce_srao_nb; void mce_register_decode_chain(struct notifier_block *nb) { + /* Ensure SRAO notifier has the highest priority in the decode chain. */ + if (nb != &mce_srao_nb && nb->priority == INT_MAX) + nb->priority -= 1; + atomic_notifier_chain_register(&x86_mce_decoder_chain, nb); - drain_mcelog_buffer(); } EXPORT_SYMBOL_GPL(mce_register_decode_chain); @@ -461,61 +439,6 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs) } } -/* - * Simple lockless ring to communicate PFNs from the exception handler with the - * process context work function. This is vastly simplified because there's - * only a single reader and a single writer. 
- */ -#define MCE_RING_SIZE 16 /* we use one entry less */ - -struct mce_ring { - unsigned short start; - unsigned short end; - unsigned long ring[MCE_RING_SIZE]; -}; -static DEFINE_PER_CPU(struct mce_ring, mce_ring); - -/* Runs with CPU affinity in workqueue */ -static int mce_ring_empty(void) -{ - struct mce_ring *r = this_cpu_ptr(&mce_ring); - - return r->start == r->end; -} - -static int mce_ring_get(unsigned long *pfn) -{ - struct mce_ring *r; - int ret = 0; - - *pfn = 0; - get_cpu(); - r = this_cpu_ptr(&mce_ring); - if (r->start == r->end) - goto out; - *pfn = r->ring[r->start]; - r->start = (r->start + 1) % MCE_RING_SIZE; - ret = 1; -out: - put_cpu(); - return ret; -} - -/* Always runs in MCE context with preempt off */ -static int mce_ring_add(unsigned long pfn) -{ - struct mce_ring *r = this_cpu_ptr(&mce_ring); - unsigned next; - - next = (r->end + 1) % MCE_RING_SIZE; - if (next == r->start) - return -1; - r->ring[r->end] = pfn; - wmb(); - r->end = next; - return 0; -} - int mce_available(struct cpuinfo_x86 *c) { if (mca_cfg.disabled) @@ -525,12 +448,10 @@ int mce_available(struct cpuinfo_x86 *c) static void mce_schedule_work(void) { - if (!mce_ring_empty()) - schedule_work(this_cpu_ptr(&mce_work)); + if (!mce_gen_pool_empty() && keventd_up()) + schedule_work(&mce_work); } -static DEFINE_PER_CPU(struct irq_work, mce_irq_work); - static void mce_irq_work_cb(struct irq_work *entry) { mce_notify_irq(); @@ -551,8 +472,29 @@ static void mce_report_event(struct pt_regs *regs) return; } - irq_work_queue(this_cpu_ptr(&mce_irq_work)); + irq_work_queue(&mce_irq_work); +} + +static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *mce = (struct mce *)data; + unsigned long pfn; + + if (!mce) + return NOTIFY_DONE; + + if (mce->usable_addr && (mce->severity == MCE_AO_SEVERITY)) { + pfn = mce->addr >> PAGE_SHIFT; + memory_failure(pfn, MCE_VECTOR, 0); + } + + return NOTIFY_OK; } +static struct notifier_block mce_srao_nb = { + .notifier_call = srao_decode_notifier, + .priority = INT_MAX, +}; /* * Read ADDR and MISC registers. @@ -672,8 +614,11 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) */ if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) { if (m.status & MCI_STATUS_ADDRV) { - mce_ring_add(m.addr >> PAGE_SHIFT); - mce_schedule_work(); + m.severity = severity; + m.usable_addr = mce_usable_address(&m); + + if (!mce_gen_pool_add(&m)) + mce_schedule_work(); } } @@ -1029,7 +974,6 @@ void do_machine_check(struct pt_regs *regs, long error_code) { struct mca_config *cfg = &mca_cfg; struct mce m, *final; - enum ctx_state prev_state; int i; int worst = 0; int severity; @@ -1055,7 +999,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) int flags = MF_ACTION_REQUIRED; int lmce = 0; - prev_state = ist_enter(regs); + ist_enter(regs); this_cpu_inc(mce_exception_count); @@ -1143,15 +1087,9 @@ void do_machine_check(struct pt_regs *regs, long error_code) mce_read_aux(&m, i); - /* - * Action optional error. Queue address for later processing. - * When the ring overflows we just ignore the AO error. - * RED-PEN add some logging mechanism when - * usable_address or mce_add_ring fails. 
- * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0 - */ - if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) - mce_ring_add(m.addr >> PAGE_SHIFT); + /* assuming valid severity level != 0 */ + m.severity = severity; + m.usable_addr = mce_usable_address(&m); mce_log(&m); @@ -1227,7 +1165,7 @@ out: local_irq_disable(); ist_end_non_atomic(); done: - ist_exit(regs, prev_state); + ist_exit(regs); } EXPORT_SYMBOL_GPL(do_machine_check); @@ -1247,14 +1185,11 @@ int memory_failure(unsigned long pfn, int vector, int flags) /* * Action optional processing happens here (picking up * from the list of faulting pages that do_machine_check() - * placed into the "ring"). + * placed into the genpool). */ static void mce_process_work(struct work_struct *dummy) { - unsigned long pfn; - - while (mce_ring_get(&pfn)) - memory_failure(pfn, MCE_VECTOR, 0); + mce_gen_pool_process(); } #ifdef CONFIG_X86_MCE_INTEL @@ -1678,6 +1613,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) } } +static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) +{ + switch (c->x86_vendor) { + case X86_VENDOR_INTEL: + mce_intel_feature_clear(c); + break; + default: + break; + } +} + static void mce_start_timer(unsigned int cpu, struct timer_list *t) { unsigned long iv = check_interval * HZ; @@ -1731,13 +1677,36 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) return; } + if (mce_gen_pool_init()) { + mca_cfg.disabled = true; + pr_emerg("Couldn't allocate MCE records pool!\n"); + return; + } + machine_check_vector = do_machine_check; __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(c); __mcheck_cpu_init_timer(); - INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work); - init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb); +} + +/* + * Called for each booted CPU to clear some machine checks opt-ins + */ +void mcheck_cpu_clear(struct cpuinfo_x86 *c) +{ + if (mca_cfg.disabled) + return; + + if (!mce_available(c)) + return; + + /* + * Possibly to clear general settings generic to x86 + * __mcheck_cpu_clear_generic(c); + */ + __mcheck_cpu_clear_vendor(c); + } /* @@ -1784,7 +1753,7 @@ static void collect_tscs(void *data) { unsigned long *cpu_tsc = (unsigned long *)data; - rdtscll(cpu_tsc[smp_processor_id()]); + cpu_tsc[smp_processor_id()] = rdtsc(); } static int mce_apei_read_done; @@ -1850,7 +1819,7 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf, goto out; } - next = rcu_dereference_check_mce(mcelog.next); + next = mce_log_get_idx_check(mcelog.next); /* Only supports full reads right now */ err = -EINVAL; @@ -2056,8 +2025,12 @@ __setup("mce", mcheck_enable); int __init mcheck_init(void) { mcheck_intel_therm_init(); + mce_register_decode_chain(&mce_srao_nb); mcheck_vendor_init_severity(); + INIT_WORK(&mce_work, mce_process_work); + init_irq_work(&mce_irq_work, mce_irq_work_cb); + return 0; } @@ -2591,5 +2564,20 @@ static int __init mcheck_debugfs_init(void) return 0; } -late_initcall(mcheck_debugfs_init); +#else +static int __init mcheck_debugfs_init(void) { return -EINVAL; } #endif + +static int __init mcheck_late_init(void) +{ + mcheck_debugfs_init(); + + /* + * Flush out everything that has been logged during early boot, now that + * everything has been initialized (workqueues, decoders, ...). 
+ */ + mce_schedule_work(); + + return 0; +} +late_initcall(mcheck_late_init); diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 844f56c5616d..1e8bb6c94f14 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -146,6 +146,27 @@ void mce_intel_hcpu_update(unsigned long cpu) per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; } +static void cmci_toggle_interrupt_mode(bool on) +{ + unsigned long flags, *owned; + int bank; + u64 val; + + raw_spin_lock_irqsave(&cmci_discover_lock, flags); + owned = this_cpu_ptr(mce_banks_owned); + for_each_set_bit(bank, owned, MAX_NR_BANKS) { + rdmsrl(MSR_IA32_MCx_CTL2(bank), val); + + if (on) + val |= MCI_CTL2_CMCI_EN; + else + val &= ~MCI_CTL2_CMCI_EN; + + wrmsrl(MSR_IA32_MCx_CTL2(bank), val); + } + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); +} + unsigned long cmci_intel_adjust_timer(unsigned long interval) { if ((this_cpu_read(cmci_backoff_cnt) > 0) && @@ -175,7 +196,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) */ if (!atomic_read(&cmci_storm_on_cpus)) { __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); - cmci_reenable(); + cmci_toggle_interrupt_mode(true); cmci_recheck(); } return CMCI_POLL_INTERVAL; @@ -186,22 +207,6 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) } } -static void cmci_storm_disable_banks(void) -{ - unsigned long flags, *owned; - int bank; - u64 val; - - raw_spin_lock_irqsave(&cmci_discover_lock, flags); - owned = this_cpu_ptr(mce_banks_owned); - for_each_set_bit(bank, owned, MAX_NR_BANKS) { - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); - val &= ~MCI_CTL2_CMCI_EN; - wrmsrl(MSR_IA32_MCx_CTL2(bank), val); - } - raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); -} - static bool cmci_storm_detect(void) { unsigned int cnt = __this_cpu_read(cmci_storm_cnt); @@ -223,7 +228,7 @@ static bool cmci_storm_detect(void) if (cnt <= CMCI_STORM_THRESHOLD) return false; - cmci_storm_disable_banks(); + cmci_toggle_interrupt_mode(false); __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); r = atomic_add_return(1, &cmci_storm_on_cpus); mce_timer_kick(CMCI_STORM_INTERVAL); @@ -246,7 +251,6 @@ static void intel_threshold_interrupt(void) return; machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); - mce_notify_irq(); } /* @@ -435,7 +439,7 @@ static void intel_init_cmci(void) cmci_recheck(); } -void intel_init_lmce(void) +static void intel_init_lmce(void) { u64 val; @@ -448,9 +452,26 @@ void intel_init_lmce(void) wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); } +static void intel_clear_lmce(void) +{ + u64 val; + + if (!lmce_supported()) + return; + + rdmsrl(MSR_IA32_MCG_EXT_CTL, val); + val &= ~MCG_EXT_CTL_LMCE_EN; + wrmsrl(MSR_IA32_MCG_EXT_CTL, val); +} + void mce_intel_feature_init(struct cpuinfo_x86 *c) { intel_init_thermal(c); intel_init_cmci(); intel_init_lmce(); } + +void mce_intel_feature_clear(struct cpuinfo_x86 *c) +{ + intel_clear_lmce(); +} diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 737b0ad4e61a..12402e10aeff 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -19,10 +19,9 @@ int mce_p5_enabled __read_mostly; /* Machine check handler for Pentium class Intel CPUs: */ static void pentium_machine_check(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; u32 loaddr, hi, lotype; - prev_state = ist_enter(regs); + ist_enter(regs); rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); rdmsr(MSR_IA32_P5_MC_TYPE, 
lotype, hi); @@ -39,7 +38,7 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code) add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - ist_exit(regs, prev_state); + ist_exit(regs); } /* Set up machine check reporting for processors with Intel style MCE: */ diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 44f138296fbe..01dd8702880b 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -15,12 +15,12 @@ /* Machine check handler for WinChip C6: */ static void winchip_machine_check(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state = ist_enter(regs); + ist_enter(regs); printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - ist_exit(regs, prev_state); + ist_exit(regs); } /* Set up machine check reporting on the Winchip C6 series */ diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 6236a54a63f4..9e3f3c7dd5d7 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -377,17 +377,16 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif) return err; } -static int mc_device_remove(struct device *dev, struct subsys_interface *sif) +static void mc_device_remove(struct device *dev, struct subsys_interface *sif) { int cpu = dev->id; if (!cpu_online(cpu)) - return 0; + return; pr_debug("CPU%d removed\n", cpu); microcode_fini_cpu(cpu); sysfs_remove_group(&dev->kobj, &mc_attr_group); - return 0; } static struct subsys_interface mc_cpu_interface = { @@ -460,7 +459,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) return NOTIFY_OK; } -static struct notifier_block __refdata mc_cpu_notifier = { +static struct notifier_block mc_cpu_notifier = { .notifier_call = mc_cpu_callback, }; diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index 8187b7247d1c..37ea89c11520 100644 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -390,7 +390,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci) } #ifdef DEBUG -static void __ref show_saved_mc(void) +static void show_saved_mc(void) { int i, j; unsigned int sig, pf, rev, total_size, data_size, date; diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index aad4bd84b475..f794bfa3c138 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -18,6 +18,7 @@ #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/kexec.h> #include <asm/processor.h> #include <asm/hypervisor.h> #include <asm/hyperv.h> @@ -28,10 +29,14 @@ #include <asm/i8259.h> #include <asm/apic.h> #include <asm/timer.h> +#include <asm/reboot.h> struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); +static void (*hv_kexec_handler)(void); +static void (*hv_crash_handler)(struct pt_regs *regs); + #if IS_ENABLED(CONFIG_HYPERV) static void (*vmbus_handler)(void); @@ -67,8 +72,47 @@ void hv_remove_vmbus_irq(void) } EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq); EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq); + +void hv_setup_kexec_handler(void (*handler)(void)) +{ + hv_kexec_handler = handler; +} +EXPORT_SYMBOL_GPL(hv_setup_kexec_handler); + +void hv_remove_kexec_handler(void) +{ + hv_kexec_handler = NULL; +} +EXPORT_SYMBOL_GPL(hv_remove_kexec_handler); + +void hv_setup_crash_handler(void 
(*handler)(struct pt_regs *regs)) +{ + hv_crash_handler = handler; +} +EXPORT_SYMBOL_GPL(hv_setup_crash_handler); + +void hv_remove_crash_handler(void) +{ + hv_crash_handler = NULL; +} +EXPORT_SYMBOL_GPL(hv_remove_crash_handler); #endif +static void hv_machine_shutdown(void) +{ + if (kexec_in_progress && hv_kexec_handler) + hv_kexec_handler(); + native_machine_shutdown(); +} + +static void hv_machine_crash_shutdown(struct pt_regs *regs) +{ + if (hv_crash_handler) + hv_crash_handler(regs); + native_machine_crash_shutdown(regs); +} + + static uint32_t __init ms_hyperv_platform(void) { u32 eax; @@ -114,6 +158,7 @@ static void __init ms_hyperv_init_platform(void) * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); + ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", @@ -141,6 +186,8 @@ static void __init ms_hyperv_init_platform(void) no_timer_check = 1; #endif + machine_ops.shutdown = hv_machine_shutdown; + machine_ops.crash_shutdown = hv_machine_crash_shutdown; } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9469dfa55607..66dd3fe99b82 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1551,7 +1551,7 @@ static void __init filter_events(struct attribute **attrs) } /* Merge two pointer arrays */ -static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b) +__init struct attribute **merge_attr(struct attribute **a, struct attribute **b) { struct attribute **new; int j, i; @@ -2179,6 +2179,7 @@ static unsigned long get_segment_base(unsigned int segment) int idx = segment >> 3; if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { +#ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; if (idx > LDT_ENTRIES) @@ -2190,6 +2191,9 @@ static unsigned long get_segment_base(unsigned int segment) return 0; desc = &ldt->entries[idx]; +#else + return 0; +#endif } else { if (idx > GDT_ENTRIES) return 0; @@ -2200,7 +2204,7 @@ static unsigned long get_segment_base(unsigned int segment) return get_desc_base(desc); } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_IA32_EMULATION #include <asm/compat.h> diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 3e7fd27dfe20..5edf6d868fc1 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -165,7 +165,7 @@ struct intel_excl_cntrs { unsigned core_id; /* per-core: core id */ }; -#define MAX_LBR_ENTRIES 16 +#define MAX_LBR_ENTRIES 32 enum { X86_PERF_KFREE_SHARED = 0, @@ -594,6 +594,7 @@ struct x86_pmu { struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); int max_pebs_events; + unsigned long free_running_flags; /* * Intel LBR @@ -624,6 +625,7 @@ struct x86_pmu { struct x86_perf_task_context { u64 lbr_from[MAX_LBR_ENTRIES]; u64 lbr_to[MAX_LBR_ENTRIES]; + u64 lbr_info[MAX_LBR_ENTRIES]; int lbr_callstack_users; int lbr_stack_state; }; @@ -793,6 +795,8 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); ssize_t intel_event_sysfs_show(char *page, u64 config); +struct attribute **merge_attr(struct attribute **a, struct attribute **b); + #ifdef CONFIG_CPU_SUP_AMD int amd_pmu_init(void); @@ -808,20 +812,6 @@ static inline int amd_pmu_init(void) #ifdef CONFIG_CPU_SUP_INTEL 
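The new hv_setup_*_handler() hooks (from the mshyperv.c hunk above) let a Hyper-V guest driver tear down its hypervisor connection before kexec or a crash shutdown. A hedged sketch of a consumer registering and unregistering such a handler (the handler body is a placeholder, not the real VMBus implementation):

static void sketch_hv_kexec_teardown(void)
{
	/* e.g. tell the hypervisor the shared message pages are going away */
}

static int __init sketch_register_hv_handlers(void)
{
	hv_setup_kexec_handler(sketch_hv_kexec_teardown);
	return 0;
}

static void __exit sketch_unregister_hv_handlers(void)
{
	hv_remove_kexec_handler();
}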
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) -{ - /* user explicitly requested branch sampling */ - if (has_branch_stack(event)) - return true; - - /* implicit branch sampling to correct PEBS skid */ - if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 && - x86_pmu.intel_cap.pebs_format < 2) - return true; - - return false; -} - static inline bool intel_pmu_has_bts(struct perf_event *event) { if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && @@ -873,6 +863,8 @@ extern struct event_constraint intel_ivb_pebs_event_constraints[]; extern struct event_constraint intel_hsw_pebs_event_constraints[]; +extern struct event_constraint intel_skl_pebs_event_constraints[]; + struct event_constraint *intel_pebs_constraints(struct perf_event *event); void intel_pmu_pebs_enable(struct perf_event *event); @@ -911,6 +903,8 @@ void intel_pmu_lbr_init_snb(void); void intel_pmu_lbr_init_hsw(void); +void intel_pmu_lbr_init_skl(void); + int intel_pmu_setup_lbr_filter(struct perf_event *event); void intel_pt_interrupt(void); @@ -934,6 +928,7 @@ static inline int is_ht_workaround_enabled(void) { return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); } + #else /* CONFIG_CPU_SUP_INTEL */ static inline void reserve_ds_buffers(void) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 6326ae24e4d5..3f124d553c5a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -177,6 +177,14 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; +struct event_constraint intel_skl_event_constraints[] = { + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ + EVENT_CONSTRAINT_END +}; + static struct extra_reg intel_snb_extra_regs[] __read_mostly = { /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), @@ -193,6 +201,13 @@ static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; +static struct extra_reg intel_skl_extra_regs[] __read_mostly = { + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + EVENT_EXTRA_END +}; + EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2"); @@ -244,6 +259,200 @@ static u64 intel_pmu_event_map(int hw_event) return intel_perfmon_event_map[hw_event]; } +/* + * Notes on the events: + * - data reads do not include code reads (comparable to earlier tables) + * - data counts include speculative execution (except L1 write, dtlb, bpu) + * - remote node access includes remote memory, remote cache, remote mmio. + * - prefetches are not included in the counts. 
+ * - icache miss does not include decoded icache + */ + +#define SKL_DEMAND_DATA_RD BIT_ULL(0) +#define SKL_DEMAND_RFO BIT_ULL(1) +#define SKL_ANY_RESPONSE BIT_ULL(16) +#define SKL_SUPPLIER_NONE BIT_ULL(17) +#define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26) +#define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27) +#define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28) +#define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29) +#define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \ + SKL_L3_MISS_REMOTE_HOP0_DRAM| \ + SKL_L3_MISS_REMOTE_HOP1_DRAM| \ + SKL_L3_MISS_REMOTE_HOP2P_DRAM) +#define SKL_SPL_HIT BIT_ULL(30) +#define SKL_SNOOP_NONE BIT_ULL(31) +#define SKL_SNOOP_NOT_NEEDED BIT_ULL(32) +#define SKL_SNOOP_MISS BIT_ULL(33) +#define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34) +#define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35) +#define SKL_SNOOP_HITM BIT_ULL(36) +#define SKL_SNOOP_NON_DRAM BIT_ULL(37) +#define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \ + SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ + SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ + SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM) +#define SKL_DEMAND_READ SKL_DEMAND_DATA_RD +#define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \ + SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ + SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ + SKL_SNOOP_HITM|SKL_SPL_HIT) +#define SKL_DEMAND_WRITE SKL_DEMAND_RFO +#define SKL_LLC_ACCESS SKL_ANY_RESPONSE +#define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \ + SKL_L3_MISS_REMOTE_HOP1_DRAM| \ + SKL_L3_MISS_REMOTE_HOP2P_DRAM) + +static __initconst const u64 skl_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ + [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ + [ C(RESULT_MISS) ] = 0x0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ + [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ + [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */ + [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ + [ 
C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, +}; + +static __initconst const u64 skl_hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| + SKL_LLC_ACCESS|SKL_ANY_SNOOP, + [ C(RESULT_MISS) ] = SKL_DEMAND_READ| + SKL_L3_MISS|SKL_ANY_SNOOP| + SKL_SUPPLIER_NONE, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| + SKL_LLC_ACCESS|SKL_ANY_SNOOP, + [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| + SKL_L3_MISS|SKL_ANY_SNOOP| + SKL_SUPPLIER_NONE, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| + SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, + [ C(RESULT_MISS) ] = SKL_DEMAND_READ| + SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| + SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, + [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| + SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, +}; + #define SNB_DMND_DATA_RD (1ULL << 0) #define SNB_DMND_RFO (1ULL << 1) #define SNB_DMND_IFETCH (1ULL << 2) @@ -1114,7 +1323,7 @@ static struct extra_reg intel_slm_extra_regs[] __read_mostly = { /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0), - INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1), + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1), EVENT_EXTRA_END }; @@ -1594,6 +1803,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) loops = 0; again: + intel_pmu_lbr_read(); intel_pmu_ack_status(status); if (++loops > 100) { static bool warned = false; @@ -1608,16 +1818,16 @@ again: inc_irq_stat(apic_perf_irqs); - intel_pmu_lbr_read(); /* - * CondChgd bit 63 doesn't mean any overflow status. Ignore - * and clear the bit. + * Ignore a range of extra bits in status that do not indicate + * overflow by themselves. 
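The LL and NODE entries in skl_hw_cache_extra_regs above are plain ORs of the SKL_* request, supplier and snoop bits defined at the top of this hunk. A minimal standalone sketch (reusing those bit positions; not kernel code) that prints the resulting OFFCORE_RESPONSE config values for the LL read access/miss entries:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)		(1ULL << (n))
#define SKL_DEMAND_DATA_RD	BIT_ULL(0)
#define SKL_ANY_RESPONSE	BIT_ULL(16)
#define SKL_SUPPLIER_NONE	BIT_ULL(17)
/* bits 26-29: L3 miss to local/remote DRAM */
#define SKL_L3_MISS		(BIT_ULL(26)|BIT_ULL(27)|BIT_ULL(28)|BIT_ULL(29))
/* bits 30-37: SPL hit plus every snoop response type */
#define SKL_ANY_SNOOP		(BIT_ULL(30)|BIT_ULL(31)|BIT_ULL(32)|BIT_ULL(33)| \
				 BIT_ULL(34)|BIT_ULL(35)|BIT_ULL(36)|BIT_ULL(37))

int main(void)
{
	/* LL read access: demand data read, any response, any snoop */
	uint64_t access = SKL_DEMAND_DATA_RD | SKL_ANY_RESPONSE | SKL_ANY_SNOOP;
	/* LL read miss: demand data read, L3 miss, any snoop, supplier none */
	uint64_t miss = SKL_DEMAND_DATA_RD | SKL_L3_MISS | SKL_ANY_SNOOP | SKL_SUPPLIER_NONE;

	printf("LL read access offcore_rsp = 0x%llx\n", (unsigned long long)access);
	printf("LL read miss   offcore_rsp = 0x%llx\n", (unsigned long long)miss);
	return 0;
}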
*/ - if (__test_and_clear_bit(63, (unsigned long *)&status)) { - if (!status) - goto done; - } + status &= ~(GLOBAL_STATUS_COND_CHG | + GLOBAL_STATUS_ASIF | + GLOBAL_STATUS_LBRS_FROZEN); + if (!status) + goto done; /* * PEBS overflow sets bit 62 in the global status register @@ -1699,18 +1909,22 @@ intel_bts_constraints(struct perf_event *event) return NULL; } -static int intel_alt_er(int idx) +static int intel_alt_er(int idx, u64 config) { + int alt_idx; if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) return idx; if (idx == EXTRA_REG_RSP_0) - return EXTRA_REG_RSP_1; + alt_idx = EXTRA_REG_RSP_1; if (idx == EXTRA_REG_RSP_1) - return EXTRA_REG_RSP_0; + alt_idx = EXTRA_REG_RSP_0; - return idx; + if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask) + return idx; + + return alt_idx; } static void intel_fixup_er(struct perf_event *event, int idx) @@ -1799,7 +2013,7 @@ again: */ c = NULL; } else { - idx = intel_alt_er(idx); + idx = intel_alt_er(idx, reg->config); if (idx != reg->idx) { raw_spin_unlock_irqrestore(&era->lock, flags); goto again; @@ -2253,6 +2467,15 @@ static void intel_pebs_aliases_snb(struct perf_event *event) } } +static unsigned long intel_pmu_free_running_flags(struct perf_event *event) +{ + unsigned long flags = x86_pmu.free_running_flags; + + if (event->attr.use_clockid) + flags &= ~PERF_SAMPLE_TIME; + return flags; +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); @@ -2263,7 +2486,8 @@ static int intel_pmu_hw_config(struct perf_event *event) if (event->attr.precise_ip) { if (!event->attr.freq) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; - if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS)) + if (!(event->attr.sample_type & + ~intel_pmu_free_running_flags(event))) event->hw.flags |= PERF_X86_EVENT_FREERUNNING; } if (x86_pmu.pebs_aliases) @@ -2694,6 +2918,8 @@ static __initconst const struct x86_pmu core_pmu = { .event_map = intel_pmu_event_map, .max_events = ARRAY_SIZE(intel_perfmon_event_map), .apic = 1, + .free_running_flags = PEBS_FREERUNNING_FLAGS, + /* * Intel PMCs cannot be accessed sanely above 32-bit width, * so we install an artificial 1<<31 period regardless of @@ -2732,6 +2958,7 @@ static __initconst const struct x86_pmu intel_pmu = { .event_map = intel_pmu_event_map, .max_events = ARRAY_SIZE(intel_perfmon_event_map), .apic = 1, + .free_running_flags = PEBS_FREERUNNING_FLAGS, /* * Intel PMCs cannot be accessed sanely above 32 bit width, * so we install an artificial 1<<31 period regardless of @@ -3269,6 +3496,29 @@ __init int intel_pmu_init(void) pr_cont("Broadwell events, "); break; + case 78: /* 14nm Skylake Mobile */ + case 94: /* 14nm Skylake Desktop */ + x86_pmu.late_ack = true; + memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + intel_pmu_lbr_init_skl(); + + x86_pmu.event_constraints = intel_skl_event_constraints; + x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; + x86_pmu.extra_regs = intel_skl_extra_regs; + x86_pmu.pebs_aliases = intel_pebs_aliases_snb; + /* all extra regs are per-cpu when HT is on */ + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + + x86_pmu.hw_config = hsw_hw_config; + x86_pmu.get_event_constraints = hsw_get_event_constraints; + x86_pmu.cpu_events = hsw_events_attrs; + WARN_ON(!x86_pmu.format_attrs); + x86_pmu.cpu_events = hsw_events_attrs; + pr_cont("Skylake events, "); + break; + default: switch (x86_pmu.version) { case 1: 
@@ -3338,7 +3588,7 @@ __init int intel_pmu_init(void) */ if (x86_pmu.extra_regs) { for (er = x86_pmu.extra_regs; er->msr; er++) { - er->extra_msr_access = check_msr(er->msr, 0x1ffUL); + er->extra_msr_access = check_msr(er->msr, 0x11UL); /* Disable LBR select mapping */ if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) x86_pmu.lbr_sel_map = NULL; diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c index 43dd672d788b..54690e885759 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_bts.c +++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c @@ -62,9 +62,6 @@ struct bts_buffer { struct pmu bts_pmu; -void intel_pmu_enable_bts(u64 config); -void intel_pmu_disable_bts(void); - static size_t buf_size(struct page *page) { return 1 << (PAGE_SHIFT + page_private(page)); diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 71fc40238843..84f236ab96b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -224,6 +224,19 @@ union hsw_tsx_tuning { #define PEBS_HSW_TSX_FLAGS 0xff00000000ULL +/* Same as HSW, plus TSC */ + +struct pebs_record_skl { + u64 flags, ip; + u64 ax, bx, cx, dx; + u64 si, di, bp, sp; + u64 r8, r9, r10, r11; + u64 r12, r13, r14, r15; + u64 status, dla, dse, lat; + u64 real_ip, tsx_tuning; + u64 tsc; +}; + void init_debug_store_on_cpu(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; @@ -675,6 +688,28 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_skl_pebs_event_constraints[] = { + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). 
*/ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */ + /* Allow all events as PEBS with no flags */ + INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), + EVENT_CONSTRAINT_END +}; + struct event_constraint *intel_pebs_constraints(struct perf_event *event) { struct event_constraint *c; @@ -754,6 +789,11 @@ void intel_pmu_pebs_disable(struct perf_event *event) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; struct debug_store *ds = cpuc->ds; + bool large_pebs = ds->pebs_interrupt_threshold > + ds->pebs_buffer_base + x86_pmu.pebs_record_size; + + if (large_pebs) + intel_pmu_drain_pebs_buffer(); cpuc->pebs_enabled &= ~(1ULL << hwc->idx); @@ -762,12 +802,8 @@ void intel_pmu_pebs_disable(struct perf_event *event) else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled &= ~(1ULL << 63); - if (ds->pebs_interrupt_threshold > - ds->pebs_buffer_base + x86_pmu.pebs_record_size) { - intel_pmu_drain_pebs_buffer(); - if (!pebs_is_enabled(cpuc)) - perf_sched_cb_dec(event->ctx->pmu); - } + if (large_pebs && !pebs_is_enabled(cpuc)) + perf_sched_cb_dec(event->ctx->pmu); if (cpuc->enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); @@ -885,7 +921,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) return 0; } -static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs) +static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs) { if (pebs->tsx_tuning) { union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning }; @@ -894,7 +930,7 @@ static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs) return 0; } -static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs) +static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs) { u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; @@ -918,7 +954,7 @@ static void setup_pebs_sample_data(struct perf_event *event, * unconditionally access the 'extra' entries. */ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - struct pebs_record_hsw *pebs = __pebs; + struct pebs_record_skl *pebs = __pebs; u64 sample_type; int fll, fst, dsrc; int fl = event->hw.flags; @@ -1016,6 +1052,16 @@ static void setup_pebs_sample_data(struct perf_event *event, data->txn = intel_hsw_transaction(pebs); } + /* + * v3 supplies an accurate time stamp, so we use that + * for the time stamp. + * + * We can only do this for the default trace clock. 
+ */ + if (x86_pmu.intel_cap.pebs_format >= 3 && + event->attr.use_clockid == 0) + data->time = native_sched_clock_from_tsc(pebs->tsc); + if (has_branch_stack(event)) data->br_stack = &cpuc->lbr_stack; } @@ -1142,6 +1188,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) for (at = base; at < top; at += x86_pmu.pebs_record_size) { struct pebs_record_nhm *p = at; + u64 pebs_status; /* PEBS v3 has accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) { @@ -1152,12 +1199,17 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) continue; } - bit = find_first_bit((unsigned long *)&p->status, + pebs_status = p->status & cpuc->pebs_enabled; + pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1; + + bit = find_first_bit((unsigned long *)&pebs_status, x86_pmu.max_pebs_events); - if (bit >= x86_pmu.max_pebs_events) - continue; - if (!test_bit(bit, cpuc->active_mask)) + if (WARN(bit >= x86_pmu.max_pebs_events, + "PEBS record without PEBS event! status=%Lx pebs_enabled=%Lx active_mask=%Lx", + (unsigned long long)p->status, (unsigned long long)cpuc->pebs_enabled, + *(unsigned long long *)cpuc->active_mask)) continue; + /* * The PEBS hardware does not deal well with the situation * when events happen near to each other and multiple bits @@ -1172,27 +1224,21 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * one, and it's not possible to reconstruct all events * that caused the PEBS record. It's called collision. * If collision happened, the record will be dropped. - * */ - if (p->status != (1 << bit)) { - u64 pebs_status; - - /* slow path */ - pebs_status = p->status & cpuc->pebs_enabled; - pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1; - if (pebs_status != (1 << bit)) { - for_each_set_bit(i, (unsigned long *)&pebs_status, - MAX_PEBS_EVENTS) - error[i]++; - continue; - } + if (p->status != (1ULL << bit)) { + for_each_set_bit(i, (unsigned long *)&pebs_status, + x86_pmu.max_pebs_events) + error[i]++; + continue; } + counts[bit]++; } for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) { if ((counts[bit] == 0) && (error[bit] == 0)) continue; + event = cpuc->events[bit]; WARN_ON_ONCE(!event); WARN_ON_ONCE(!event->attr.precise_ip); @@ -1245,6 +1291,14 @@ void __init intel_ds_init(void) x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; break; + case 3: + pr_cont("PEBS fmt3%c, ", pebs_type); + x86_pmu.pebs_record_size = + sizeof(struct pebs_record_skl); + x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; + x86_pmu.free_running_flags |= PERF_SAMPLE_TIME; + break; + default: printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); x86_pmu.pebs = 0; diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 452a7bd2dedb..b2c9475b7ff2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -13,7 +13,8 @@ enum { LBR_FORMAT_EIP = 0x02, LBR_FORMAT_EIP_FLAGS = 0x03, LBR_FORMAT_EIP_FLAGS2 = 0x04, - LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_EIP_FLAGS2, + LBR_FORMAT_INFO = 0x05, + LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_INFO, }; static enum { @@ -140,6 +141,13 @@ static void __intel_pmu_lbr_enable(bool pmi) u64 debugctl, lbr_select = 0, orig_debugctl; /* + * No need to unfreeze manually, as v4 can do that as part + * of the GLOBAL_STATUS ack. + */ + if (pmi && x86_pmu.version >= 4) + return; + + /* * No need to reprogram LBR_SELECT in a PMI, as it * did not change. 
*/ @@ -186,6 +194,8 @@ static void intel_pmu_lbr_reset_64(void) for (i = 0; i < x86_pmu.lbr_nr; i++) { wrmsrl(x86_pmu.lbr_from + i, 0); wrmsrl(x86_pmu.lbr_to + i, 0); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + wrmsrl(MSR_LBR_INFO_0 + i, 0); } } @@ -230,10 +240,12 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = intel_pmu_lbr_tos(); - for (i = 0; i < x86_pmu.lbr_nr; i++) { + for (i = 0; i < tos; i++) { lbr_idx = (tos - i) & mask; wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } task_ctx->lbr_stack_state = LBR_NONE; } @@ -251,10 +263,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = intel_pmu_lbr_tos(); - for (i = 0; i < x86_pmu.lbr_nr; i++) { + for (i = 0; i < tos; i++) { lbr_idx = (tos - i) & mask; rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } task_ctx->lbr_stack_state = LBR_VALID; } @@ -411,16 +425,31 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) u64 tos = intel_pmu_lbr_tos(); int i; int out = 0; + int num = x86_pmu.lbr_nr; - for (i = 0; i < x86_pmu.lbr_nr; i++) { + if (cpuc->lbr_sel->config & LBR_CALL_STACK) + num = tos; + + for (i = 0; i < num; i++) { unsigned long lbr_idx = (tos - i) & mask; u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0; int skip = 0; + u16 cycles = 0; int lbr_flags = lbr_desc[lbr_format]; rdmsrl(x86_pmu.lbr_from + lbr_idx, from); rdmsrl(x86_pmu.lbr_to + lbr_idx, to); + if (lbr_format == LBR_FORMAT_INFO) { + u64 info; + + rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info); + mis = !!(info & LBR_INFO_MISPRED); + pred = !mis; + in_tx = !!(info & LBR_INFO_IN_TX); + abort = !!(info & LBR_INFO_ABORT); + cycles = (info & LBR_INFO_CYCLES); + } if (lbr_flags & LBR_EIP_FLAGS) { mis = !!(from & LBR_FROM_FLAG_MISPRED); pred = !mis; @@ -450,6 +479,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) cpuc->lbr_entries[out].predicted = pred; cpuc->lbr_entries[out].in_tx = in_tx; cpuc->lbr_entries[out].abort = abort; + cpuc->lbr_entries[out].cycles = cycles; cpuc->lbr_entries[out].reserved = 0; out++; } @@ -947,6 +977,26 @@ void intel_pmu_lbr_init_hsw(void) pr_cont("16-deep LBR, "); } +/* skylake */ +__init void intel_pmu_lbr_init_skl(void) +{ + x86_pmu.lbr_nr = 32; + x86_pmu.lbr_tos = MSR_LBR_TOS; + x86_pmu.lbr_from = MSR_LBR_NHM_FROM; + x86_pmu.lbr_to = MSR_LBR_NHM_TO; + + x86_pmu.lbr_sel_mask = LBR_SEL_MASK; + x86_pmu.lbr_sel_map = hsw_lbr_sel_map; + + /* + * SW branch filter usage: + * - support syscall, sysret capture. 
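The new LBR_FORMAT_INFO path above pulls mispredict, TSX and cycle-count data out of MSR_LBR_INFO_n. The LBR_INFO_* masks live in msr-index.h rather than in this hunk; assuming the usual layout (flags in the top bits, cycle count in the low 16 bits), a standalone decoder for a single info word would look roughly like:

#include <stdio.h>
#include <stdint.h>

/* Assumed bit layout of MSR_LBR_INFO_n (defined in msr-index.h, not shown here) */
#define LBR_INFO_MISPRED	(1ULL << 63)
#define LBR_INFO_IN_TX		(1ULL << 62)
#define LBR_INFO_ABORT		(1ULL << 61)
#define LBR_INFO_CYCLES		0xffffULL

static void decode_lbr_info(uint64_t info)
{
	printf("mispred=%d in_tx=%d abort=%d cycles=%u\n",
	       !!(info & LBR_INFO_MISPRED),
	       !!(info & LBR_INFO_IN_TX),
	       !!(info & LBR_INFO_ABORT),
	       (unsigned int)(info & LBR_INFO_CYCLES));
}

int main(void)
{
	decode_lbr_info((1ULL << 63) | 42);	/* mispredicted branch, 42 core cycles */
	return 0;
}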
+ * That requires LBR_FAR but that means far + * jmp need to be filtered out + */ + pr_cont("32-deep LBR, "); +} + /* atom */ void __init intel_pmu_lbr_init_atom(void) { diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c index 183de719628d..42169283448b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c +++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c @@ -65,15 +65,21 @@ static struct pt_cap_desc { } pt_caps[] = { PT_CAP(max_subleaf, 0, CR_EAX, 0xffffffff), PT_CAP(cr3_filtering, 0, CR_EBX, BIT(0)), + PT_CAP(psb_cyc, 0, CR_EBX, BIT(1)), + PT_CAP(mtc, 0, CR_EBX, BIT(3)), PT_CAP(topa_output, 0, CR_ECX, BIT(0)), PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)), + PT_CAP(single_range_output, 0, CR_ECX, BIT(2)), PT_CAP(payloads_lip, 0, CR_ECX, BIT(31)), + PT_CAP(mtc_periods, 1, CR_EAX, 0xffff0000), + PT_CAP(cycle_thresholds, 1, CR_EBX, 0xffff), + PT_CAP(psb_periods, 1, CR_EBX, 0xffff0000), }; static u32 pt_cap_get(enum pt_capabilities cap) { struct pt_cap_desc *cd = &pt_caps[cap]; - u32 c = pt_pmu.caps[cd->leaf * 4 + cd->reg]; + u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; unsigned int shift = __ffs(cd->mask); return (c & cd->mask) >> shift; @@ -94,12 +100,22 @@ static struct attribute_group pt_cap_group = { .name = "caps", }; +PMU_FORMAT_ATTR(cyc, "config:1" ); +PMU_FORMAT_ATTR(mtc, "config:9" ); PMU_FORMAT_ATTR(tsc, "config:10" ); PMU_FORMAT_ATTR(noretcomp, "config:11" ); +PMU_FORMAT_ATTR(mtc_period, "config:14-17" ); +PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" ); +PMU_FORMAT_ATTR(psb_period, "config:24-27" ); static struct attribute *pt_formats_attr[] = { + &format_attr_cyc.attr, + &format_attr_mtc.attr, &format_attr_tsc.attr, &format_attr_noretcomp.attr, + &format_attr_mtc_period.attr, + &format_attr_cyc_thresh.attr, + &format_attr_psb_period.attr, NULL, }; @@ -129,10 +145,10 @@ static int __init pt_pmu_hw_init(void) for (i = 0; i < PT_CPUID_LEAVES; i++) { cpuid_count(20, i, - &pt_pmu.caps[CR_EAX + i*4], - &pt_pmu.caps[CR_EBX + i*4], - &pt_pmu.caps[CR_ECX + i*4], - &pt_pmu.caps[CR_EDX + i*4]); + &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM], + &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM], + &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM], + &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]); } ret = -ENOMEM; @@ -170,15 +186,65 @@ fail: return ret; } -#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC) +#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \ + RTIT_CTL_CYC_THRESH | \ + RTIT_CTL_PSB_FREQ) + +#define RTIT_CTL_MTC (RTIT_CTL_MTC_EN | \ + RTIT_CTL_MTC_RANGE) + +#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | \ + RTIT_CTL_DISRETC | \ + RTIT_CTL_CYC_PSB | \ + RTIT_CTL_MTC) static bool pt_event_valid(struct perf_event *event) { u64 config = event->attr.config; + u64 allowed, requested; if ((config & PT_CONFIG_MASK) != config) return false; + if (config & RTIT_CTL_CYC_PSB) { + if (!pt_cap_get(PT_CAP_psb_cyc)) + return false; + + allowed = pt_cap_get(PT_CAP_psb_periods); + requested = (config & RTIT_CTL_PSB_FREQ) >> + RTIT_CTL_PSB_FREQ_OFFSET; + if (requested && (!(allowed & BIT(requested)))) + return false; + + allowed = pt_cap_get(PT_CAP_cycle_thresholds); + requested = (config & RTIT_CTL_CYC_THRESH) >> + RTIT_CTL_CYC_THRESH_OFFSET; + if (requested && (!(allowed & BIT(requested)))) + return false; + } + + if (config & RTIT_CTL_MTC) { + /* + * In the unlikely case that CPUID lists valid mtc periods, + * but not the mtc capability, drop out here. 
+ * + * Spec says that setting mtc period bits while mtc bit in + * CPUID is 0 will #GP, so better safe than sorry. + */ + if (!pt_cap_get(PT_CAP_mtc)) + return false; + + allowed = pt_cap_get(PT_CAP_mtc_periods); + if (!allowed) + return false; + + requested = (config & RTIT_CTL_MTC_RANGE) >> + RTIT_CTL_MTC_RANGE_OFFSET; + + if (!(allowed & BIT(requested))) + return false; + } + return true; } @@ -191,6 +257,11 @@ static void pt_config(struct perf_event *event) { u64 reg; + if (!event->hw.itrace_started) { + event->hw.itrace_started = 1; + wrmsrl(MSR_IA32_RTIT_STATUS, 0); + } + reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN; if (!event->attr.exclude_kernel) @@ -910,7 +981,6 @@ void intel_pt_interrupt(void) pt_config_buffer(buf->cur->table, buf->cur_idx, buf->output_off); - wrmsrl(MSR_IA32_RTIT_STATUS, 0); pt_config(event); } } @@ -934,7 +1004,6 @@ static void pt_event_start(struct perf_event *event, int mode) pt_config_buffer(buf->cur->table, buf->cur_idx, buf->output_off); - wrmsrl(MSR_IA32_RTIT_STATUS, 0); pt_config(event); } diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 5cbd4e64feb5..81431c0f0614 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -86,6 +86,10 @@ static const char *rapl_domain_names[NR_RAPL_DOMAINS] __initconst = { 1<<RAPL_IDX_RAM_NRG_STAT|\ 1<<RAPL_IDX_PP1_NRG_STAT) +/* Knights Landing has PKG, RAM */ +#define RAPL_IDX_KNL (1<<RAPL_IDX_PKG_NRG_STAT|\ + 1<<RAPL_IDX_RAM_NRG_STAT) + /* * event code: LSB 8 bits, passed in attr->config * any other bit is reserved @@ -486,6 +490,18 @@ static struct attribute *rapl_events_hsw_attr[] = { NULL, }; +static struct attribute *rapl_events_knl_attr[] = { + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_ram), + + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_ram_unit), + + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_ram_scale), + NULL, +}; + static struct attribute_group rapl_pmu_events_group = { .name = "events", .attrs = NULL, /* patched at runtime */ @@ -730,6 +746,10 @@ static int __init rapl_pmu_init(void) rapl_cntr_mask = RAPL_IDX_SRV; rapl_pmu_events_group.attrs = rapl_events_srv_attr; break; + case 87: /* Knights Landing */ + rapl_add_quirk(rapl_hsw_server_quirk); + rapl_cntr_mask = RAPL_IDX_KNL; + rapl_pmu_events_group.attrs = rapl_events_knl_attr; default: /* unsupported */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 21b5e38c921b..560e5255b15e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -911,6 +911,9 @@ static int __init uncore_pci_init(void) case 63: /* Haswell-EP */ ret = hswep_uncore_pci_init(); break; + case 86: /* BDX-DE */ + ret = bdx_uncore_pci_init(); + break; case 42: /* Sandy Bridge */ ret = snb_uncore_pci_init(); break; @@ -1209,6 +1212,11 @@ static int __init uncore_cpu_init(void) break; case 42: /* Sandy Bridge */ case 58: /* Ivy Bridge */ + case 60: /* Haswell */ + case 69: /* Haswell */ + case 70: /* Haswell */ + case 61: /* Broadwell */ + case 71: /* Broadwell */ snb_uncore_cpu_init(); break; case 45: /* Sandy Bridge-EP */ @@ -1224,6 +1232,9 @@ static int __init uncore_cpu_init(void) case 63: /* Haswell-EP */ hswep_uncore_cpu_init(); break; + case 86: /* BDX-DE */ + bdx_uncore_cpu_init(); + break; default: return 0; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 
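pt_event_valid() above only accepts MTC/PSB/cycle-threshold periods whose bit is set in the CPUID-reported bitmaps. A userspace sketch of the same MTC check, assuming the bitmap is exported in hex through the intel_pt PMU's caps directory (the path and print format are assumptions, not shown in this hunk):

#include <stdio.h>
#include <stdlib.h>

/* Mirror of the MTC-period check in pt_event_valid(): the requested 4-bit
 * period must have its bit set in the CPUID-reported bitmap. */
static int mtc_period_allowed(unsigned int requested)
{
	unsigned int allowed = 0;
	FILE *f = fopen("/sys/bus/event_source/devices/intel_pt/caps/mtc_periods", "r");

	if (!f)
		return 0;
	if (fscanf(f, "%x", &allowed) != 1)
		allowed = 0;
	fclose(f);

	return !!(allowed & (1u << (requested & 0xf)));
}

int main(int argc, char **argv)
{
	unsigned int period = argc > 1 ? (unsigned int)strtoul(argv[1], NULL, 0) : 3;

	printf("mtc_period=%u is %s\n", period,
	       mtc_period_allowed(period) ? "supported" : "not supported");
	return 0;
}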
0f77f0a196e4..72c54c2e5b1a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -336,6 +336,8 @@ int ivbep_uncore_pci_init(void); void ivbep_uncore_cpu_init(void); int hswep_uncore_pci_init(void); void hswep_uncore_cpu_init(void); +int bdx_uncore_pci_init(void); +void bdx_uncore_cpu_init(void); /* perf_event_intel_uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c index b005a78c7012..f78574b3cb55 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c @@ -45,6 +45,11 @@ #define SNB_UNC_CBO_0_PER_CTR0 0x706 #define SNB_UNC_CBO_MSR_OFFSET 0x10 +/* SNB ARB register */ +#define SNB_UNC_ARB_PER_CTR0 0x3b0 +#define SNB_UNC_ARB_PERFEVTSEL0 0x3b2 +#define SNB_UNC_ARB_MSR_OFFSET 0x10 + /* NHM global control register */ #define NHM_UNC_PERF_GLOBAL_CTL 0x391 #define NHM_UNC_FIXED_CTR 0x394 @@ -115,7 +120,7 @@ static struct intel_uncore_ops snb_uncore_msr_ops = { .read_counter = uncore_msr_read_counter, }; -static struct event_constraint snb_uncore_cbox_constraints[] = { +static struct event_constraint snb_uncore_arb_constraints[] = { UNCORE_EVENT_CONSTRAINT(0x80, 0x1), UNCORE_EVENT_CONSTRAINT(0x83, 0x1), EVENT_CONSTRAINT_END @@ -134,14 +139,28 @@ static struct intel_uncore_type snb_uncore_cbox = { .single_fixed = 1, .event_mask = SNB_UNC_RAW_EVENT_MASK, .msr_offset = SNB_UNC_CBO_MSR_OFFSET, - .constraints = snb_uncore_cbox_constraints, .ops = &snb_uncore_msr_ops, .format_group = &snb_uncore_format_group, .event_descs = snb_uncore_events, }; +static struct intel_uncore_type snb_uncore_arb = { + .name = "arb", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 44, + .perf_ctr = SNB_UNC_ARB_PER_CTR0, + .event_ctl = SNB_UNC_ARB_PERFEVTSEL0, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = SNB_UNC_ARB_MSR_OFFSET, + .constraints = snb_uncore_arb_constraints, + .ops = &snb_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + static struct intel_uncore_type *snb_msr_uncores[] = { &snb_uncore_cbox, + &snb_uncore_arb, NULL, }; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index 6d6e85dd5849..694510a887dc 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -2215,7 +2215,7 @@ static struct intel_uncore_type *hswep_pci_uncores[] = { NULL, }; -static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = { +static const struct pci_device_id hswep_uncore_pci_ids[] = { { /* Home Agent 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30), .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0), @@ -2321,3 +2321,167 @@ int hswep_uncore_pci_init(void) return 0; } /* end of Haswell-EP uncore support */ + +/* BDX-DE uncore support */ + +static struct intel_uncore_type bdx_uncore_ubox = { + .name = "ubox", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = HSWEP_U_MSR_PMON_CTR0, + .event_ctl = HSWEP_U_MSR_PMON_CTL0, + .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, + .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, + .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, + .num_shared_regs = 1, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_ubox_format_group, +}; + +static struct event_constraint bdx_uncore_cbox_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x09, 0x3), + 
UNCORE_EVENT_CONSTRAINT(0x11, 0x1), + UNCORE_EVENT_CONSTRAINT(0x36, 0x1), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type bdx_uncore_cbox = { + .name = "cbox", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = HSWEP_C0_MSR_PMON_CTL0, + .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, + .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, + .msr_offset = HSWEP_CBO_MSR_OFFSET, + .num_shared_regs = 1, + .constraints = bdx_uncore_cbox_constraints, + .ops = &hswep_uncore_cbox_ops, + .format_group = &hswep_uncore_cbox_format_group, +}; + +static struct intel_uncore_type *bdx_msr_uncores[] = { + &bdx_uncore_ubox, + &bdx_uncore_cbox, + &hswep_uncore_pcu, + NULL, +}; + +void bdx_uncore_cpu_init(void) +{ + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) + bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; + uncore_msr_uncores = bdx_msr_uncores; +} + +static struct intel_uncore_type bdx_uncore_ha = { + .name = "ha", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type bdx_uncore_imc = { + .name = "imc", + .num_counters = 5, + .num_boxes = 2, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, + .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, + .event_descs = hswep_uncore_imc_events, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type bdx_uncore_irp = { + .name = "irp", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .ops = &hswep_uncore_irp_ops, + .format_group = &snbep_uncore_format_group, +}; + + +static struct event_constraint bdx_uncore_r2pcie_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x10, 0x3), + UNCORE_EVENT_CONSTRAINT(0x11, 0x3), + UNCORE_EVENT_CONSTRAINT(0x13, 0x1), + UNCORE_EVENT_CONSTRAINT(0x23, 0x1), + UNCORE_EVENT_CONSTRAINT(0x25, 0x1), + UNCORE_EVENT_CONSTRAINT(0x26, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type bdx_uncore_r2pcie = { + .name = "r2pcie", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .constraints = bdx_uncore_r2pcie_constraints, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +enum { + BDX_PCI_UNCORE_HA, + BDX_PCI_UNCORE_IMC, + BDX_PCI_UNCORE_IRP, + BDX_PCI_UNCORE_R2PCIE, +}; + +static struct intel_uncore_type *bdx_pci_uncores[] = { + [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha, + [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc, + [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp, + [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie, + NULL, +}; + +static DEFINE_PCI_DEVICE_TABLE(bdx_uncore_pci_ids) = { + { /* Home Agent 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0), + }, + { /* MC0 Channel 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0), + }, + { /* MC0 Channel 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1), + }, + { /* IRP */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0), + }, + { /* R2PCIe */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver bdx_uncore_pci_driver = { + .name = "bdx_uncore", + .id_table = bdx_uncore_pci_ids, +}; + +int bdx_uncore_pci_init(void) 
+{ + int ret = snbep_pci2phy_map_init(0x6f1e); + + if (ret) + return ret; + uncore_pci_uncores = bdx_pci_uncores; + uncore_pci_driver = &bdx_uncore_pci_driver; + return 0; +} + +/* end of BDX-DE uncore support */ diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c new file mode 100644 index 000000000000..086b12eae794 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_msr.c @@ -0,0 +1,242 @@ +#include <linux/perf_event.h> + +enum perf_msr_id { + PERF_MSR_TSC = 0, + PERF_MSR_APERF = 1, + PERF_MSR_MPERF = 2, + PERF_MSR_PPERF = 3, + PERF_MSR_SMI = 4, + + PERF_MSR_EVENT_MAX, +}; + +bool test_aperfmperf(int idx) +{ + return boot_cpu_has(X86_FEATURE_APERFMPERF); +} + +bool test_intel(int idx) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || + boot_cpu_data.x86 != 6) + return false; + + switch (boot_cpu_data.x86_model) { + case 30: /* 45nm Nehalem */ + case 26: /* 45nm Nehalem-EP */ + case 46: /* 45nm Nehalem-EX */ + + case 37: /* 32nm Westmere */ + case 44: /* 32nm Westmere-EP */ + case 47: /* 32nm Westmere-EX */ + + case 42: /* 32nm SandyBridge */ + case 45: /* 32nm SandyBridge-E/EN/EP */ + + case 58: /* 22nm IvyBridge */ + case 62: /* 22nm IvyBridge-EP/EX */ + + case 60: /* 22nm Haswell Core */ + case 63: /* 22nm Haswell Server */ + case 69: /* 22nm Haswell ULT */ + case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ + + case 61: /* 14nm Broadwell Core-M */ + case 86: /* 14nm Broadwell Xeon D */ + case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */ + case 79: /* 14nm Broadwell Server */ + + case 55: /* 22nm Atom "Silvermont" */ + case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ + case 76: /* 14nm Atom "Airmont" */ + if (idx == PERF_MSR_SMI) + return true; + break; + + case 78: /* 14nm Skylake Mobile */ + case 94: /* 14nm Skylake Desktop */ + if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) + return true; + break; + } + + return false; +} + +struct perf_msr { + u64 msr; + struct perf_pmu_events_attr *attr; + bool (*test)(int idx); +}; + +PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00"); +PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01"); +PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02"); +PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03"); +PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04"); + +static struct perf_msr msr[] = { + [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, }, + [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, }, + [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, }, + [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, }, + [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, }, +}; + +static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = { + NULL, +}; + +static struct attribute_group events_attr_group = { + .name = "events", + .attrs = events_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); +static struct attribute *format_attrs[] = { + &format_attr_event.attr, + NULL, +}; +static struct attribute_group format_attr_group = { + .name = "format", + .attrs = format_attrs, +}; + +static const struct attribute_group *attr_groups[] = { + &events_attr_group, + &format_attr_group, + NULL, +}; + +static int msr_event_init(struct perf_event *event) +{ + u64 cfg = event->attr.config; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (cfg >= PERF_MSR_EVENT_MAX) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + 
event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.sample_period) /* no sampling */ + return -EINVAL; + + if (!msr[cfg].attr) + return -EINVAL; + + event->hw.idx = -1; + event->hw.event_base = msr[cfg].msr; + event->hw.config = cfg; + + return 0; +} + +static inline u64 msr_read_counter(struct perf_event *event) +{ + u64 now; + + if (event->hw.event_base) + rdmsrl(event->hw.event_base, now); + else + rdtscll(now); + + return now; +} +static void msr_event_update(struct perf_event *event) +{ + u64 prev, now; + s64 delta; + + /* Careful, an NMI might modify the previous event value. */ +again: + prev = local64_read(&event->hw.prev_count); + now = msr_read_counter(event); + + if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev) + goto again; + + delta = now - prev; + if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) { + delta <<= 32; + delta >>= 32; /* sign extend */ + } + local64_add(now - prev, &event->count); +} + +static void msr_event_start(struct perf_event *event, int flags) +{ + u64 now; + + now = msr_read_counter(event); + local64_set(&event->hw.prev_count, now); +} + +static void msr_event_stop(struct perf_event *event, int flags) +{ + msr_event_update(event); +} + +static void msr_event_del(struct perf_event *event, int flags) +{ + msr_event_stop(event, PERF_EF_UPDATE); +} + +static int msr_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + msr_event_start(event, flags); + + return 0; +} + +static struct pmu pmu_msr = { + .task_ctx_nr = perf_sw_context, + .attr_groups = attr_groups, + .event_init = msr_event_init, + .add = msr_event_add, + .del = msr_event_del, + .start = msr_event_start, + .stop = msr_event_stop, + .read = msr_event_update, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT, +}; + +static int __init msr_init(void) +{ + int i, j = 0; + + if (!boot_cpu_has(X86_FEATURE_TSC)) { + pr_cont("no MSR PMU driver.\n"); + return 0; + } + + /* Probe the MSRs. */ + for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) { + u64 val; + + /* + * Virt sucks arse; you cannot tell if a R/O MSR is present :/ + */ + if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val)) + msr[i].attr = NULL; + } + + /* List remaining MSRs in the sysfs attrs. 
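A minimal userspace sketch of reading the counters this new driver exposes, assuming it registers under the name "msr" (as the perf_pmu_register() call just below does) and using the event encodings from the enum above (0x01 = APERF, 0x02 = MPERF); counting mode only, since msr_event_init() rejects sampling:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_open(unsigned int type, uint64_t config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* dynamic type of the "msr" PMU, from sysfs */
	attr.config = config;	/* 0x01 = aperf, 0x02 = mperf, per the enum above */

	/* count for the calling thread on any CPU; no sampling */
	return (int)syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	FILE *f = fopen("/sys/bus/event_source/devices/msr/type", "r");
	uint64_t aperf = 0, mperf = 0;
	unsigned int type;
	int afd, mfd;

	if (!f || fscanf(f, "%u", &type) != 1)
		return 1;
	fclose(f);

	afd = perf_open(type, 0x01);
	mfd = perf_open(type, 0x02);
	if (afd < 0 || mfd < 0)
		return 1;

	sleep(1);	/* let both counters run for roughly a second */

	if (read(afd, &aperf, sizeof(aperf)) != sizeof(aperf) ||
	    read(mfd, &mperf, sizeof(mperf)) != sizeof(mperf))
		return 1;

	printf("aperf=%llu mperf=%llu ratio=%.2f\n",
	       (unsigned long long)aperf, (unsigned long long)mperf,
	       mperf ? (double)aperf / (double)mperf : 0.0);
	return 0;
}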
*/ + for (i = 0; i < PERF_MSR_EVENT_MAX; i++) { + if (msr[i].attr) + events_attrs[j++] = &msr[i].attr->attr.attr; + } + events_attrs[j] = NULL; + + perf_pmu_register(&pmu_msr, "msr", -1); + + return 0; +} +device_initcall(msr_init); diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 83741a71558f..bd3507da39f0 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, return notifier_from_errno(err); } -static struct notifier_block __refdata cpuid_class_cpu_notifier = +static struct notifier_block cpuid_class_cpu_notifier = { .notifier_call = cpuid_class_cpu_callback, }; diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index ce95676abd60..4d38416e2a7f 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -110,7 +110,7 @@ static void init_espfix_random(void) */ if (!arch_get_random_long(&rand)) { /* The constant is an arbitrary large prime */ - rdtscll(rand); + rand = rdtsc(); rand *= 0xc345c6b72fd16123UL; } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 10757d0a3fcf..f75c5908c7a6 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -735,7 +735,7 @@ static int hpet_clocksource_register(void) /* Verify whether hpet counter works */ t1 = hpet_readl(HPET_COUNTER); - rdtscll(start); + start = rdtsc(); /* * We don't know the TSC frequency yet, but waiting for @@ -745,7 +745,7 @@ static int hpet_clocksource_register(void) */ do { rep_nop(); - rdtscll(now); + now = rdtsc(); } while ((now - start) < 200000UL); if (t1 == hpet_readl(HPET_COUNTER)) { diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 7114ba220fd4..50a3fad5b89f 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -32,6 +32,7 @@ #include <linux/irqflags.h> #include <linux/notifier.h> #include <linux/kallsyms.h> +#include <linux/kprobes.h> #include <linux/percpu.h> #include <linux/kdebug.h> #include <linux/kernel.h> @@ -179,7 +180,11 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) va = info->address; len = bp->attr.bp_len; - return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); + /* + * We don't need to worry about va + len - 1 overflowing: + * we already require that va is aligned to a multiple of len. + */ + return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); } int arch_bp_generic_fields(int x86_len, int x86_type, @@ -243,6 +248,20 @@ static int arch_build_bp_info(struct perf_event *bp) info->type = X86_BREAKPOINT_RW; break; case HW_BREAKPOINT_X: + /* + * We don't allow kernel breakpoints in places that are not + * acceptable for kprobes. On non-kprobes kernels, we don't + * allow kernel breakpoints at all. + */ + if (bp->attr.bp_addr >= TASK_SIZE_MAX) { +#ifdef CONFIG_KPROBES + if (within_kprobe_blacklist(bp->attr.bp_addr)) + return -EINVAL; +#else + return -EINVAL; +#endif + } + info->type = X86_BREAKPOINT_EXECUTE; /* * x86 inst breakpoints need to have a specific undefined len. @@ -276,8 +295,18 @@ static int arch_build_bp_info(struct perf_event *bp) break; #endif default: + /* AMD range breakpoint */ if (!is_power_of_2(bp->attr.bp_len)) return -EINVAL; + if (bp->attr.bp_addr & (bp->attr.bp_len - 1)) + return -EINVAL; + /* + * It's impossible to use a range breakpoint to fake out + * user vs kernel detection because bp_len - 1 can't + * have the high bit set. 
If we ever allow range instruction + * breakpoints, then we'll have to check for kprobe-blacklisted + * addresses anywhere in the range. + */ if (!cpu_has_bpext) return -EOPNOTSUPP; info->mask = bp->attr.bp_len - 1; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index c7dfe1be784e..ae00b355114d 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -139,10 +139,13 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_puts(p, " Machine check polls\n"); #endif #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) - seq_printf(p, "%*s: ", prec, "HYP"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count); - seq_puts(p, " Hypervisor callback interrupts\n"); + if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) { + seq_printf(p, "%*s: ", prec, "HYP"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + irq_stats(j)->irq_hv_callback_count); + seq_puts(p, " Hypervisor callback interrupts\n"); + } #endif seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) @@ -216,8 +219,23 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) unsigned vector = ~regs->orig_ax; unsigned irq; + /* + * NB: Unlike exception entries, IRQ entries do not reliably + * handle context tracking in the low-level entry code. This is + * because syscall entries execute briefly with IRQs on before + * updating context tracking state, so we can take an IRQ from + * kernel mode with CONTEXT_USER. The low-level entry code only + * updates the context if we came from user mode, so we won't + * switch to CONTEXT_KERNEL. We'll fix that once the syscall + * code is cleaned up enough that we can cleanly defer enabling + * IRQs. + */ + entering_irq(); + /* entering_irq() tells RCU that we're not quiescent. Check it. 
*/ + RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU"); + irq = __this_cpu_read(vector_irq[vector]); if (!handle_irq(irq, regs)) { diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index ca83f7ac388b..961e51e9c6f6 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -223,9 +223,6 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, memset(¶ms->hd0_info, 0, sizeof(params->hd0_info)); memset(¶ms->hd1_info, 0, sizeof(params->hd1_info)); - /* Default sysdesc table */ - params->sys_desc_table.length = 0; - if (image->type == KEXEC_TYPE_CRASH) { ret = crash_setup_memmap_entries(image, params); if (ret) diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index d05bd2e2ee91..697f90db0e37 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -110,7 +110,7 @@ static void nmi_max_handler(struct irq_work *w) a->handler, whole_msecs, decimal_msecs); } -static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) +static int nmi_handle(unsigned int type, struct pt_regs *regs) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *a; @@ -213,7 +213,7 @@ static void pci_serr_error(unsigned char reason, struct pt_regs *regs) { /* check to see if anyone registered against these types of errors */ - if (nmi_handle(NMI_SERR, regs, false)) + if (nmi_handle(NMI_SERR, regs)) return; pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", @@ -247,7 +247,7 @@ io_check_error(unsigned char reason, struct pt_regs *regs) unsigned long i; /* check to see if anyone registered against these types of errors */ - if (nmi_handle(NMI_IO_CHECK, regs, false)) + if (nmi_handle(NMI_IO_CHECK, regs)) return; pr_emerg( @@ -284,7 +284,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs) * as only the first one is ever run (unless it can actually determine * if it caused the NMI) */ - handled = nmi_handle(NMI_UNKNOWN, regs, false); + handled = nmi_handle(NMI_UNKNOWN, regs); if (handled) { __this_cpu_add(nmi_stats.unknown, handled); return; @@ -332,7 +332,7 @@ static void default_do_nmi(struct pt_regs *regs) __this_cpu_write(last_nmi_rip, regs->ip); - handled = nmi_handle(NMI_LOCAL, regs, b2b); + handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { /* diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 58bcfb67c01f..f68e48f5f6c2 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -351,9 +351,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .wbinvd = native_wbinvd, .read_msr = native_read_msr_safe, .write_msr = native_write_msr_safe, - .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, - .read_tscp = native_read_tscp, .load_tr_desc = native_load_tr_desc, .set_ldt = native_set_ldt, .load_gdt = native_load_gdt, diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index e1b013696dde..c89f50a76e97 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -10,7 +10,6 @@ DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3"); DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax"); DEF_NATIVE(pv_cpu_ops, clts, "clts"); -DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc"); #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS) DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)"); @@ -52,7 +51,6 @@ unsigned native_patch(u8 type, u16 clobbers, 
void *ibuf, PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); PATCH_SITE(pv_cpu_ops, clts); - PATCH_SITE(pv_cpu_ops, read_tsc); #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS) case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): if (pv_is_native_spin_unlock()) { diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c27cad726765..6d0e62ae8516 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -29,6 +29,8 @@ #include <asm/debugreg.h> #include <asm/nmi.h> #include <asm/tlbflush.h> +#include <asm/mce.h> +#include <asm/vm86.h> /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, @@ -110,6 +112,8 @@ void exit_thread(void) kfree(bp); } + free_vm86(t); + fpu__drop(fpu); } @@ -319,6 +323,7 @@ void stop_this_cpu(void *dummy) */ set_cpu_online(smp_processor_id(), false); disable_local_APIC(); + mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); for (;;) halt(); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index f73c962fe636..c13df2c735f8 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -53,6 +53,7 @@ #include <asm/syscalls.h> #include <asm/debugreg.h> #include <asm/switch_to.h> +#include <asm/vm86.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index f6b916387590..3c1bbcf12924 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -121,6 +121,7 @@ void __show_regs(struct pt_regs *regs, int all) void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { +#ifdef CONFIG_MODIFY_LDT_SYSCALL if (dead_task->mm->context.ldt) { pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", dead_task->comm, @@ -128,6 +129,7 @@ void release_thread(struct task_struct *dead_task) dead_task->mm->context.ldt->size); BUG(); } +#endif } } @@ -248,8 +250,8 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) __USER_CS, __USER_DS, 0); } -#ifdef CONFIG_IA32_EMULATION -void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) +#ifdef CONFIG_COMPAT +void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp) { start_thread_common(regs, new_ip, new_sp, test_thread_flag(TIF_X32) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 9be72bc3613f..558f50edebca 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -37,12 +37,10 @@ #include <asm/proto.h> #include <asm/hw_breakpoint.h> #include <asm/traps.h> +#include <asm/syscall.h> #include "tls.h" -#define CREATE_TRACE_POINTS -#include <trace/events/syscalls.h> - enum x86_regset { REGSET_GENERAL, REGSET_FP, @@ -1123,6 +1121,73 @@ static int genregs32_set(struct task_struct *target, return ret; } +static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) +{ + unsigned long addr = caddr; + unsigned long data = cdata; + void __user *datap = compat_ptr(data); + int ret; + __u32 val; + + switch (request) { + case PTRACE_PEEKUSR: + ret = getreg32(child, addr, &val); + if (ret == 0) + ret = put_user(val, (__u32 __user *)datap); + break; + + case PTRACE_POKEUSR: + ret = putreg32(child, addr, data); + break; + + case PTRACE_GETREGS: /* Get all gp regs from the child. 
*/ + return copy_regset_to_user(child, &user_x86_32_view, + REGSET_GENERAL, + 0, sizeof(struct user_regs_struct32), + datap); + + case PTRACE_SETREGS: /* Set all gp regs in the child. */ + return copy_regset_from_user(child, &user_x86_32_view, + REGSET_GENERAL, 0, + sizeof(struct user_regs_struct32), + datap); + + case PTRACE_GETFPREGS: /* Get the child FPU state. */ + return copy_regset_to_user(child, &user_x86_32_view, + REGSET_FP, 0, + sizeof(struct user_i387_ia32_struct), + datap); + + case PTRACE_SETFPREGS: /* Set the child FPU state. */ + return copy_regset_from_user( + child, &user_x86_32_view, REGSET_FP, + 0, sizeof(struct user_i387_ia32_struct), datap); + + case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ + return copy_regset_to_user(child, &user_x86_32_view, + REGSET_XFP, 0, + sizeof(struct user32_fxsr_struct), + datap); + + case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ + return copy_regset_from_user(child, &user_x86_32_view, + REGSET_XFP, 0, + sizeof(struct user32_fxsr_struct), + datap); + + case PTRACE_GET_THREAD_AREA: + case PTRACE_SET_THREAD_AREA: + return arch_ptrace(child, request, addr, data); + + default: + return compat_ptrace_request(child, request, addr, data); + } + + return ret; +} +#endif /* CONFIG_IA32_EMULATION */ + #ifdef CONFIG_X86_X32_ABI static long x32_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, @@ -1211,78 +1276,21 @@ static long x32_arch_ptrace(struct task_struct *child, } #endif +#ifdef CONFIG_COMPAT long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { - unsigned long addr = caddr; - unsigned long data = cdata; - void __user *datap = compat_ptr(data); - int ret; - __u32 val; - #ifdef CONFIG_X86_X32_ABI if (!is_ia32_task()) return x32_arch_ptrace(child, request, caddr, cdata); #endif - - switch (request) { - case PTRACE_PEEKUSR: - ret = getreg32(child, addr, &val); - if (ret == 0) - ret = put_user(val, (__u32 __user *)datap); - break; - - case PTRACE_POKEUSR: - ret = putreg32(child, addr, data); - break; - - case PTRACE_GETREGS: /* Get all gp regs from the child. */ - return copy_regset_to_user(child, &user_x86_32_view, - REGSET_GENERAL, - 0, sizeof(struct user_regs_struct32), - datap); - - case PTRACE_SETREGS: /* Set all gp regs in the child. */ - return copy_regset_from_user(child, &user_x86_32_view, - REGSET_GENERAL, 0, - sizeof(struct user_regs_struct32), - datap); - - case PTRACE_GETFPREGS: /* Get the child FPU state. */ - return copy_regset_to_user(child, &user_x86_32_view, - REGSET_FP, 0, - sizeof(struct user_i387_ia32_struct), - datap); - - case PTRACE_SETFPREGS: /* Set the child FPU state. */ - return copy_regset_from_user( - child, &user_x86_32_view, REGSET_FP, - 0, sizeof(struct user_i387_ia32_struct), datap); - - case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ - return copy_regset_to_user(child, &user_x86_32_view, - REGSET_XFP, 0, - sizeof(struct user32_fxsr_struct), - datap); - - case PTRACE_SETFPXREGS: /* Set the child extended FPU state. 
*/ - return copy_regset_from_user(child, &user_x86_32_view, - REGSET_XFP, 0, - sizeof(struct user32_fxsr_struct), - datap); - - case PTRACE_GET_THREAD_AREA: - case PTRACE_SET_THREAD_AREA: - return arch_ptrace(child, request, addr, data); - - default: - return compat_ptrace_request(child, request, addr, data); - } - - return ret; +#ifdef CONFIG_IA32_EMULATION + return ia32_arch_ptrace(child, request, caddr, cdata); +#else + return 0; +#endif } - -#endif /* CONFIG_IA32_EMULATION */ +#endif /* CONFIG_COMPAT */ #ifdef CONFIG_X86_64 @@ -1434,201 +1442,3 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, /* Send us the fake SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); } - -static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) -{ -#ifdef CONFIG_X86_64 - if (arch == AUDIT_ARCH_X86_64) { - audit_syscall_entry(regs->orig_ax, regs->di, - regs->si, regs->dx, regs->r10); - } else -#endif - { - audit_syscall_entry(regs->orig_ax, regs->bx, - regs->cx, regs->dx, regs->si); - } -} - -/* - * We can return 0 to resume the syscall or anything else to go to phase - * 2. If we resume the syscall, we need to put something appropriate in - * regs->orig_ax. - * - * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax - * are fully functional. - * - * For phase 2's benefit, our return value is: - * 0: resume the syscall - * 1: go to phase 2; no seccomp phase 2 needed - * anything else: go to phase 2; pass return value to seccomp - */ -unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) -{ - unsigned long ret = 0; - u32 work; - - BUG_ON(regs != task_pt_regs(current)); - - work = ACCESS_ONCE(current_thread_info()->flags) & - _TIF_WORK_SYSCALL_ENTRY; - - /* - * If TIF_NOHZ is set, we are required to call user_exit() before - * doing anything that could touch RCU. - */ - if (work & _TIF_NOHZ) { - user_exit(); - work &= ~_TIF_NOHZ; - } - -#ifdef CONFIG_SECCOMP - /* - * Do seccomp first -- it should minimize exposure of other - * code, and keeping seccomp fast is probably more valuable - * than the rest of this. - */ - if (work & _TIF_SECCOMP) { - struct seccomp_data sd; - - sd.arch = arch; - sd.nr = regs->orig_ax; - sd.instruction_pointer = regs->ip; -#ifdef CONFIG_X86_64 - if (arch == AUDIT_ARCH_X86_64) { - sd.args[0] = regs->di; - sd.args[1] = regs->si; - sd.args[2] = regs->dx; - sd.args[3] = regs->r10; - sd.args[4] = regs->r8; - sd.args[5] = regs->r9; - } else -#endif - { - sd.args[0] = regs->bx; - sd.args[1] = regs->cx; - sd.args[2] = regs->dx; - sd.args[3] = regs->si; - sd.args[4] = regs->di; - sd.args[5] = regs->bp; - } - - BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0); - BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1); - - ret = seccomp_phase1(&sd); - if (ret == SECCOMP_PHASE1_SKIP) { - regs->orig_ax = -1; - ret = 0; - } else if (ret != SECCOMP_PHASE1_OK) { - return ret; /* Go directly to phase 2 */ - } - - work &= ~_TIF_SECCOMP; - } -#endif - - /* Do our best to finish without phase 2. */ - if (work == 0) - return ret; /* seccomp and/or nohz only (ret == 0 here) */ - -#ifdef CONFIG_AUDITSYSCALL - if (work == _TIF_SYSCALL_AUDIT) { - /* - * If there is no more work to be done except auditing, - * then audit in phase 1. Phase 2 always audits, so, if - * we audit here, then we can't go on to phase 2. - */ - do_audit_syscall_entry(regs, arch); - return 0; - } -#endif - - return 1; /* Something is enabled that we can't handle in phase 1 */ -} - -/* Returns the syscall nr to run (which should match regs->orig_ax). 
*/ -long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch, - unsigned long phase1_result) -{ - long ret = 0; - u32 work = ACCESS_ONCE(current_thread_info()->flags) & - _TIF_WORK_SYSCALL_ENTRY; - - BUG_ON(regs != task_pt_regs(current)); - - /* - * If we stepped into a sysenter/syscall insn, it trapped in - * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. - * If user-mode had set TF itself, then it's still clear from - * do_debug() and we need to set it again to restore the user - * state. If we entered on the slow path, TF was already set. - */ - if (work & _TIF_SINGLESTEP) - regs->flags |= X86_EFLAGS_TF; - -#ifdef CONFIG_SECCOMP - /* - * Call seccomp_phase2 before running the other hooks so that - * they can see any changes made by a seccomp tracer. - */ - if (phase1_result > 1 && seccomp_phase2(phase1_result)) { - /* seccomp failures shouldn't expose any additional code. */ - return -1; - } -#endif - - if (unlikely(work & _TIF_SYSCALL_EMU)) - ret = -1L; - - if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && - tracehook_report_syscall_entry(regs)) - ret = -1L; - - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_enter(regs, regs->orig_ax); - - do_audit_syscall_entry(regs, arch); - - return ret ?: regs->orig_ax; -} - -long syscall_trace_enter(struct pt_regs *regs) -{ - u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64; - unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch); - - if (phase1_result == 0) - return regs->orig_ax; - else - return syscall_trace_enter_phase2(regs, arch, phase1_result); -} - -void syscall_trace_leave(struct pt_regs *regs) -{ - bool step; - - /* - * We may come here right after calling schedule_user() - * or do_notify_resume(), in which case we can be in RCU - * user mode. - */ - user_exit(); - - audit_syscall_exit(regs); - - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_exit(regs, regs->ax); - - /* - * If TIF_SYSCALL_EMU is set, we only get here because of - * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). - * We already reported this syscall instruction in - * syscall_trace_enter(). 
- */ - step = unlikely(test_thread_flag(TIF_SINGLESTEP)) && - !test_thread_flag(TIF_SYSCALL_EMU); - if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); - - user_enter(); -} diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 80f874bf999e..b143c2d04420 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -916,11 +916,6 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_32 apm_info.bios = boot_params.apm_bios_info; ist_info = boot_params.ist_info; - if (boot_params.sys_desc_table.length != 0) { - machine_id = boot_params.sys_desc_table.table[0]; - machine_submodel_id = boot_params.sys_desc_table.table[1]; - BIOS_revision = boot_params.sys_desc_table.table[2]; - } #endif saved_video_mode = boot_params.hdr.vid_mode; bootloader_type = boot_params.hdr.type_of_loader; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 71820c42b6ce..da52e6bb5c7f 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -31,11 +31,11 @@ #include <asm/vdso.h> #include <asm/mce.h> #include <asm/sighandling.h> +#include <asm/vm86.h> #ifdef CONFIG_X86_64 #include <asm/proto.h> #include <asm/ia32_unistd.h> -#include <asm/sys_ia32.h> #endif /* CONFIG_X86_64 */ #include <asm/syscall.h> @@ -632,6 +632,9 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) bool stepping, failed; struct fpu *fpu = ¤t->thread.fpu; + if (v8086_mode(regs)) + save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL); + /* Are we from a system call? */ if (syscall_get_nr(current, regs) >= 0) { /* If so, check system call restarting.. */ @@ -697,7 +700,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -static void do_signal(struct pt_regs *regs) +void do_signal(struct pt_regs *regs) { struct ksignal ksig; @@ -732,32 +735,6 @@ static void do_signal(struct pt_regs *regs) restore_saved_sigmask(); } -/* - * notification of userspace execution resumption - * - triggered by the TIF_WORK_MASK flags - */ -__visible void -do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) -{ - user_exit(); - - if (thread_info_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - - /* deal with pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) - do_signal(regs); - - if (thread_info_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } - if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) - fire_user_return_notifiers(); - - user_enter(); -} - void signal_fault(struct pt_regs *regs, void __user *frame, char *where) { struct task_struct *me = current; diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c new file mode 100644 index 000000000000..dc3c0b1c816f --- /dev/null +++ b/arch/x86/kernel/signal_compat.c @@ -0,0 +1,95 @@ +#include <linux/compat.h> +#include <linux/uaccess.h> + +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) +{ + int err = 0; + bool ia32 = test_thread_flag(TIF_IA32); + + if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) + return -EFAULT; + + put_user_try { + /* If you change siginfo_t structure, please make sure that + this code is fixed accordingly. + It should never copy any pad contained in the structure + to avoid security leaks, but must copy the generic + 3 ints plus the relevant union member. 
*/ + put_user_ex(from->si_signo, &to->si_signo); + put_user_ex(from->si_errno, &to->si_errno); + put_user_ex((short)from->si_code, &to->si_code); + + if (from->si_code < 0) { + put_user_ex(from->si_pid, &to->si_pid); + put_user_ex(from->si_uid, &to->si_uid); + put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); + } else { + /* + * First 32bits of unions are always present: + * si_pid === si_band === si_tid === si_addr(LS half) + */ + put_user_ex(from->_sifields._pad[0], + &to->_sifields._pad[0]); + switch (from->si_code >> 16) { + case __SI_FAULT >> 16: + break; + case __SI_SYS >> 16: + put_user_ex(from->si_syscall, &to->si_syscall); + put_user_ex(from->si_arch, &to->si_arch); + break; + case __SI_CHLD >> 16: + if (ia32) { + put_user_ex(from->si_utime, &to->si_utime); + put_user_ex(from->si_stime, &to->si_stime); + } else { + put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime); + put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime); + } + put_user_ex(from->si_status, &to->si_status); + /* FALL THROUGH */ + default: + case __SI_KILL >> 16: + put_user_ex(from->si_uid, &to->si_uid); + break; + case __SI_POLL >> 16: + put_user_ex(from->si_fd, &to->si_fd); + break; + case __SI_TIMER >> 16: + put_user_ex(from->si_overrun, &to->si_overrun); + put_user_ex(ptr_to_compat(from->si_ptr), + &to->si_ptr); + break; + /* This is not generated by the kernel as of now. */ + case __SI_RT >> 16: + case __SI_MESGQ >> 16: + put_user_ex(from->si_uid, &to->si_uid); + put_user_ex(from->si_int, &to->si_int); + break; + } + } + } put_user_catch(err); + + return err; +} + +int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) +{ + int err = 0; + u32 ptr32; + + if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) + return -EFAULT; + + get_user_try { + get_user_ex(to->si_signo, &from->si_signo); + get_user_ex(to->si_errno, &from->si_errno); + get_user_ex(to->si_code, &from->si_code); + + get_user_ex(to->si_pid, &from->si_pid); + get_user_ex(to->si_uid, &from->si_uid); + get_user_ex(ptr32, &from->si_ptr); + to->si_ptr = compat_ptr(ptr32); + } get_user_catch(err); + + return err; +} diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 15aaa69bbb5e..12c8286206ce 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -30,6 +30,7 @@ #include <asm/proto.h> #include <asm/apic.h> #include <asm/nmi.h> +#include <asm/mce.h> #include <asm/trace/irq_vectors.h> /* * Some notes on x86 processor bugs affecting SMP operation: @@ -243,6 +244,7 @@ static void native_stop_other_cpus(int wait) finish: local_irq_save(flags); disable_local_APIC(); + mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); local_irq_restore(flags); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b1f3ed9c7a9e..e0c198e5f920 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -97,8 +97,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); -atomic_t init_deasserted; - static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) { unsigned long flags; @@ -146,16 +144,11 @@ static void smp_callin(void) /* * If waken up by an INIT in an 82489DX configuration - * we may get here before an INIT-deassert IPI reaches - * our local APIC. We have to wait for the IPI or we'll - * lock up on an APIC access. - * - * Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI. 
+ * cpu_callout_mask guarantees we don't get here before + * an INIT_deassert IPI reaches our local APIC, so it is + * now safe to touch our local APIC. */ cpuid = smp_processor_id(); - if (apic->wait_for_init_deassert && cpuid) - while (!atomic_read(&init_deasserted)) - cpu_relax(); /* * (This works even if the APIC is not enabled.) @@ -620,7 +613,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) send_status = safe_apic_wait_icr_idle(); mb(); - atomic_set(&init_deasserted, 1); /* * Should we send STARTUP IPIs ? @@ -665,7 +657,8 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - udelay(300); + if (init_udelay) + udelay(300); pr_debug("Startup point 1\n"); @@ -675,7 +668,8 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - udelay(200); + if (init_udelay) + udelay(200); if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ apic_write(APIC_ESR, 0); @@ -859,8 +853,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) * the targeted processor. */ - atomic_set(&init_deasserted, 0); - if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { pr_debug("Setting warm reset code and vector.\n"); @@ -898,7 +890,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) if (!boot_error) { /* - * Wait 10s total for a response from AP + * Wait 10s total for first sign of life from AP */ boot_error = -1; timeout = jiffies + 10*HZ; @@ -911,7 +903,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) boot_error = 0; break; } - udelay(100); schedule(); } } @@ -927,7 +918,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) * for the MTRR work(triggered by the AP coming online) * to be completed in the stop machine context. */ - udelay(100); schedule(); } } @@ -1358,7 +1348,7 @@ static void remove_siblinginfo(int cpu) cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); } -static void __ref remove_cpu_from_maps(int cpu) +static void remove_cpu_from_maps(int cpu) { set_cpu_online(cpu, false); cpumask_clear_cpu(cpu, cpu_callout_mask); diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 0ccb53a9fcd9..c9a073866ca7 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -18,6 +18,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re return addr; } +#ifdef CONFIG_MODIFY_LDT_SYSCALL /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the @@ -45,6 +46,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re } mutex_unlock(&child->mm->context.lock); } +#endif return addr; } diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index 649b010da00b..12cbe2b88c0f 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -57,7 +57,7 @@ __setup("cpu0_hotplug", enable_cpu0_hotplug); * * This is only called for debugging CPU offline/online feature. 
*/ -int __ref _debug_hotplug_cpu(int cpu, int action) +int _debug_hotplug_cpu(int cpu, int action) { struct device *dev = get_cpu_device(cpu); int ret; @@ -104,7 +104,7 @@ static int __init debug_hotplug_cpu(void) late_initcall_sync(debug_hotplug_cpu); #endif /* CONFIG_DEBUG_HOTPLUG_CPU0 */ -int __ref arch_register_cpu(int num) +int arch_register_cpu(int num) { struct cpuinfo_x86 *c = &cpu_data(num); diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c index 25b993729f9b..80bb24d9b880 100644 --- a/arch/x86/kernel/trace_clock.c +++ b/arch/x86/kernel/trace_clock.c @@ -12,10 +12,5 @@ */ u64 notrace trace_clock_x86_tsc(void) { - u64 ret; - - rdtsc_barrier(); - rdtscll(ret); - - return ret; + return rdtsc_ordered(); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index f5791927aa64..346eec73f7db 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -62,6 +62,7 @@ #include <asm/fpu/xstate.h> #include <asm/trace/mpx.h> #include <asm/mpx.h> +#include <asm/vm86.h> #ifdef CONFIG_X86_64 #include <asm/x86_init.h> @@ -108,13 +109,10 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) preempt_count_dec(); } -enum ctx_state ist_enter(struct pt_regs *regs) +void ist_enter(struct pt_regs *regs) { - enum ctx_state prev_state; - if (user_mode(regs)) { - /* Other than that, we're just an exception. */ - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); } else { /* * We might have interrupted pretty much anything. In @@ -123,32 +121,25 @@ enum ctx_state ist_enter(struct pt_regs *regs) * but we need to notify RCU. */ rcu_nmi_enter(); - prev_state = CONTEXT_KERNEL; /* the value is irrelevant. */ } /* - * We are atomic because we're on the IST stack (or we're on x86_32, - * in which case we still shouldn't schedule). - * - * This must be after exception_enter(), because exception_enter() - * won't do anything if in_interrupt() returns true. + * We are atomic because we're on the IST stack; or we're on + * x86_32, in which case we still shouldn't schedule; or we're + * on x86_64 and entered from user mode, in which case we're + * still atomic unless ist_begin_non_atomic is called. */ preempt_count_add(HARDIRQ_OFFSET); /* This code is a bit fragile. Test it. */ - rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work"); - - return prev_state; + RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); } -void ist_exit(struct pt_regs *regs, enum ctx_state prev_state) +void ist_exit(struct pt_regs *regs) { - /* Must be before exception_exit. */ preempt_count_sub(HARDIRQ_OFFSET); - if (user_mode(regs)) - return exception_exit(prev_state); - else + if (!user_mode(regs)) rcu_nmi_exit(); } @@ -162,7 +153,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state) * a double fault, it can be safe to schedule. ist_begin_non_atomic() * begins a non-atomic section within an ist_enter()/ist_exit() region. * Callers are responsible for enabling interrupts themselves inside - * the non-atomic section, and callers must call is_end_non_atomic() + * the non-atomic section, and callers must call ist_end_non_atomic() * before ist_exit(). 
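 *
 * A rough calling sequence, for illustration only (the concrete users of
 * this API, e.g. the machine-check path, may differ in detail):
 *
 *	ist_enter(regs);
 *	...atomic work, interrupts off...
 *	if (user_mode(regs)) {
 *		ist_begin_non_atomic(regs);
 *		local_irq_enable();
 *		...work that may sleep or deliver signals...
 *		local_irq_disable();
 *		ist_end_non_atomic();
 *	}
 *	ist_exit(regs);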
*/ void ist_begin_non_atomic(struct pt_regs *regs) @@ -289,17 +280,16 @@ NOKPROBE_SYMBOL(do_trap); static void do_error_trap(struct pt_regs *regs, long error_code, char *str, unsigned long trapnr, int signr) { - enum ctx_state prev_state = exception_enter(); siginfo_t info; + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { conditional_sti(regs); do_trap(trapnr, signr, str, regs, error_code, fill_trap_info(regs, signr, trapnr, &info)); } - - exception_exit(prev_state); } #define DO_ERROR(trapnr, signr, str, name) \ @@ -351,7 +341,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) } #endif - ist_enter(regs); /* Discard prev_state because we won't return. */ + ist_enter(regs); notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); tsk->thread.error_code = error_code; @@ -371,14 +361,13 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; const struct bndcsr *bndcsr; siginfo_t *info; - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); if (notify_die(DIE_TRAP, "bounds", regs, error_code, X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP) - goto exit; + return; conditional_sti(regs); if (!user_mode(regs)) @@ -435,9 +424,8 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) die("bounds", regs, error_code); } -exit: - exception_exit(prev_state); return; + exit_trap: /* * This path out is for all the cases where we could not @@ -447,35 +435,33 @@ exit_trap: * time.. */ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL); - exception_exit(prev_state); } dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; - enum ctx_state prev_state; - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); conditional_sti(regs); if (v8086_mode(regs)) { local_irq_enable(); handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); - goto exit; + return; } tsk = current; if (!user_mode(regs)) { if (fixup_exception(regs)) - goto exit; + return; tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; if (notify_die(DIE_GPF, "general protection fault", regs, error_code, X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) die("general protection fault", regs, error_code); - goto exit; + return; } tsk->thread.error_code = error_code; @@ -491,16 +477,12 @@ do_general_protection(struct pt_regs *regs, long error_code) } force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); -exit: - exception_exit(prev_state); } NOKPROBE_SYMBOL(do_general_protection); /* May run on IST stack. */ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - #ifdef CONFIG_DYNAMIC_FTRACE /* * ftrace must be first, everything else may cause a recursive crash. 
@@ -513,7 +495,8 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) if (poke_int3_handler(regs)) return; - prev_state = ist_enter(regs); + ist_enter(regs); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -539,7 +522,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) preempt_conditional_cli(regs); debug_stack_usage_dec(); exit: - ist_exit(regs, prev_state); + ist_exit(regs); } NOKPROBE_SYMBOL(do_int3); @@ -615,12 +598,11 @@ NOKPROBE_SYMBOL(fixup_bad_iret); dotraplinkage void do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; - enum ctx_state prev_state; int user_icebp = 0; unsigned long dr6; int si_code; - prev_state = ist_enter(regs); + ist_enter(regs); get_debugreg(dr6, 6); @@ -695,7 +677,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: - ist_exit(regs, prev_state); + ist_exit(regs); } NOKPROBE_SYMBOL(do_debug); @@ -747,21 +729,15 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); math_error(regs, error_code, X86_TRAP_MF); - exception_exit(prev_state); } dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); math_error(regs, error_code, X86_TRAP_XF); - exception_exit(prev_state); } dotraplinkage void @@ -773,9 +749,7 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); BUG_ON(use_eager_fpu()); #ifdef CONFIG_MATH_EMULATION @@ -786,7 +760,6 @@ do_device_not_available(struct pt_regs *regs, long error_code) info.regs = regs; math_emulate(&info); - exception_exit(prev_state); return; } #endif @@ -794,7 +767,6 @@ do_device_not_available(struct pt_regs *regs, long error_code) #ifdef CONFIG_X86_32 conditional_sti(regs); #endif - exception_exit(prev_state); } NOKPROBE_SYMBOL(do_device_not_available); @@ -802,9 +774,8 @@ NOKPROBE_SYMBOL(do_device_not_available); dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) { siginfo_t info; - enum ctx_state prev_state; - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); local_irq_enable(); info.si_signo = SIGILL; @@ -816,7 +787,6 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, &info); } - exception_exit(prev_state); } #endif diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7437b41f6a47..79055cf2c497 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) data = cyc2ns_write_begin(cpu); - rdtscll(tsc_now); + tsc_now = rdtsc(); ns_now = cycles_2_ns(tsc_now); /* @@ -290,12 +290,20 @@ u64 native_sched_clock(void) } /* read the Time Stamp Counter: */ - rdtscll(tsc_now); + tsc_now = 
rdtsc(); /* return the value in ns */ return cycles_2_ns(tsc_now); } +/* + * Generate a sched_clock if you already have a TSC value. + */ +u64 native_sched_clock_from_tsc(u64 tsc) +{ + return cycles_2_ns(tsc); +} + /* We need to define a real function for sched_clock, to override the weak default version */ #ifdef CONFIG_PARAVIRT @@ -308,12 +316,6 @@ unsigned long long sched_clock(void) __attribute__((alias("native_sched_clock"))); #endif -unsigned long long native_read_tsc(void) -{ - return __native_read_tsc(); -} -EXPORT_SYMBOL(native_read_tsc); - int check_tsc_unstable(void) { return tsc_unstable; @@ -976,7 +978,7 @@ static struct clocksource clocksource_tsc; */ static cycle_t read_tsc(struct clocksource *cs) { - return (cycle_t)get_cycles(); + return (cycle_t)rdtsc_ordered(); } /* diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index dd8d0791dfb5..78083bf23ed1 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -39,16 +39,15 @@ static cycles_t max_warp; static int nr_warps; /* - * TSC-warp measurement loop running on both CPUs: + * TSC-warp measurement loop running on both CPUs. This is not called + * if there is no TSC. */ static void check_tsc_warp(unsigned int timeout) { cycles_t start, now, prev, end; int i; - rdtsc_barrier(); - start = get_cycles(); - rdtsc_barrier(); + start = rdtsc_ordered(); /* * The measurement runs for 'timeout' msecs: */ @@ -63,9 +62,7 @@ static void check_tsc_warp(unsigned int timeout) */ arch_spin_lock(&sync_lock); prev = last_tsc; - rdtsc_barrier(); - now = get_cycles(); - rdtsc_barrier(); + now = rdtsc_ordered(); last_tsc = now; arch_spin_unlock(&sync_lock); @@ -126,7 +123,7 @@ void check_tsc_sync_source(int cpu) /* * No need to check if we already know that the TSC is not - * synchronized: + * synchronized or if we have no TSC. */ if (unsynchronized_tsc()) return; @@ -190,6 +187,7 @@ void check_tsc_sync_target(void) { int cpus = 2; + /* Also aborts if there is no TSC. */ if (unsynchronized_tsc() || tsc_clocksource_reliable) return; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 66476244731e..bf4db6eaec8f 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -985,3 +985,12 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs return -1; } + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */ + return regs->sp < ret->stack; + else + return regs->sp <= ret->stack; +} diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index fc9db6ef2a95..abd8b856bd2b 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -44,11 +44,14 @@ #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/stddef.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/irq.h> +#include <asm/traps.h> +#include <asm/vm86.h> /* * Known problems: @@ -66,10 +69,6 @@ */ -#define KVM86 ((struct kernel_vm86_struct *)regs) -#define VMPI KVM86->vm86plus - - /* * 8- and 16-bit register defines.. 
*/ @@ -81,8 +80,8 @@ /* * virtual flags (16 and 32-bit versions) */ -#define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) -#define VEFLAGS (current->thread.v86flags) +#define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags)) +#define VEFLAGS (current->thread.vm86->veflags) #define set_flags(X, new, mask) \ ((X) = ((X) & ~(mask)) | ((new) & (mask))) @@ -90,46 +89,13 @@ #define SAFE_MASK (0xDD5) #define RETURN_MASK (0xDFF) -/* convert kernel_vm86_regs to vm86_regs */ -static int copy_vm86_regs_to_user(struct vm86_regs __user *user, - const struct kernel_vm86_regs *regs) -{ - int ret = 0; - - /* - * kernel_vm86_regs is missing gs, so copy everything up to - * (but not including) orig_eax, and then rest including orig_eax. - */ - ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); - ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax, - sizeof(struct kernel_vm86_regs) - - offsetof(struct kernel_vm86_regs, pt.orig_ax)); - - return ret; -} - -/* convert vm86_regs to kernel_vm86_regs */ -static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, - const struct vm86_regs __user *user, - unsigned extra) -{ - int ret = 0; - - /* copy ax-fs inclusive */ - ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax)); - /* copy orig_ax-__gsh+extra */ - ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax, - sizeof(struct kernel_vm86_regs) - - offsetof(struct kernel_vm86_regs, pt.orig_ax) + - extra); - return ret; -} - -struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) +void save_v86_state(struct kernel_vm86_regs *regs, int retval) { struct tss_struct *tss; - struct pt_regs *ret; - unsigned long tmp; + struct task_struct *tsk = current; + struct vm86plus_struct __user *user; + struct vm86 *vm86 = current->thread.vm86; + long err = 0; /* * This gets called from entry.S with interrupts disabled, but @@ -138,31 +104,57 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) */ local_irq_enable(); - if (!current->thread.vm86_info) { - pr_alert("no vm86_info: BAD\n"); + if (!vm86 || !vm86->user_vm86) { + pr_alert("no user_vm86: BAD\n"); do_exit(SIGSEGV); } - set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); - tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs); - tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap); - if (tmp) { - pr_alert("could not access userspace vm86_info\n"); + set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); + user = vm86->user_vm86; + + if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
+ sizeof(struct vm86plus_struct) : + sizeof(struct vm86_struct))) { + pr_alert("could not access userspace vm86 info\n"); + do_exit(SIGSEGV); + } + + put_user_try { + put_user_ex(regs->pt.bx, &user->regs.ebx); + put_user_ex(regs->pt.cx, &user->regs.ecx); + put_user_ex(regs->pt.dx, &user->regs.edx); + put_user_ex(regs->pt.si, &user->regs.esi); + put_user_ex(regs->pt.di, &user->regs.edi); + put_user_ex(regs->pt.bp, &user->regs.ebp); + put_user_ex(regs->pt.ax, &user->regs.eax); + put_user_ex(regs->pt.ip, &user->regs.eip); + put_user_ex(regs->pt.cs, &user->regs.cs); + put_user_ex(regs->pt.flags, &user->regs.eflags); + put_user_ex(regs->pt.sp, &user->regs.esp); + put_user_ex(regs->pt.ss, &user->regs.ss); + put_user_ex(regs->es, &user->regs.es); + put_user_ex(regs->ds, &user->regs.ds); + put_user_ex(regs->fs, &user->regs.fs); + put_user_ex(regs->gs, &user->regs.gs); + + put_user_ex(vm86->screen_bitmap, &user->screen_bitmap); + } put_user_catch(err); + if (err) { + pr_alert("could not access userspace vm86 info\n"); do_exit(SIGSEGV); } tss = &per_cpu(cpu_tss, get_cpu()); - current->thread.sp0 = current->thread.saved_sp0; - current->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tss, &current->thread); - current->thread.saved_sp0 = 0; + tsk->thread.sp0 = vm86->saved_sp0; + tsk->thread.sysenter_cs = __KERNEL_CS; + load_sp0(tss, &tsk->thread); + vm86->saved_sp0 = 0; put_cpu(); - ret = KVM86->regs32; + memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs)); - ret->fs = current->thread.saved_fs; - set_user_gs(ret, current->thread.saved_gs); + lazy_load_gs(vm86->regs32.gs); - return ret; + regs->pt.ax = retval; } static void mark_screen_rdonly(struct mm_struct *mm) @@ -200,45 +192,16 @@ out: static int do_vm86_irq_handling(int subfunction, int irqnumber); -static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); +static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus); -SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86) +SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86) { - struct kernel_vm86_struct info; /* declare this _on top_, - * this avoids wasting of stack space. - * This remains on the stack until we - * return to 32 bit user space. - */ - struct task_struct *tsk = current; - int tmp; - - if (tsk->thread.saved_sp0) - return -EPERM; - tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, - offsetof(struct kernel_vm86_struct, vm86plus) - - sizeof(info.regs)); - if (tmp) - return -EFAULT; - memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); - info.regs32 = current_pt_regs(); - tsk->thread.vm86_info = v86; - do_sys_vm86(&info, tsk); - return 0; /* we never return here */ + return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false); } SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) { - struct kernel_vm86_struct info; /* declare this _on top_, - * this avoids wasting of stack space. - * This remains on the stack until we - * return to 32 bit user space.
- */ - struct task_struct *tsk; - int tmp; - struct vm86plus_struct __user *v86; - - tsk = current; switch (cmd) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: @@ -256,114 +219,133 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ - if (tsk->thread.saved_sp0) - return -EPERM; - v86 = (struct vm86plus_struct __user *)arg; - tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, - offsetof(struct kernel_vm86_struct, regs32) - - sizeof(info.regs)); - if (tmp) - return -EFAULT; - info.regs32 = current_pt_regs(); - info.vm86plus.is_vm86pus = 1; - tsk->thread.vm86_info = (struct vm86_struct __user *)v86; - do_sys_vm86(&info, tsk); - return 0; /* we never return here */ + return do_sys_vm86((struct vm86plus_struct __user *) arg, true); } -static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) +static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) { struct tss_struct *tss; -/* - * make sure the vm86() system call doesn't try to do anything silly - */ - info->regs.pt.ds = 0; - info->regs.pt.es = 0; - info->regs.pt.fs = 0; -#ifndef CONFIG_X86_32_LAZY_GS - info->regs.pt.gs = 0; -#endif + struct task_struct *tsk = current; + struct vm86 *vm86 = tsk->thread.vm86; + struct kernel_vm86_regs vm86regs; + struct pt_regs *regs = current_pt_regs(); + unsigned long err = 0; + + if (!vm86) { + if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) + return -ENOMEM; + tsk->thread.vm86 = vm86; + } + if (vm86->saved_sp0) + return -EPERM; + + if (!access_ok(VERIFY_READ, user_vm86, plus ? + sizeof(struct vm86_struct) : + sizeof(struct vm86plus_struct))) + return -EFAULT; + + memset(&vm86regs, 0, sizeof(vm86regs)); + get_user_try { + unsigned short seg; + get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx); + get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx); + get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx); + get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi); + get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi); + get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp); + get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax); + get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip); + get_user_ex(seg, &user_vm86->regs.cs); + vm86regs.pt.cs = seg; + get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags); + get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp); + get_user_ex(seg, &user_vm86->regs.ss); + vm86regs.pt.ss = seg; + get_user_ex(vm86regs.es, &user_vm86->regs.es); + get_user_ex(vm86regs.ds, &user_vm86->regs.ds); + get_user_ex(vm86regs.fs, &user_vm86->regs.fs); + get_user_ex(vm86regs.gs, &user_vm86->regs.gs); + + get_user_ex(vm86->flags, &user_vm86->flags); + get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap); + get_user_ex(vm86->cpu_type, &user_vm86->cpu_type); + } get_user_catch(err); + if (err) + return err; + + if (copy_from_user(&vm86->int_revectored, + &user_vm86->int_revectored, + sizeof(struct revectored_struct))) + return -EFAULT; + if (copy_from_user(&vm86->int21_revectored, + &user_vm86->int21_revectored, + sizeof(struct revectored_struct))) + return -EFAULT; + if (plus) { + if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus, + sizeof(struct vm86plus_info_struct))) + return -EFAULT; + vm86->vm86plus.is_vm86pus = 1; + } else + memset(&vm86->vm86plus, 0, + sizeof(struct vm86plus_info_struct)); + + memcpy(&vm86->regs32, regs, sizeof(struct pt_regs)); + vm86->user_vm86 = user_vm86; /* * The flags register is also special: we cannot trust that the user * has set it up safely, so 
this makes sure interrupt etc flags are * inherited from protected mode. */ - VEFLAGS = info->regs.pt.flags; - info->regs.pt.flags &= SAFE_MASK; - info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK; - info->regs.pt.flags |= X86_VM_MASK; + VEFLAGS = vm86regs.pt.flags; + vm86regs.pt.flags &= SAFE_MASK; + vm86regs.pt.flags |= regs->flags & ~SAFE_MASK; + vm86regs.pt.flags |= X86_VM_MASK; + + vm86regs.pt.orig_ax = regs->orig_ax; - switch (info->cpu_type) { + switch (vm86->cpu_type) { case CPU_286: - tsk->thread.v86mask = 0; + vm86->veflags_mask = 0; break; case CPU_386: - tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; + vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; case CPU_486: - tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; + vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; default: - tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; + vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; } /* - * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL) + * Save old state */ - info->regs32->ax = VM86_SIGNAL; - tsk->thread.saved_sp0 = tsk->thread.sp0; - tsk->thread.saved_fs = info->regs32->fs; - tsk->thread.saved_gs = get_user_gs(info->regs32); + vm86->saved_sp0 = tsk->thread.sp0; + lazy_save_gs(vm86->regs32.gs); tss = &per_cpu(cpu_tss, get_cpu()); - tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; + /* make room for real-mode segments */ + tsk->thread.sp0 += 16; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; load_sp0(tss, &tsk->thread); put_cpu(); - tsk->thread.screen_bitmap = info->screen_bitmap; - if (info->flags & VM86_SCREEN_BITMAP) + if (vm86->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk->mm); - /*call __audit_syscall_exit since we do not exit via the normal paths */ -#ifdef CONFIG_AUDITSYSCALL - if (unlikely(current->audit_context)) - __audit_syscall_exit(1, 0); -#endif - - __asm__ __volatile__( - "movl %0,%%esp\n\t" - "movl %1,%%ebp\n\t" -#ifdef CONFIG_X86_32_LAZY_GS - "mov %2, %%gs\n\t" -#endif - "jmp resume_userspace" - : /* no outputs */ - :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0)); - /* we never return here */ -} - -static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) -{ - struct pt_regs *regs32; - - regs32 = save_v86_state(regs16); - regs32->ax = retval; - __asm__ __volatile__("movl %0,%%esp\n\t" - "movl %1,%%ebp\n\t" - "jmp resume_userspace" - : : "r" (regs32), "r" (current_thread_info())); + memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs)); + force_iret(); + return regs->ax; } static inline void set_IF(struct kernel_vm86_regs *regs) { VEFLAGS |= X86_EFLAGS_VIF; - if (VEFLAGS & X86_EFLAGS_VIP) - return_to_32bit(regs, VM86_STI); } static inline void clear_IF(struct kernel_vm86_regs *regs) @@ -395,7 +377,7 @@ static inline void clear_AC(struct kernel_vm86_regs *regs) static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) { - set_flags(VEFLAGS, flags, current->thread.v86mask); + set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); @@ -405,7 +387,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) { - set_flags(VFLAGS, flags, current->thread.v86mask); + set_flags(VFLAGS, flags, 
current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); @@ -420,7 +402,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) if (VEFLAGS & X86_EFLAGS_VIF) flags |= X86_EFLAGS_IF; flags |= X86_EFLAGS_IOPL; - return flags | (VEFLAGS & current->thread.v86mask); + return flags | (VEFLAGS & current->thread.vm86->veflags_mask); } static inline int is_revectored(int nr, struct revectored_struct *bitmap) @@ -518,12 +500,13 @@ static void do_int(struct kernel_vm86_regs *regs, int i, { unsigned long __user *intr_ptr; unsigned long segoffs; + struct vm86 *vm86 = current->thread.vm86; if (regs->pt.cs == BIOSSEG) goto cannot_handle; - if (is_revectored(i, &KVM86->int_revectored)) + if (is_revectored(i, &vm86->int_revectored)) goto cannot_handle; - if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) + if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) @@ -542,18 +525,16 @@ static void do_int(struct kernel_vm86_regs *regs, int i, return; cannot_handle: - return_to_32bit(regs, VM86_INTx + (i << 8)); + save_v86_state(regs, VM86_INTx + (i << 8)); } int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) { - if (VMPI.is_vm86pus) { + struct vm86 *vm86 = current->thread.vm86; + + if (vm86->vm86plus.is_vm86pus) { if ((trapno == 3) || (trapno == 1)) { - KVM86->regs32->ax = VM86_TRAP + (trapno << 8); - /* setting this flag forces the code in entry_32.S to - the path where we call save_v86_state() and change - the stack pointer to KVM86->regs32 */ - set_thread_flag(TIF_NOTIFY_RESUME); + save_v86_state(regs, VM86_TRAP + (trapno << 8)); return 0; } do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); @@ -574,16 +555,11 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) unsigned char __user *ssp; unsigned short ip, sp, orig_flags; int data32, pref_done; + struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus; #define CHECK_IF_IN_TRAP \ - if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ + if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \ newflags |= X86_EFLAGS_TF -#define VM86_FAULT_RETURN do { \ - if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \ - return_to_32bit(regs, VM86_PICRETURN); \ - if (orig_flags & X86_EFLAGS_TF) \ - handle_vm86_trap(regs, 0, 1); \ - return; } while (0) orig_flags = *(unsigned short *)&regs->pt.flags; @@ -622,7 +598,7 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) SP(regs) -= 2; } IP(regs) = ip; - VM86_FAULT_RETURN; + goto vm86_fault_return; /* popf */ case 0x9d: @@ -642,16 +618,18 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) else set_vflags_short(newflags, regs); - VM86_FAULT_RETURN; + goto check_vip; } /* int xx */ case 0xcd: { int intno = popb(csp, ip, simulate_sigsegv); IP(regs) = ip; - if (VMPI.vm86dbg_active) { - if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3]) - return_to_32bit(regs, VM86_INTx + (intno << 8)); + if (vmpi->vm86dbg_active) { + if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) { + save_v86_state(regs, VM86_INTx + (intno << 8)); + return; + } } do_int(regs, intno, ssp, sp); return; @@ -682,14 +660,14 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) } else { set_vflags_short(newflags, regs); } - VM86_FAULT_RETURN; + goto check_vip; } /* cli */
case 0xfa: IP(regs) = ip; clear_IF(regs); - VM86_FAULT_RETURN; + goto vm86_fault_return; /* sti */ /* @@ -701,14 +679,29 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) case 0xfb: IP(regs) = ip; set_IF(regs); - VM86_FAULT_RETURN; + goto check_vip; default: - return_to_32bit(regs, VM86_UNKNOWN); + save_v86_state(regs, VM86_UNKNOWN); } return; +check_vip: + if (VEFLAGS & X86_EFLAGS_VIP) { + save_v86_state(regs, VM86_STI); + return; + } + +vm86_fault_return: + if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) { + save_v86_state(regs, VM86_PICRETURN); + return; + } + if (orig_flags & X86_EFLAGS_TF) + handle_vm86_trap(regs, 0, X86_TRAP_DB); + return; + simulate_sigsegv: /* FIXME: After a long discussion with Stas we finally * agreed, that this is wrong. Here we should @@ -720,7 +713,7 @@ simulate_sigsegv: * should be a mixture of the two, but how do we * get the information? [KD] */ - return_to_32bit(regs, VM86_UNKNOWN); + save_v86_state(regs, VM86_UNKNOWN); } /* ---------------- vm86 special IRQ passing stuff ----------------- */ diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 67d215cb8953..a1ff508bb423 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -12,7 +12,9 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ - i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o + i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ + hyperv.o + kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o kvm-intel-y += vmx.o pmu_intel.o kvm-amd-y += svm.o pmu_amd.o diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c new file mode 100644 index 000000000000..a8160d2ae362 --- /dev/null +++ b/arch/x86/kvm/hyperv.c @@ -0,0 +1,377 @@ +/* + * KVM Microsoft Hyper-V emulation + * + * derived from arch/x86/kvm/x86.c + * + * Copyright (C) 2006 Qumranet, Inc. + * Copyright (C) 2008 Qumranet, Inc. + * Copyright IBM Corporation, 2008 + * Copyright 2010 Red Hat, Inc. and/or its affiliates. + * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> + * + * Authors: + * Avi Kivity <avi@qumranet.com> + * Yaniv Kamay <yaniv@qumranet.com> + * Amit Shah <amit.shah@qumranet.com> + * Ben-Ami Yassour <benami@il.ibm.com> + * Andrey Smetanin <asmetanin@virtuozzo.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "x86.h" +#include "lapic.h" +#include "hyperv.h" + +#include <linux/kvm_host.h> +#include <trace/events/kvm.h> + +#include "trace.h" + +static bool kvm_hv_msr_partition_wide(u32 msr) +{ + bool r = false; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + case HV_X64_MSR_HYPERCALL: + case HV_X64_MSR_REFERENCE_TSC: + case HV_X64_MSR_TIME_REF_COUNT: + case HV_X64_MSR_CRASH_CTL: + case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: + r = true; + break; + } + + return r; +} + +static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, + u32 index, u64 *pdata) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) + return -EINVAL; + + *pdata = hv->hv_crash_param[index]; + return 0; +} + +static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + *pdata = hv->hv_crash_ctl; + return 0; +} + +static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + if (host) + hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY; + + if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) { + + vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", + hv->hv_crash_param[0], + hv->hv_crash_param[1], + hv->hv_crash_param[2], + hv->hv_crash_param[3], + hv->hv_crash_param[4]); + + /* Send notification about crash to user space */ + kvm_make_request(KVM_REQ_HV_CRASH, vcpu); + } + + return 0; +} + +static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, + u32 index, u64 data) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) + return -EINVAL; + + hv->hv_crash_param[index] = data; + return 0; +} + +static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, + bool host) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_hv *hv = &kvm->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + hv->hv_guest_os_id = data; + /* setting guest os id to zero disables hypercall page */ + if (!hv->hv_guest_os_id) + hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; + break; + case HV_X64_MSR_HYPERCALL: { + u64 gfn; + unsigned long addr; + u8 instructions[4]; + + /* if guest os id is not set hypercall should remain disabled */ + if (!hv->hv_guest_os_id) + break; + if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { + hv->hv_hypercall = data; + break; + } + gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; + addr = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(addr)) + return 1; + kvm_x86_ops->patch_hypercall(vcpu, instructions); + ((unsigned char *)instructions)[3] = 0xc3; /* ret */ + if (__copy_to_user((void __user *)addr, instructions, 4)) + return 1; + hv->hv_hypercall = data; + mark_page_dirty(kvm, gfn); + break; + } + case HV_X64_MSR_REFERENCE_TSC: { + u64 gfn; + HV_REFERENCE_TSC_PAGE tsc_ref; + + memset(&tsc_ref, 0, sizeof(tsc_ref)); + hv->hv_tsc_page = data; + if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) + break; + gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; + if (kvm_write_guest( + kvm, + gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, + &tsc_ref, sizeof(tsc_ref))) + return 1; + mark_page_dirty(kvm, gfn); + break; + } + case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: + return kvm_hv_msr_set_crash_data(vcpu, + msr - HV_X64_MSR_CRASH_P0, + data); + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_msr_set_crash_ctl(vcpu, data, host); + default: + vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n", + msr, data); + return 1; + } + return 0; +} + +static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) +{ + struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_APIC_ASSIST_PAGE: { + u64 gfn; + unsigned long addr; + + if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { + hv->hv_vapic = data; + if (kvm_lapic_enable_pv_eoi(vcpu, 0)) + return 1; + break; + } + gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; + addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (kvm_is_error_hva(addr)) + return 1; + if (__clear_user((void __user *)addr, PAGE_SIZE)) + return 1; + hv->hv_vapic = data; + kvm_vcpu_mark_page_dirty(vcpu, gfn); + if (kvm_lapic_enable_pv_eoi(vcpu, + gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) + return 1; + break; + } + case HV_X64_MSR_EOI: + return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); + case HV_X64_MSR_ICR: + return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); + case HV_X64_MSR_TPR: + return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); + default: + vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n", + msr, data); + return 1; + } + + return 0; +} + +static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + u64 data = 0; + struct kvm *kvm = vcpu->kvm; + struct kvm_hv *hv = &kvm->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + data = hv->hv_guest_os_id; + break; + case HV_X64_MSR_HYPERCALL: + data = hv->hv_hypercall; + break; + case HV_X64_MSR_TIME_REF_COUNT: { + data = + div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); + break; + } + case HV_X64_MSR_REFERENCE_TSC: + data = hv->hv_tsc_page; + break; + case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: + return kvm_hv_msr_get_crash_data(vcpu, + msr - HV_X64_MSR_CRASH_P0, + pdata); + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_msr_get_crash_ctl(vcpu, pdata); + default: + vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + return 1; + } + + *pdata = data; + return 0; +} + +static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + u64 data = 0; + struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_VP_INDEX: { + int r; + struct kvm_vcpu *v; + + kvm_for_each_vcpu(r, v, vcpu->kvm) { + if (v == vcpu) { + data = r; + break; + } + } + break; + } + case HV_X64_MSR_EOI: + return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); + case HV_X64_MSR_ICR: + return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); + case HV_X64_MSR_TPR: + return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); + case HV_X64_MSR_APIC_ASSIST_PAGE: + data = hv->hv_vapic; + break; + default: + vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + return 1; + } + *pdata = data; + return 0; +} + +int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) +{ + if (kvm_hv_msr_partition_wide(msr)) { + int r; + + mutex_lock(&vcpu->kvm->lock); + r = kvm_hv_set_msr_pw(vcpu, msr, data, host); + mutex_unlock(&vcpu->kvm->lock); + return r; + } else + return kvm_hv_set_msr(vcpu, msr, data); +} + +int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + if (kvm_hv_msr_partition_wide(msr)) { + int r; + + mutex_lock(&vcpu->kvm->lock); + r = kvm_hv_get_msr_pw(vcpu, msr, pdata); + mutex_unlock(&vcpu->kvm->lock); + return r; + } else + return kvm_hv_get_msr(vcpu, msr, pdata); +} + +bool kvm_hv_hypercall_enabled(struct kvm *kvm) +{ + return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; +} + +int kvm_hv_hypercall(struct kvm_vcpu *vcpu) +{ + u64 param, ingpa, outgpa, ret; + uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; + bool fast, longmode; + + /* + * hypercall generates UD from non zero cpl and real mode + * per HYPER-V spec + */ + if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + + longmode = is_64_bit_mode(vcpu); + + if (!longmode) { + param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); + ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); + outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); + } +#ifdef CONFIG_X86_64 + else { + param = kvm_register_read(vcpu, VCPU_REGS_RCX); + ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); + outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); + } +#endif + + code = param & 0xffff; + fast = (param >> 16) & 0x1; + rep_cnt = (param >> 32) & 0xfff; + rep_idx = (param >> 48) & 0xfff; + + trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); + + switch (code) { + case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: + kvm_vcpu_on_spin(vcpu); + break; + default: + res = HV_STATUS_INVALID_HYPERCALL_CODE; + break; + } + + ret = res | (((u64)rep_done & 0xfff) << 32); + if (longmode) { + kvm_register_write(vcpu, VCPU_REGS_RAX, ret); + } else { + kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); + kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); + } + + return 1; +} diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h new file mode 100644 index 000000000000..c7bce559f67b --- /dev/null 
+++ b/arch/x86/kvm/hyperv.h @@ -0,0 +1,32 @@ +/* + * KVM Microsoft Hyper-V emulation + * + * derived from arch/x86/kvm/x86.c + * + * Copyright (C) 2006 Qumranet, Inc. + * Copyright (C) 2008 Qumranet, Inc. + * Copyright IBM Corporation, 2008 + * Copyright 2010 Red Hat, Inc. and/or its affiliates. + * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> + * + * Authors: + * Avi Kivity <avi@qumranet.com> + * Yaniv Kamay <yaniv@qumranet.com> + * Amit Shah <amit.shah@qumranet.com> + * Ben-Ami Yassour <benami@il.ibm.com> + * Andrey Smetanin <asmetanin@virtuozzo.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef __ARCH_X86_KVM_HYPERV_H__ +#define __ARCH_X86_KVM_HYPERV_H__ + +int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); +int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); +bool kvm_hv_hypercall_enabled(struct kvm *kvm); +int kvm_hv_hypercall(struct kvm_vcpu *vcpu); + +#endif diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index fef922ff2635..7cc2360f1848 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -651,15 +651,10 @@ fail_unlock: return NULL; } -void kvm_destroy_pic(struct kvm *kvm) +void kvm_destroy_pic(struct kvm_pic *vpic) { - struct kvm_pic *vpic = kvm->arch.vpic; - - if (vpic) { - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); - kvm->arch.vpic = NULL; - kfree(vpic); - } + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); + kfree(vpic); } diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index ad68c73008c5..3d782a2c336a 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -74,7 +74,7 @@ struct kvm_pic { }; struct kvm_pic *kvm_create_pic(struct kvm *kvm); -void kvm_destroy_pic(struct kvm *kvm); +void kvm_destroy_pic(struct kvm_pic *vpic); int kvm_pic_read_irq(struct kvm *kvm); void kvm_pic_update_irq(struct kvm_pic *s); @@ -85,11 +85,11 @@ static inline struct kvm_pic *pic_irqchip(struct kvm *kvm) static inline int irqchip_in_kernel(struct kvm *kvm) { - int ret; + struct kvm_pic *vpic = pic_irqchip(kvm); - ret = (pic_irqchip(kvm) != NULL); + /* Read vpic before kvm->irq_routing. */ smp_rmb(); - return ret; + return vpic != NULL; } void kvm_pic_reset(struct kvm_kpic_state *s); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 2a5ca97c263b..8d9013c5e1ee 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1172,7 +1172,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) tsc_deadline = apic->lapic_timer.expired_tscdeadline; apic->lapic_timer.expired_tscdeadline = 0; - guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc()); trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline); /* __delay is delay_tsc whenever the hardware has TSC, thus always. 
*/ @@ -1240,7 +1240,7 @@ static void start_apic_timer(struct kvm_lapic *apic) local_irq_save(flags); now = apic->lapic_timer.timer.base->get_time(); - guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc()); if (likely(tscdeadline > guest_tsc)) { ns = (tscdeadline - guest_tsc) * 1000000ULL; do_div(ns, this_tsc_khz); @@ -1900,8 +1900,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; - kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, - sizeof(u32)); + if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, + sizeof(u32))) + return; apic_set_tpr(vcpu->arch.apic, data & 0xff); } diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 71952748222a..764037991d26 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -91,7 +91,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) { - return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE; + return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE; } int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 44171462bd2a..fb16a8ea3dee 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep) { return ACCESS_ONCE(*sptep); } - -static bool __check_direct_spte_mmio_pf(u64 spte) -{ - /* It is valid if the spte is zapped. */ - return spte == 0ull; -} #else union split_spte { struct { @@ -478,23 +472,6 @@ retry: return spte.spte; } - -static bool __check_direct_spte_mmio_pf(u64 spte) -{ - union split_spte sspte = (union split_spte)spte; - u32 high_mmio_mask = shadow_mmio_mask >> 32; - - /* It is valid if the spte is zapped. */ - if (spte == 0ull) - return true; - - /* It is valid if the spte is being zapped. */ - if (sspte.spte_low == 0ull && - (sspte.spte_high & high_mmio_mask) == high_mmio_mask) - return true; - - return false; -} #endif static bool spte_is_locklessly_modifiable(u64 spte) @@ -3291,54 +3268,89 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); } -static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) +static bool +__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) { - if (direct) - return vcpu_match_mmio_gpa(vcpu, addr); + int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f; - return vcpu_match_mmio_gva(vcpu, addr); + return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) | + ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0); } +static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) +{ + return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); +} -/* - * On direct hosts, the last spte is only allows two states - * for mmio page fault: - * - It is the mmio spte - * - It is zapped or it is being zapped. - * - * This function completely checks the spte when the last spte - * is not the mmio spte. 
- */ -static bool check_direct_spte_mmio_pf(u64 spte) +static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) { - return __check_direct_spte_mmio_pf(spte); + return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); } -static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) +static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) +{ + if (direct) + return vcpu_match_mmio_gpa(vcpu, addr); + + return vcpu_match_mmio_gva(vcpu, addr); +} + +/* return true if reserved bit is detected on spte. */ +static bool +walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) { struct kvm_shadow_walk_iterator iterator; - u64 spte = 0ull; + u64 sptes[PT64_ROOT_LEVEL], spte = 0ull; + int root, leaf; + bool reserved = false; if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) - return spte; + goto exit; walk_shadow_page_lockless_begin(vcpu); - for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) + + for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level; + shadow_walk_okay(&iterator); + __shadow_walk_next(&iterator, spte)) { + leaf = iterator.level; + spte = mmu_spte_get_lockless(iterator.sptep); + + sptes[leaf - 1] = spte; + if (!is_shadow_present_pte(spte)) break; + + reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, + leaf); + } + walk_shadow_page_lockless_end(vcpu); - return spte; + if (reserved) { + pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", + __func__, addr); + while (root >= leaf) { + pr_err("------ spte 0x%llx level %d.\n", + sptes[root - 1], root); + root--; + } + } +exit: + *sptep = spte; + return reserved; } int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) { u64 spte; + bool reserved; if (quickly_check_mmio_pf(vcpu, addr, direct)) return RET_MMIO_PF_EMULATE; - spte = walk_shadow_page_get_mmio_spte(vcpu, addr); + reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); + if (unlikely(reserved)) + return RET_MMIO_PF_BUG; if (is_mmio_spte(spte)) { gfn_t gfn = get_mmio_spte_gfn(spte); @@ -3356,13 +3368,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) } /* - * It's ok if the gva is remapped by other cpus on shadow guest, - * it's a BUG if the gfn is not a mmio page. - */ - if (direct && !check_direct_spte_mmio_pf(spte)) - return RET_MMIO_PF_BUG; - - /* * If the page table is zapped by other cpus, let CPU fault again on * the address. 
*/ @@ -3604,19 +3609,21 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp #include "paging_tmpl.h" #undef PTTYPE -static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) +static void +__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, int level, bool nx, bool gbpages, + bool pse) { - int maxphyaddr = cpuid_maxphyaddr(vcpu); u64 exb_bit_rsvd = 0; u64 gbpages_bit_rsvd = 0; u64 nonleaf_bit8_rsvd = 0; - context->bad_mt_xwr = 0; + rsvd_check->bad_mt_xwr = 0; - if (!context->nx) + if (!nx) exb_bit_rsvd = rsvd_bits(63, 63); - if (!guest_cpuid_has_gbpages(vcpu)) + if (!gbpages) gbpages_bit_rsvd = rsvd_bits(7, 7); /* @@ -3626,80 +3633,95 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, if (guest_cpuid_is_amd(vcpu)) nonleaf_bit8_rsvd = rsvd_bits(8, 8); - switch (context->root_level) { + switch (level) { case PT32_ROOT_LEVEL: /* no rsvd bits for 2 level 4K page table entries */ - context->rsvd_bits_mask[0][1] = 0; - context->rsvd_bits_mask[0][0] = 0; - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[0][1] = 0; + rsvd_check->rsvd_bits_mask[0][0] = 0; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; - if (!is_pse(vcpu)) { - context->rsvd_bits_mask[1][1] = 0; + if (!pse) { + rsvd_check->rsvd_bits_mask[1][1] = 0; break; } if (is_cpuid_PSE36()) /* 36bits PSE 4MB page */ - context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); else /* 32 bits PSE 4MB page */ - context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); break; case PT32E_ROOT_LEVEL: - context->rsvd_bits_mask[0][2] = + rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(maxphyaddr, 63) | rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ - context->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62); /* PDE */ - context->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62); /* PTE */ - context->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62) | rsvd_bits(13, 20); /* large page */ - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; break; case PT64_ROOT_LEVEL: - context->rsvd_bits_mask[0][3] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[0][2] = exb_bit_rsvd | - nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | + nonleaf_bit8_rsvd | rsvd_bits(7, 7) | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | + nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; - context->rsvd_bits_mask[1][2] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[1][3] = + rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 29); - 
context->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 20); /* large page */ - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; break; } } -static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, bool execonly) +static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), context->root_level, + context->nx, guest_cpuid_has_gbpages(vcpu), + is_pse(vcpu)); +} + +static void +__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, bool execonly) { - int maxphyaddr = cpuid_maxphyaddr(vcpu); int pte; - context->rsvd_bits_mask[0][3] = + rsvd_check->rsvd_bits_mask[0][3] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - context->rsvd_bits_mask[0][2] = + rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - context->rsvd_bits_mask[0][1] = + rsvd_check->rsvd_bits_mask[0][1] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); /* large page */ - context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; - context->rsvd_bits_mask[1][2] = + rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); - context->rsvd_bits_mask[1][1] = + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; for (pte = 0; pte < 64; pte++) { int rwx_bits = pte & 7; @@ -3707,10 +3729,64 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, if (mt == 0x2 || mt == 0x3 || mt == 0x7 || rwx_bits == 0x2 || rwx_bits == 0x6 || (rwx_bits == 0x4 && !execonly)) - context->bad_mt_xwr |= (1ull << pte); + rsvd_check->bad_mt_xwr |= (1ull << pte); } } +static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), execonly); +} + +/* + * the page table on host is the shadow page table for the page + * table in guest or amd nested guest, its mmu features completely + * follow the features in guest. + */ +void +reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) +{ + __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + context->shadow_root_level, context->nx, + guest_cpuid_has_gbpages(vcpu), is_pse(vcpu)); +} +EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); + +/* + * the direct page table on host, use as much mmu features as + * possible, however, kvm currently does not do execution-protection. 
+ */ +static void +reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (guest_cpuid_is_amd(vcpu)) + __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + context->shadow_root_level, false, + cpu_has_gbpages, true); + else + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + false); + +} + +/* + * as the comments in reset_shadow_zero_bits_mask() except it + * is the shadow page table for intel nested guest. + */ +static void +reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, execonly); +} + static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, bool ept) { @@ -3889,6 +3965,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) update_permission_bitmask(vcpu, context, false); update_last_pte_bitmap(vcpu, context); + reset_tdp_shadow_zero_bits_mask(vcpu, context); } void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) @@ -3916,6 +3993,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) context->base_role.smap_andnot_wp = smap && !is_write_protection(vcpu); context->base_role.smm = is_smm(vcpu); + reset_shadow_zero_bits_mask(vcpu, context); } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); @@ -3939,6 +4017,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly) update_permission_bitmask(vcpu, context, true); reset_rsvds_bits_mask_ept(vcpu, context, execonly); + reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); @@ -4860,28 +4939,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) return nr_mmu_pages; } -int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) -{ - struct kvm_shadow_walk_iterator iterator; - u64 spte; - int nr_sptes = 0; - - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) - return nr_sptes; - - walk_shadow_page_lockless_begin(vcpu); - for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { - sptes[iterator.level-1] = spte; - nr_sptes++; - if (!is_shadow_present_pte(spte)) - break; - } - walk_shadow_page_lockless_end(vcpu); - - return nr_sptes; -} -EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); - void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { kvm_mmu_unload(vcpu); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 398d21c0f6dd..e4202e41d535 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -50,9 +50,11 @@ static inline u64 rsvd_bits(int s, int e) return ((1ULL << (e - s + 1)) - 1) << s; } -int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); +void +reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); + /* * Return values of handle_mmio_page_fault_common: * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 0f67d7e24800..736e6ab8784d 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -128,14 +128,6 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte) *access &= mask; } -static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) -{ - int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f; - - return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) | - ((mmu->bad_mt_xwr & (1ull << low6)) != 0); -} - static inline int FNAME(is_present_gpte)(unsigned 
long pte) { #if PTTYPE != PTTYPE_EPT @@ -172,7 +164,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte) { - if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) goto no_present; if (!FNAME(is_present_gpte)(gpte)) @@ -353,8 +345,7 @@ retry_walk: if (unlikely(!FNAME(is_present_gpte)(pte))) goto error; - if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, - walker->level))) { + if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK; goto error; } diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index 886aa25a7131..39b91127ef07 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -133,8 +133,6 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* MSR_K7_PERFCTRn */ pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0); if (pmc) { - if (!msr_info->host_initiated) - data = (s64)data; pmc->counter += data - pmc_read_counter(pmc); return 0; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 8e0c0844c6b9..fdb8cb63a6c0 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1139,7 +1139,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { u64 tsc; - tsc = svm_scale_tsc(vcpu, native_read_tsc()); + tsc = svm_scale_tsc(vcpu, rdtsc()); return target_tsc - tsc; } @@ -1173,6 +1173,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) return 0; + if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && + kvm_read_cr0(vcpu) & X86_CR0_CD) + return _PAGE_NOCACHE; + mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); return mtrr2protval[mtrr]; } @@ -1667,13 +1671,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; - /* - * re-enable caching here because the QEMU bios - * does not do it - this results in some delay at - * reboot - */ - if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); + + /* These are emulated via page tables. 
*/ + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); + svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); @@ -2106,6 +2107,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; vcpu->arch.mmu.shadow_root_level = get_npt_level(); + reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } @@ -3172,7 +3174,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_IA32_TSC: { msr_info->data = svm->vmcb->control.tsc_offset + - svm_scale_tsc(vcpu, native_read_tsc()); + svm_scale_tsc(vcpu, rdtsc()); break; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 83b7b5cd75d5..4a4eec30cc08 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void) { u64 host_tsc, tsc_offset; - rdtscll(host_tsc); + host_tsc = rdtsc(); tsc_offset = vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } @@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { - return target_tsc - native_read_tsc(); + return target_tsc - rdtsc(); } static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) @@ -2443,10 +2443,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | - CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING | - CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING | - CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW | - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; + CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | + CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | + CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | + CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; /* * We can allow some features even when not supported by the * hardware. For example, L1 can specify an MSR bitmap - and we @@ -3423,12 +3423,12 @@ static void enter_lmode(struct kvm_vcpu *vcpu) vmx_segment_cache_clear(to_vmx(vcpu)); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); - if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { + if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, - (guest_tr_ar & ~AR_TYPE_MASK) - | AR_TYPE_BUSY_64_TSS); + (guest_tr_ar & ~VMX_AR_TYPE_MASK) + | VMX_AR_TYPE_BUSY_64_TSS); } vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); } @@ -3719,7 +3719,7 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu) return 0; else { int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); - return AR_DPL(ar); + return VMX_AR_DPL(ar); } } @@ -3847,11 +3847,11 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu) if (cs.unusable) return false; - if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) + if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; - if (cs.type & AR_TYPE_WRITEABLE_MASK) { + if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; } else { @@ -3901,7 +3901,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) return false; if (!var.present) return false; - if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { + if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } @@ -5759,73 +5759,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } -static u64 ept_rsvd_mask(u64 spte, int level) -{ - int i; - u64 mask = 0; - - for (i = 51; i > boot_cpu_data.x86_phys_bits; i--) - mask |= (1ULL << i); - - if (level == 4) - /* bits 7:3 reserved */ - mask |= 0xf8; - else if (spte & (1ULL << 7)) - /* - * 1GB/2MB page, bits 29:12 or 20:12 reserved respectively, - * level == 1 if the hypervisor is using the ignored bit 7. - */ - mask |= (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE; - else if (level > 1) - /* bits 6:3 reserved */ - mask |= 0x78; - - return mask; -} - -static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, - int level) -{ - printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level); - - /* 010b (write-only) */ - WARN_ON((spte & 0x7) == 0x2); - - /* 110b (write/execute) */ - WARN_ON((spte & 0x7) == 0x6); - - /* 100b (execute-only) and value not supported by logical processor */ - if (!cpu_has_vmx_ept_execute_only()) - WARN_ON((spte & 0x7) == 0x4); - - /* not 000b */ - if ((spte & 0x7)) { - u64 rsvd_bits = spte & ept_rsvd_mask(spte, level); - - if (rsvd_bits != 0) { - printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n", - __func__, rsvd_bits); - WARN_ON(1); - } - - /* bits 5:3 are _not_ reserved for large page or leaf page */ - if ((rsvd_bits & 0x38) == 0) { - u64 ept_mem_type = (spte & 0x38) >> 3; - - if (ept_mem_type == 2 || ept_mem_type == 3 || - ept_mem_type == 7) { - printk(KERN_ERR "%s: ept_mem_type=0x%llx\n", - __func__, ept_mem_type); - WARN_ON(1); - } - } - } -} - static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { - u64 sptes[4]; - int nr_sptes, i, ret; + int ret; gpa_t gpa; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); @@ -5846,13 +5782,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) return 1; /* It is the real ept misconfig */ - printk(KERN_ERR "EPT: Misconfiguration.\n"); - printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa); - - nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); - - for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) - ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); + WARN_ON(1); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; @@ -6246,6 +6176,11 @@ static int handle_mwait(struct kvm_vcpu *vcpu) return handle_nop(vcpu); } +static int 
handle_monitor_trap(struct kvm_vcpu *vcpu) +{ + return 1; +} + static int handle_monitor(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); @@ -6408,8 +6343,12 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) */ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, - u32 vmx_instruction_info, gva_t *ret) + u32 vmx_instruction_info, bool wr, gva_t *ret) { + gva_t off; + bool exn; + struct kvm_segment s; + /* * According to Vol. 3B, "Information for VM Exits Due to Instruction * Execution", on an exit, vmx_instruction_info holds most of the @@ -6434,22 +6373,63 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ - *ret = vmx_get_segment_base(vcpu, seg_reg); + off = exit_qualification; /* holds the displacement */ if (base_is_valid) - *ret += kvm_register_read(vcpu, base_reg); + off += kvm_register_read(vcpu, base_reg); if (index_is_valid) - *ret += kvm_register_read(vcpu, index_reg)<<scaling; - *ret += exit_qualification; /* holds the displacement */ + off += kvm_register_read(vcpu, index_reg)<<scaling; + vmx_get_segment(vcpu, &s, seg_reg); + *ret = s.base + off; if (addr_size == 1) /* 32 bit */ *ret &= 0xffffffff; - /* - * TODO: throw #GP (and return 1) in various cases that the VM* - * instructions require it - e.g., offset beyond segment limit, - * unusable or unreadable/unwritable segment, non-canonical 64-bit - * address, and so on. Currently these are not checked. - */ + /* Checks for #GP/#SS exceptions. */ + exn = false; + if (is_protmode(vcpu)) { + /* Protected mode: apply checks for segment validity in the + * following order: + * - segment type check (#GP(0) may be thrown) + * - usability check (#GP(0)/#SS(0)) + * - limit check (#GP(0)/#SS(0)) + */ + if (wr) + /* #GP(0) if the destination operand is located in a + * read-only data segment or any code segment. + */ + exn = ((s.type & 0xa) == 0 || (s.type & 8)); + else + /* #GP(0) if the source operand is located in an + * execute-only code segment + */ + exn = ((s.type & 0xa) == 8); + } + if (exn) { + kvm_queue_exception_e(vcpu, GP_VECTOR, 0); + return 1; + } + if (is_long_mode(vcpu)) { + /* Long mode: #GP(0)/#SS(0) if the memory address is in a + * non-canonical form. This is an only check for long mode. + */ + exn = is_noncanonical_address(*ret); + } else if (is_protmode(vcpu)) { + /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. + */ + exn = (s.unusable != 0); + /* Protected mode: #GP(0)/#SS(0) if the memory + * operand is outside the segment limit. + */ + exn = exn || (off + sizeof(u64) > s.limit); + } + if (exn) { + kvm_queue_exception_e(vcpu, + seg_reg == VCPU_SREG_SS ? 
+ SS_VECTOR : GP_VECTOR, + 0); + return 1; + } + return 0; } @@ -6471,7 +6451,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, int maxphyaddr = cpuid_maxphyaddr(vcpu); if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmcs_read32(VMX_INSTRUCTION_INFO), &gva)) + vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, @@ -6999,7 +6979,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, &gva)) + vmx_instruction_info, true, &gva)) return 1; /* _system ok, as nested_vmx_check_permission verified cpl=0 */ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, @@ -7036,7 +7016,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) (((vmx_instruction_info) >> 3) & 0xf)); else { if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, &gva)) + vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { @@ -7128,7 +7108,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) return 1; if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, &vmcs_gva)) + vmx_instruction_info, true, &vmcs_gva)) return 1; /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, @@ -7184,7 +7164,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, &gva)) + vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { @@ -7282,6 +7262,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, + [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, [EXIT_REASON_INVVPID] = handle_invvpid, @@ -7542,6 +7523,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) return true; case EXIT_REASON_MWAIT_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); + case EXIT_REASON_MONITOR_TRAP_FLAG: + return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); case EXIT_REASON_MONITOR_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); case EXIT_REASON_PAUSE_INSTRUCTION: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8f0f6eca69da..1e7e76e14e89 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -29,6 +29,7 @@ #include "cpuid.h" #include "assigned-dev.h" #include "pmu.h" +#include "hyperv.h" #include <linux/clocksource.h> #include <linux/interrupt.h> @@ -221,11 +222,9 @@ static void shared_msr_update(unsigned slot, u32 msr) void kvm_define_shared_msr(unsigned slot, u32 msr) { BUG_ON(slot >= KVM_NR_SHARED_MSRS); + shared_msrs_global.msrs[slot] = msr; if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; - shared_msrs_global.msrs[slot] = msr; - /* we need ensured the shared_msr_global have been updated */ - smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); @@ -526,7 +525,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) } for (i = 0; i < 
ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && - (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { + (pdpte[i] & + vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { ret = 0; goto out; } @@ -949,6 +949,8 @@ static u32 emulated_msrs[] = { MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, + HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, + HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_KVM_PV_EOI_EN, @@ -1217,11 +1219,6 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, __func__, base_khz, scaled_khz, shift, *pmultiplier); } -static inline u64 get_kernel_ns(void) -{ - return ktime_get_boot_ns(); -} - #ifdef CONFIG_X86_64 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); #endif @@ -1444,20 +1441,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc); static cycle_t read_tsc(void) { - cycle_t ret; - u64 last; - - /* - * Empirically, a fence (of type that depends on the CPU) - * before rdtsc is enough to ensure that rdtsc is ordered - * with respect to loads. The various CPU manuals are unclear - * as to whether rdtsc can be reordered with later loads, - * but no one has ever seen it happen. - */ - rdtsc_barrier(); - ret = (cycle_t)vget_cycles(); - - last = pvclock_gtod_data.clock.cycle_last; + cycle_t ret = (cycle_t)rdtsc_ordered(); + u64 last = pvclock_gtod_data.clock.cycle_last; if (likely(ret >= last)) return ret; @@ -1646,7 +1631,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) return 1; } if (!use_master_clock) { - host_tsc = native_read_tsc(); + host_tsc = rdtsc(); kernel_ns = get_kernel_ns(); } @@ -1869,123 +1854,6 @@ out: return r; } -static bool kvm_hv_hypercall_enabled(struct kvm *kvm) -{ - return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; -} - -static bool kvm_hv_msr_partition_wide(u32 msr) -{ - bool r = false; - switch (msr) { - case HV_X64_MSR_GUEST_OS_ID: - case HV_X64_MSR_HYPERCALL: - case HV_X64_MSR_REFERENCE_TSC: - case HV_X64_MSR_TIME_REF_COUNT: - r = true; - break; - } - - return r; -} - -static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) -{ - struct kvm *kvm = vcpu->kvm; - - switch (msr) { - case HV_X64_MSR_GUEST_OS_ID: - kvm->arch.hv_guest_os_id = data; - /* setting guest os id to zero disables hypercall page */ - if (!kvm->arch.hv_guest_os_id) - kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; - break; - case HV_X64_MSR_HYPERCALL: { - u64 gfn; - unsigned long addr; - u8 instructions[4]; - - /* if guest os id is not set hypercall should remain disabled */ - if (!kvm->arch.hv_guest_os_id) - break; - if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { - kvm->arch.hv_hypercall = data; - break; - } - gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; - addr = gfn_to_hva(kvm, gfn); - if (kvm_is_error_hva(addr)) - return 1; - kvm_x86_ops->patch_hypercall(vcpu, instructions); - ((unsigned char *)instructions)[3] = 0xc3; /* ret */ - if (__copy_to_user((void __user *)addr, instructions, 4)) - return 1; - kvm->arch.hv_hypercall = data; - mark_page_dirty(kvm, gfn); - break; - } - case HV_X64_MSR_REFERENCE_TSC: { - u64 gfn; - HV_REFERENCE_TSC_PAGE tsc_ref; - memset(&tsc_ref, 0, sizeof(tsc_ref)); - kvm->arch.hv_tsc_page = data; - if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) - break; - gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; - if (kvm_write_guest(kvm, gfn << 
HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, - &tsc_ref, sizeof(tsc_ref))) - return 1; - mark_page_dirty(kvm, gfn); - break; - } - default: - vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " - "data 0x%llx\n", msr, data); - return 1; - } - return 0; -} - -static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) -{ - switch (msr) { - case HV_X64_MSR_APIC_ASSIST_PAGE: { - u64 gfn; - unsigned long addr; - - if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { - vcpu->arch.hv_vapic = data; - if (kvm_lapic_enable_pv_eoi(vcpu, 0)) - return 1; - break; - } - gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; - addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); - if (kvm_is_error_hva(addr)) - return 1; - if (__clear_user((void __user *)addr, PAGE_SIZE)) - return 1; - vcpu->arch.hv_vapic = data; - kvm_vcpu_mark_page_dirty(vcpu, gfn); - if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) - return 1; - break; - } - case HV_X64_MSR_EOI: - return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); - case HV_X64_MSR_ICR: - return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); - case HV_X64_MSR_TPR: - return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); - default: - vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " - "data 0x%llx\n", msr, data); - return 1; - } - - return 0; -} - static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; @@ -2224,15 +2092,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: - if (kvm_hv_msr_partition_wide(msr)) { - int r; - mutex_lock(&vcpu->kvm->lock); - r = set_msr_hyperv_pw(vcpu, msr, data); - mutex_unlock(&vcpu->kvm->lock); - return r; - } else - return set_msr_hyperv(vcpu, msr, data); - break; + case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_set_msr_common(vcpu, msr, data, + msr_info->host_initiated); case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. 
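The x86.c hunk above collapses the old partition-wide/per-vcpu Hyper-V MSR split into a single range dispatch onto the entry points declared in the new hyperv.h. Below is a minimal, illustrative sketch of that dispatch only; it uses the kvm_hv_set_msr_common() signature from hyperv.h and the case ranges visible in the hunk, while the wrapper name hv_route_wrmsr() is invented here for illustration and is not part of the patch:

/*
 * Sketch only (assumes kernel headers and the declarations from
 * arch/x86/kvm/hyperv.h). Shows how a write to any MSR in the Hyper-V
 * range, including the new crash MSRs, lands in kvm_hv_set_msr_common().
 */
static int hv_route_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
		/* Partition-wide vs. per-vcpu handling is decided inside. */
		return kvm_hv_set_msr_common(vcpu, msr, data, host);
	default:
		return 1;	/* not a Hyper-V MSR handled by this path */
	}
}

The real call sites are the kvm_set_msr_common()/kvm_get_msr_common() switches shown above; the sketch only makes the routing explicit.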
@@ -2315,68 +2178,6 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) return 0; } -static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) -{ - u64 data = 0; - struct kvm *kvm = vcpu->kvm; - - switch (msr) { - case HV_X64_MSR_GUEST_OS_ID: - data = kvm->arch.hv_guest_os_id; - break; - case HV_X64_MSR_HYPERCALL: - data = kvm->arch.hv_hypercall; - break; - case HV_X64_MSR_TIME_REF_COUNT: { - data = - div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); - break; - } - case HV_X64_MSR_REFERENCE_TSC: - data = kvm->arch.hv_tsc_page; - break; - default: - vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); - return 1; - } - - *pdata = data; - return 0; -} - -static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) -{ - u64 data = 0; - - switch (msr) { - case HV_X64_MSR_VP_INDEX: { - int r; - struct kvm_vcpu *v; - kvm_for_each_vcpu(r, v, vcpu->kvm) { - if (v == vcpu) { - data = r; - break; - } - } - break; - } - case HV_X64_MSR_EOI: - return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); - case HV_X64_MSR_ICR: - return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); - case HV_X64_MSR_TPR: - return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); - case HV_X64_MSR_APIC_ASSIST_PAGE: - data = vcpu->arch.hv_vapic; - break; - default: - vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); - return 1; - } - *pdata = data; - return 0; -} - int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { switch (msr_info->index) { @@ -2493,14 +2294,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: - if (kvm_hv_msr_partition_wide(msr_info->index)) { - int r; - mutex_lock(&vcpu->kvm->lock); - r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data); - mutex_unlock(&vcpu->kvm->lock); - return r; - } else - return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data); + case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_get_msr_common(vcpu, + msr_info->index, &msr_info->data); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current @@ -2651,6 +2448,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_TSC_DEADLINE_TIMER: case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_DISABLE_QUIRKS: + case KVM_CAP_SET_BOOT_CPU_ID: #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_PCI_2_3: @@ -2810,7 +2608,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : - native_read_tsc() - vcpu->arch.last_host_tsc; + rdtsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { @@ -2838,7 +2636,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); - vcpu->arch.last_host_tsc = native_read_tsc(); + vcpu->arch.last_host_tsc = rdtsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, @@ -3817,30 +3615,25 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, - &vpic->dev_master); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, - &vpic->dev_slave); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, - &vpic->dev_eclr); + kvm_destroy_pic(vpic); mutex_unlock(&kvm->slots_lock); - kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; - smp_wmb(); - kvm->arch.vpic = vpic; - smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); - kvm_destroy_pic(kvm); + kvm_destroy_pic(vpic); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); + goto create_irqchip_unlock; } + /* Write kvm->irq_routing before kvm->arch.vpic. */ + smp_wmb(); + kvm->arch.vpic = vpic; create_irqchip_unlock: mutex_unlock(&kvm->lock); break; @@ -3967,6 +3760,15 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_vm_ioctl_reinject(kvm, &control); break; } + case KVM_SET_BOOT_CPU_ID: + r = 0; + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus) != 0) + r = -EBUSY; + else + kvm->arch.bsp_vcpu_id = arg; + mutex_unlock(&kvm->lock); + break; case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, @@ -5882,66 +5684,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_emulate_halt); -int kvm_hv_hypercall(struct kvm_vcpu *vcpu) -{ - u64 param, ingpa, outgpa, ret; - uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; - bool fast, longmode; - - /* - * hypercall generates UD from non zero cpl and real mode - * per HYPER-V spec - */ - if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 0; - } - - longmode = is_64_bit_mode(vcpu); - - if (!longmode) { - param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); - ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); - outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); - } -#ifdef CONFIG_X86_64 - else { - param = kvm_register_read(vcpu, VCPU_REGS_RCX); - ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); - outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); - } -#endif - - code = param & 0xffff; - fast = (param >> 16) & 0x1; - rep_cnt = (param >> 32) & 0xfff; - rep_idx = (param >> 48) & 0xfff; - - trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); - - switch (code) { - case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: - kvm_vcpu_on_spin(vcpu); - break; - default: - res = HV_STATUS_INVALID_HYPERCALL_CODE; - break; - } - - ret = res | (((u64)rep_done & 0xfff) << 32); - if (longmode) { - kvm_register_write(vcpu, VCPU_REGS_RAX, ret); - } else { - kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); - kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); - } - - return 1; -} - /* * 
kvm_pv_kick_cpu_op: Kick a vcpu. * @@ -6518,6 +6260,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu_scan_ioapic(vcpu); if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) kvm_vcpu_reload_apic_access_page(vcpu); + if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; + r = 0; + goto out; + } } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { @@ -6627,7 +6375,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) hw_breakpoint_restore(); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, - native_read_tsc()); + rdtsc()); vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); @@ -7436,7 +7184,7 @@ int kvm_arch_hardware_enable(void) if (ret != 0) return ret; - local_tsc = native_read_tsc(); + local_tsc = rdtsc(); stable = !check_tsc_unstable(); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { @@ -7540,6 +7288,17 @@ void kvm_arch_check_processor_compat(void *rtn) kvm_x86_ops->check_processor_compatibility(rtn); } +bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; +} +EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); + +bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; +} + bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 0ca2f3e4803c..2f822cd886c2 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu, return kvm_register_write(vcpu, reg, val); } +static inline u64 get_kernel_ns(void) +{ + return ktime_get_boot_ns(); +} + static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) { return !(kvm->arch.disabled_quirks & quirk); diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 39d6a3db0b96..e912b2f6d36e 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -20,6 +20,7 @@ #include <asm/processor.h> #include <asm/delay.h> #include <asm/timer.h> +#include <asm/mwait.h> #ifdef CONFIG_SMP # include <asm/smp.h> @@ -49,16 +50,14 @@ static void delay_loop(unsigned long loops) /* TSC based delay: */ static void delay_tsc(unsigned long __loops) { - u32 bclock, now, loops = __loops; + u64 bclock, now, loops = __loops; int cpu; preempt_disable(); cpu = smp_processor_id(); - rdtsc_barrier(); - rdtscl(bclock); + bclock = rdtsc_ordered(); for (;;) { - rdtsc_barrier(); - rdtscl(now); + now = rdtsc_ordered(); if ((now - bclock) >= loops) break; @@ -79,14 +78,51 @@ static void delay_tsc(unsigned long __loops) if (unlikely(cpu != smp_processor_id())) { loops -= (now - bclock); cpu = smp_processor_id(); - rdtsc_barrier(); - rdtscl(bclock); + bclock = rdtsc_ordered(); } } preempt_enable(); } /* + * On some AMD platforms, MWAITX has a configurable 32-bit timer, that + * counts with TSC frequency. The input value is the loop of the + * counter, it will exit when the timer expires. + */ +static void delay_mwaitx(unsigned long __loops) +{ + u64 start, end, delay, loops = __loops; + + start = rdtsc_ordered(); + + for (;;) { + delay = min_t(u64, MWAITX_MAX_LOOPS, loops); + + /* + * Use cpu_tss as a cacheline-aligned, seldomly + * accessed per-cpu variable as the monitor target. 
+ */ + __monitorx(this_cpu_ptr(&cpu_tss), 0, 0); + + /* + * AMD, like Intel, supports the EAX hint and EAX=0xf + * means, do not enter any deep C-state and we use it + * here in delay() to minimize wakeup latency. + */ + __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE); + + end = rdtsc_ordered(); + + if (loops <= end - start) + break; + + loops -= end - start; + + start = end; + } +} + +/* * Since we calibrate only once at boot, this * function should be set once at boot and not changed */ @@ -94,13 +130,19 @@ static void (*delay_fn)(unsigned long) = delay_loop; void use_tsc_delay(void) { - delay_fn = delay_tsc; + if (delay_fn == delay_loop) + delay_fn = delay_tsc; +} + +void use_mwaitx_delay(void) +{ + delay_fn = delay_mwaitx; } int read_current_timer(unsigned long *timer_val) { if (delay_fn == delay_tsc) { - rdtscll(*timer_val); + *timer_val = rdtsc(); return 0; } return -1; diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c index 8300db71c2a6..8db26591d91a 100644 --- a/arch/x86/math-emu/get_address.c +++ b/arch/x86/math-emu/get_address.c @@ -20,6 +20,7 @@ #include <linux/stddef.h> #include <asm/uaccess.h> +#include <asm/vm86.h> #include "fpu_system.h" #include "exception.h" diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9dc909841739..eef44d9a3f77 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -20,6 +20,7 @@ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ #include <asm/fixmap.h> /* VSYSCALL_ADDR */ #include <asm/vsyscall.h> /* emulate_vsyscall */ +#include <asm/vm86.h> /* struct vm86 */ #define CREATE_TRACE_POINTS #include <asm/trace/exceptions.h> @@ -301,14 +302,16 @@ static inline void check_v8086_mode(struct pt_regs *regs, unsigned long address, struct task_struct *tsk) { +#ifdef CONFIG_VM86 unsigned long bit; - if (!v8086_mode(regs)) + if (!v8086_mode(regs) || !tsk->thread.vm86) return; bit = (address - 0xA0000) >> PAGE_SHIFT; if (bit < 32) - tsk->thread.screen_bitmap |= 1 << bit; + tsk->thread.vm86->screen_bitmap |= 1 << bit; +#endif } static bool low_pfn(unsigned long pfn) diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e1840f3db5b5..9ce5da27b136 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -12,20 +12,6 @@ extern pgd_t early_level4_pgt[PTRS_PER_PGD]; extern struct range pfn_mapped[E820_X_MAX]; -static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; -static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; -static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; - -/* - * This page used as early shadow. We don't use empty_zero_page - * at early stages, stack instrumentation could write some garbage - * to this page. - * Latter we reuse it as zero shadow for large ranges of memory - * that allowed to access, but not instrumented by kasan - * (vmalloc/vmemmap ...). 
- */ -static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; - static int __init map_range(struct range *range) { unsigned long start; @@ -62,106 +48,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd) } } -static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr, - unsigned long end) -{ - pte_t *pte = pte_offset_kernel(pmd, addr); - - while (addr + PAGE_SIZE <= end) { - WARN_ON(!pte_none(*pte)); - set_pte(pte, __pte(__pa_nodebug(kasan_zero_page) - | __PAGE_KERNEL_RO)); - addr += PAGE_SIZE; - pte = pte_offset_kernel(pmd, addr); - } - return 0; -} - -static int __init zero_pmd_populate(pud_t *pud, unsigned long addr, - unsigned long end) -{ - int ret = 0; - pmd_t *pmd = pmd_offset(pud, addr); - - while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) { - WARN_ON(!pmd_none(*pmd)); - set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte) - | _KERNPG_TABLE)); - addr += PMD_SIZE; - pmd = pmd_offset(pud, addr); - } - if (addr < end) { - if (pmd_none(*pmd)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pte_populate(pmd, addr, end); - } - return ret; -} - - -static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr, - unsigned long end) -{ - int ret = 0; - pud_t *pud = pud_offset(pgd, addr); - - while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) { - WARN_ON(!pud_none(*pud)); - set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd) - | _KERNPG_TABLE)); - addr += PUD_SIZE; - pud = pud_offset(pgd, addr); - } - - if (addr < end) { - if (pud_none(*pud)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pmd_populate(pud, addr, end); - } - return ret; -} - -static int __init zero_pgd_populate(unsigned long addr, unsigned long end) -{ - int ret = 0; - pgd_t *pgd = pgd_offset_k(addr); - - while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) { - WARN_ON(!pgd_none(*pgd)); - set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud) - | _KERNPG_TABLE)); - addr += PGDIR_SIZE; - pgd = pgd_offset_k(addr); - } - - if (addr < end) { - if (pgd_none(*pgd)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pud_populate(pgd, addr, end); - } - return ret; -} - - -static void __init populate_zero_shadow(const void *start, const void *end) -{ - if (zero_pgd_populate((unsigned long)start, (unsigned long)end)) - panic("kasan: unable to map zero shadow!"); -} - - #ifdef CONFIG_KASAN_INLINE static int kasan_die_handler(struct notifier_block *self, unsigned long val, @@ -213,7 +99,7 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); - populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, kasan_mem_to_shadow((void *)PAGE_OFFSET)); for (i = 0; i < E820_X_MAX; i++) { @@ -223,14 +109,15 @@ void __init kasan_init(void) if (map_range(&pfn_mapped[i])) panic("kasan: unable to allocate shadow!"); } - populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), - kasan_mem_to_shadow((void *)__START_KERNEL_map)); + kasan_populate_zero_shadow( + kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), + kasan_mem_to_shadow((void *)__START_KERNEL_map)); vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext), (unsigned long)kasan_mem_to_shadow(_end), NUMA_NO_NODE); - 
populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), (void *)KASAN_SHADOW_END); memset(kasan_zero_page, 0, PAGE_SIZE); diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 8fd6f44aee83..09d3afc0a181 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -166,7 +166,6 @@ void pcibios_fixup_bus(struct pci_bus *b) { struct pci_dev *dev; - pci_read_bridge_bases(b); list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); } @@ -673,24 +672,22 @@ int pcibios_add_device(struct pci_dev *dev) return 0; } -int pcibios_enable_device(struct pci_dev *dev, int mask) +int pcibios_alloc_irq(struct pci_dev *dev) { - int err; - - if ((err = pci_enable_resources(dev, mask)) < 0) - return err; - - if (!pci_dev_msi_enabled(dev)) - return pcibios_enable_irq(dev); - return 0; + return pcibios_enable_irq(dev); } -void pcibios_disable_device (struct pci_dev *dev) +void pcibios_free_irq(struct pci_dev *dev) { - if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) + if (pcibios_disable_irq) pcibios_disable_irq(dev); } +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + return pci_enable_resources(dev, mask); +} + int pci_ext_cfg_avail(void) { if (raw_pci_ext_ops) diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 9a2b7101ae8a..e58565556703 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -62,19 +62,6 @@ static void pci_fixup_umc_ide(struct pci_dev *d) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); -static void pci_fixup_ncr53c810(struct pci_dev *d) -{ - /* - * NCR 53C810 returns class code 0 (at least on some systems). - * Fix class to be PCI_CLASS_STORAGE_SCSI - */ - if (!d->class) { - dev_warn(&d->dev, "Fixing NCR 53C810 class code\n"); - d->class = PCI_CLASS_STORAGE_SCSI << 8; - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810); - static void pci_fixup_latency(struct pci_dev *d) { /* diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 27062303c881..22aaefb4f1ca 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -211,7 +211,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) struct irq_alloc_info info; int polarity; - if (dev->irq_managed && dev->irq > 0) + if (pci_has_managed_irq(dev)) return 0; if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) @@ -234,10 +234,13 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) static void intel_mid_pci_irq_disable(struct pci_dev *dev) { - if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && - dev->irq > 0) { + if (pci_has_managed_irq(dev)) { mp_unmap_irq(dev->irq); dev->irq_managed = 0; + /* + * Don't reset dev->irq here, otherwise + * intel_mid_pci_irq_enable() will fail on next call. 
+ */ } } diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 9bd115484745..32e70343e6fd 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev) struct pci_dev *temp_dev; int irq; - if (dev->irq_managed && dev->irq > 0) + if (pci_has_managed_irq(dev)) return 0; irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, @@ -1230,8 +1230,7 @@ static int pirq_enable_irq(struct pci_dev *dev) } dev = temp_dev; if (irq >= 0) { - dev->irq_managed = 1; - dev->irq = irq; + pci_set_managed_irq(dev, irq); dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); return 0; @@ -1257,24 +1256,10 @@ static int pirq_enable_irq(struct pci_dev *dev) return 0; } -bool mp_should_keep_irq(struct device *dev) -{ - if (dev->power.is_prepared) - return true; -#ifdef CONFIG_PM - if (dev->power.runtime_status == RPM_SUSPENDING) - return true; -#endif - - return false; -} - static void pirq_disable_irq(struct pci_dev *dev) { - if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && - dev->irq_managed && dev->irq) { + if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) { mp_unmap_irq(dev->irq); - dev->irq = 0; - dev->irq_managed = 0; + pci_reset_managed_irq(dev); } } diff --git a/arch/x86/ras/Kconfig b/arch/x86/ras/Kconfig new file mode 100644 index 000000000000..10fea5fc821e --- /dev/null +++ b/arch/x86/ras/Kconfig @@ -0,0 +1,11 @@ +config AMD_MCE_INJ + tristate "Simple MCE injection interface for AMD processors" + depends on RAS && EDAC_DECODE_MCE && DEBUG_FS + default n + help + This is a simple debugfs interface to inject MCEs and test different + aspects of the MCE handling code. + + WARNING: Do not even assume this interface is staying stable! + + diff --git a/arch/x86/ras/Makefile b/arch/x86/ras/Makefile new file mode 100644 index 000000000000..dd2c98b84037 --- /dev/null +++ b/arch/x86/ras/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_AMD_MCE_INJ) += mce_amd_inj.o + diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c new file mode 100644 index 000000000000..17e35b5bf779 --- /dev/null +++ b/arch/x86/ras/mce_amd_inj.c @@ -0,0 +1,375 @@ +/* + * A simple MCE injection facility for testing different aspects of the RAS + * code. This driver should be built as module so that it can be loaded + * on production kernels for testing purposes. + * + * This file may be distributed under the terms of the GNU General Public + * License version 2. + * + * Copyright (c) 2010-15: Borislav Petkov <bp@alien8.de> + * Advanced Micro Devices Inc. 
+ */ + +#include <linux/kobject.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/cpu.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <asm/mce.h> + +#include "../kernel/cpu/mcheck/mce-internal.h" + +/* + * Collect all the MCi_XXX settings + */ +static struct mce i_mce; +static struct dentry *dfs_inj; + +static u8 n_banks; + +#define MAX_FLAG_OPT_SIZE 3 + +enum injection_type { + SW_INJ = 0, /* SW injection, simply decode the error */ + HW_INJ, /* Trigger a #MC */ + N_INJ_TYPES, +}; + +static const char * const flags_options[] = { + [SW_INJ] = "sw", + [HW_INJ] = "hw", + NULL +}; + +/* Set default injection to SW_INJ */ +static enum injection_type inj_type = SW_INJ; + +#define MCE_INJECT_SET(reg) \ +static int inj_##reg##_set(void *data, u64 val) \ +{ \ + struct mce *m = (struct mce *)data; \ + \ + m->reg = val; \ + return 0; \ +} + +MCE_INJECT_SET(status); +MCE_INJECT_SET(misc); +MCE_INJECT_SET(addr); + +#define MCE_INJECT_GET(reg) \ +static int inj_##reg##_get(void *data, u64 *val) \ +{ \ + struct mce *m = (struct mce *)data; \ + \ + *val = m->reg; \ + return 0; \ +} + +MCE_INJECT_GET(status); +MCE_INJECT_GET(misc); +MCE_INJECT_GET(addr); + +DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n"); + +/* + * Caller needs to be make sure this cpu doesn't disappear + * from under us, i.e.: get_cpu/put_cpu. + */ +static int toggle_hw_mce_inject(unsigned int cpu, bool enable) +{ + u32 l, h; + int err; + + err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h); + if (err) { + pr_err("%s: error reading HWCR\n", __func__); + return err; + } + + enable ? (l |= BIT(18)) : (l &= ~BIT(18)); + + err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h); + if (err) + pr_err("%s: error writing HWCR\n", __func__); + + return err; +} + +static int __set_inj(const char *buf) +{ + int i; + + for (i = 0; i < N_INJ_TYPES; i++) { + if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) { + inj_type = i; + return 0; + } + } + return -EINVAL; +} + +static ssize_t flags_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[MAX_FLAG_OPT_SIZE]; + int n; + + n = sprintf(buf, "%s\n", flags_options[inj_type]); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); +} + +static ssize_t flags_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[MAX_FLAG_OPT_SIZE], *__buf; + int err; + size_t ret; + + if (cnt > MAX_FLAG_OPT_SIZE) + cnt = MAX_FLAG_OPT_SIZE; + + ret = cnt; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt - 1] = 0; + + /* strip whitespace */ + __buf = strstrip(buf); + + err = __set_inj(__buf); + if (err) { + pr_err("%s: Invalid flags value: %s\n", __func__, __buf); + return err; + } + + *ppos += ret; + + return ret; +} + +static const struct file_operations flags_fops = { + .read = flags_read, + .write = flags_write, + .llseek = generic_file_llseek, +}; + +/* + * On which CPU to inject? 
+ */ +MCE_INJECT_GET(extcpu); + +static int inj_extcpu_set(void *data, u64 val) +{ + struct mce *m = (struct mce *)data; + + if (val >= nr_cpu_ids || !cpu_online(val)) { + pr_err("%s: Invalid CPU: %llu\n", __func__, val); + return -EINVAL; + } + m->extcpu = val; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n"); + +static void trigger_mce(void *info) +{ + asm volatile("int $18"); +} + +static void do_inject(void) +{ + u64 mcg_status = 0; + unsigned int cpu = i_mce.extcpu; + u8 b = i_mce.bank; + + if (i_mce.misc) + i_mce.status |= MCI_STATUS_MISCV; + + if (inj_type == SW_INJ) { + mce_inject_log(&i_mce); + return; + } + + /* prep MCE global settings for the injection */ + mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV; + + if (!(i_mce.status & MCI_STATUS_PCC)) + mcg_status |= MCG_STATUS_RIPV; + + get_online_cpus(); + if (!cpu_online(cpu)) + goto err; + + toggle_hw_mce_inject(cpu, true); + + wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS, + (u32)mcg_status, (u32)(mcg_status >> 32)); + + wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b), + (u32)i_mce.status, (u32)(i_mce.status >> 32)); + + wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b), + (u32)i_mce.addr, (u32)(i_mce.addr >> 32)); + + wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b), + (u32)i_mce.misc, (u32)(i_mce.misc >> 32)); + + toggle_hw_mce_inject(cpu, false); + + smp_call_function_single(cpu, trigger_mce, NULL, 0); + +err: + put_online_cpus(); + +} + +/* + * This denotes into which bank we're injecting and triggers + * the injection, at the same time. + */ +static int inj_bank_set(void *data, u64 val) +{ + struct mce *m = (struct mce *)data; + + if (val >= n_banks) { + pr_err("Non-existent MCE bank: %llu\n", val); + return -EINVAL; + } + + m->bank = val; + do_inject(); + + return 0; +} + +MCE_INJECT_GET(bank); + +DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n"); + +static const char readme_msg[] = +"Description of the files and their usages:\n" +"\n" +"Note1: i refers to the bank number below.\n" +"Note2: See respective BKDGs for the exact bit definitions of the files below\n" +"as they mirror the hardware registers.\n" +"\n" +"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n" +"\t attributes of the error which caused the MCE.\n" +"\n" +"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n" +"\t used for error thresholding purposes and its validity is indicated by\n" +"\t MCi_STATUS[MiscV].\n" +"\n" +"addr:\t Error address value to be written to MCi_ADDR. Log address information\n" +"\t associated with the error.\n" +"\n" +"cpu:\t The CPU to inject the error on.\n" +"\n" +"bank:\t Specify the bank you want to inject the error into: the number of\n" +"\t banks in a processor varies and is family/model-specific, therefore, the\n" +"\t supplied value is sanity-checked. Setting the bank value also triggers the\n" +"\t injection.\n" +"\n" +"flags:\t Injection type to be performed. Writing to this file will trigger a\n" +"\t real machine check, an APIC interrupt or invoke the error decoder routines\n" +"\t for AMD processors.\n" +"\n" +"\t Allowed error injection types:\n" +"\t - \"sw\": Software error injection. Decode error to a human-readable \n" +"\t format only. Safe to use.\n" +"\t - \"hw\": Hardware error injection. Causes the #MC exception handler to \n" +"\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n" +"\t is set. 
Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" +"\t before injecting.\n" +"\n"; + +static ssize_t +inj_readme_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_read_from_buffer(ubuf, cnt, ppos, + readme_msg, strlen(readme_msg)); +} + +static const struct file_operations readme_fops = { + .read = inj_readme_read, +}; + +static struct dfs_node { + char *name; + struct dentry *d; + const struct file_operations *fops; + umode_t perm; +} dfs_fls[] = { + { .name = "status", .fops = &status_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "misc", .fops = &misc_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "addr", .fops = &addr_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "bank", .fops = &bank_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "flags", .fops = &flags_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "cpu", .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "README", .fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH }, +}; + +static int __init init_mce_inject(void) +{ + int i; + u64 cap; + + rdmsrl(MSR_IA32_MCG_CAP, cap); + n_banks = cap & MCG_BANKCNT_MASK; + + dfs_inj = debugfs_create_dir("mce-inject", NULL); + if (!dfs_inj) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) { + dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name, + dfs_fls[i].perm, + dfs_inj, + &i_mce, + dfs_fls[i].fops); + + if (!dfs_fls[i].d) + goto err_dfs_add; + } + + return 0; + +err_dfs_add: + while (--i >= 0) + debugfs_remove(dfs_fls[i].d); + + debugfs_remove(dfs_inj); + dfs_inj = NULL; + + return -ENOMEM; +} + +static void __exit exit_mce_inject(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) + debugfs_remove(dfs_fls[i].d); + + memset(&dfs_fls, 0, sizeof(dfs_fls)); + + debugfs_remove(dfs_inj); + dfs_inj = NULL; +} +module_init(init_mce_inject); +module_exit(exit_mce_inject); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>"); +MODULE_AUTHOR("AMD Inc."); +MODULE_DESCRIPTION("MCE injection facility for RAS testing"); diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index b9531d343134..755481f14d90 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h @@ -45,17 +45,4 @@ #define read_barrier_depends() do { } while (0) #define smp_read_barrier_depends() do { } while (0) -/* - * Stop RDTSC speculation. This is needed when you need to use RDTSC - * (or get_cycles or vread that possibly accesses the TSC) in a defined - * code region. - * - * (Could use an alternative three way for this if there was one.) 
- */ -static inline void rdtsc_barrier(void) -{ - alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, - "lfence", X86_FEATURE_LFENCE_RDTSC); -} - #endif diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 11d6fb4e8483..d9cfa452da9d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1215,11 +1215,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_msr = xen_read_msr_safe, .write_msr = xen_write_msr_safe, - .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, - .read_tscp = native_read_tscp, - .iret = xen_iret, #ifdef CONFIG_X86_64 .usergs_sysret32 = xen_sysret32, diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index e5b872ba2484..3bd3504a6cc7 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -14,12 +14,15 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select HAVE_DMA_API_DEBUG + select HAVE_DMA_ATTRS select HAVE_FUNCTION_TRACER select HAVE_IRQ_TIME_ACCOUNTING select HAVE_OPROFILE select HAVE_PERF_EVENTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA + select PERF_USE_VMALLOC select VIRT_TO_BUS help Xtensa processors are 32-bit RISC machines designed by Tensilica @@ -61,9 +64,7 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y config MMU - bool - default n if !XTENSA_VARIANT_CUSTOM - default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM + def_bool n config VARIANT_IRQ_SWITCH def_bool n @@ -71,9 +72,6 @@ config VARIANT_IRQ_SWITCH config HAVE_XTENSA_GPIO32 def_bool n -config MAY_HAVE_SMP - def_bool n - menu "Processor type and features" choice @@ -100,7 +98,6 @@ config XTENSA_VARIANT_DC233C config XTENSA_VARIANT_CUSTOM bool "Custom Xtensa processor configuration" - select MAY_HAVE_SMP select HAVE_XTENSA_GPIO32 help Select this variant to use a custom Xtensa processor configuration. @@ -126,10 +123,21 @@ config XTENSA_VARIANT_MMU bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" depends on XTENSA_VARIANT_CUSTOM default y + select MMU help Build a Conventional Kernel with full MMU support, ie: it supports a TLB with auto-loading, page protection. +config XTENSA_VARIANT_HAVE_PERF_EVENTS + bool "Core variant has Performance Monitor Module" + depends on XTENSA_VARIANT_CUSTOM + default n + help + Enable if core variant has Performance Monitor Module with + External Registers Interface. + + If unsure, say N. 
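The option above gates the perf driver added later in this patch (arch/xtensa/kernel/perf_event.c), which talks to the Performance Monitor Module entirely through the external-registers interface. As a rough, illustrative sketch of what that interface looks like from C, assuming the get_er()/set_er() accessors and the XTENSA_PMU_* offsets defined further down in this series (error handling, locking and per-CPU concerns omitted):

/*
 * Illustrative only, not part of the patch: count CPU cycles on the
 * local core via the PMU external registers. The offsets and helpers
 * mirror the ones introduced in arch/xtensa/kernel/perf_event.c below.
 */
static void pmu_cycle_count_demo(void)
{
	/* event select 0 (cycles), mask 0x1, no overflow interrupt */
	set_er(XTENSA_PMU_MASK(0, 0x1) & ~XTENSA_PMU_PMCTRL_INTEN,
	       XTENSA_PMU_PMCTRL(0));
	set_er(0, XTENSA_PMU_PM(0));		/* clear counter 0 */
	/* global enable for all counters */
	set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);

	/* ... run the code being measured ... */

	pr_info("cycles: 0x%08lx\n", get_er(XTENSA_PMU_PM(0)));
	set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
}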
+ config XTENSA_UNALIGNED_USER bool "Unaligned memory access in use space" help @@ -143,7 +151,7 @@ source "kernel/Kconfig.preempt" config HAVE_SMP bool "System Supports SMP (MX)" - depends on MAY_HAVE_SMP + depends on XTENSA_VARIANT_CUSTOM select XTENSA_MX help This option is use to indicate that the system-on-a-chip (SOC) diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 5b478accd5fc..63c223dff5f1 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += bitsperlong.h generic-y += bug.h generic-y += clkdev.h generic-y += cputime.h -generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 00b7d46b35b8..ebcd1f6fc8cb 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -29,7 +29,7 @@ * * Locking interrupts looks like this: * - * rsil a15, LOCKLEVEL + * rsil a15, TOPLEVEL * <code> * wsr a15, PS * rsync @@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15, "__stringify(LOCKLEVEL)"\n"\ + " rsil a15, "__stringify(TOPLEVEL)"\n"\ " l32i %0, %2, 0\n" \ " " #op " %0, %0, %1\n" \ " s32i %0, %2, 0\n" \ @@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15,"__stringify(LOCKLEVEL)"\n" \ + " rsil a15,"__stringify(TOPLEVEL)"\n" \ " l32i %0, %2, 0\n" \ " " #op " %0, %0, %1\n" \ " s32i %0, %2, 0\n" \ @@ -272,7 +272,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) unsigned int vval; __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" + " rsil a15,"__stringify(TOPLEVEL)"\n" " l32i %0, %2, 0\n" " xor %1, %4, %3\n" " and %0, %0, %4\n" @@ -306,7 +306,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) unsigned int vval; __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" + " rsil a15,"__stringify(TOPLEVEL)"\n" " l32i %0, %2, 0\n" " or %0, %0, %1\n" " s32i %0, %2, 0\n" diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 370b26f38414..201e9009efd8 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new) return new; #else __asm__ __volatile__( - " rsil a15, "__stringify(LOCKLEVEL)"\n" + " rsil a15, "__stringify(TOPLEVEL)"\n" " l32i %0, %1, 0\n" " bne %0, %2, 1f\n" " s32i %3, %1, 0\n" @@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #else unsigned long tmp; __asm__ __volatile__( - " rsil a15, "__stringify(LOCKLEVEL)"\n" + " rsil a15, "__stringify(TOPLEVEL)"\n" " l32i %0, %1, 0\n" " s32i %2, %1, 0\n" " wsr a15, ps\n" diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h new file mode 100644 index 000000000000..fe1f5c878493 --- /dev/null +++ b/arch/xtensa/include/asm/device.h @@ -0,0 +1,19 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#ifndef _ASM_XTENSA_DEVICE_H +#define _ASM_XTENSA_DEVICE_H + +struct dma_map_ops; + +struct dev_archdata { + /* DMA operations on that device */ + struct dma_map_ops *dma_ops; +}; + +struct pdev_archdata { +}; + +#endif /* _ASM_XTENSA_DEVICE_H */ diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 
1f5f6dc09736..f01cb3044e50 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -1,11 +1,10 @@ /* - * include/asm-xtensa/dma-mapping.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_DMA_MAPPING_H @@ -13,142 +12,67 @@ #include <asm/cache.h> #include <asm/io.h> + +#include <asm-generic/dma-coherent.h> + #include <linux/mm.h> #include <linux/scatterlist.h> #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -/* - * DMA-consistent mapping functions. - */ - -extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long); -extern void consistent_free(void*, size_t, dma_addr_t); -extern void consistent_sync(void*, size_t, int); - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); +extern struct dma_map_ops xtensa_dma_map_ops; -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - -static inline dma_addr_t -dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - consistent_sync(ptr, size, direction); - return virt_to_phys(ptr); -} - -static inline void -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) +static inline struct dma_map_ops *get_dma_ops(struct device *dev) { - BUG_ON(direction == DMA_NONE); + if (dev && dev->archdata.dma_ops) + return dev->archdata.dma_ops; + else + return &xtensa_dma_map_ops; } -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction) -{ - int i; - struct scatterlist *sg; - - BUG_ON(direction == DMA_NONE); - - for_each_sg(sglist, sg, nents, i) { - BUG_ON(!sg_page(sg)); +#include <asm-generic/dma-mapping-common.h> - sg->dma_address = sg_phys(sg); - consistent_sync(sg_virt(sg), sg->length, direction); - } +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) +#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) +#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) +#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - return nents; -} - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset; -} - -static inline void -dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { - BUG_ON(direction == DMA_NONE); -} + void *ret; + struct dma_map_ops *ops = get_dma_ops(dev); + if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) + return ret; -static inline void -dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t 
size, - enum dma_data_direction direction) -{ - consistent_sync((void *)bus_to_virt(dma_handle), size, direction); -} + ret = ops->alloc(dev, size, dma_handle, gfp, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, ret); -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - consistent_sync((void *)bus_to_virt(dma_handle), size, direction); + return ret; } -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - - consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); -} - -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { + struct dma_map_ops *ops = get_dma_ops(dev); - consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); -} -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; + if (dma_release_from_coherent(dev, get_order(size), vaddr)) + return; - for_each_sg(sglist, sg, nelems, i) - consistent_sync(sg_virt(sg), sg->length, dir); + ops->free(dev, size, vaddr, dma_handle, attrs); + debug_dma_free_coherent(dev, size, vaddr, dma_handle); } -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nelems, i) - consistent_sync(sg_virt(sg), sg->length, dir); -} static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return 0; + struct dma_map_ops *ops = get_dma_ops(dev); + + debug_dma_mapping_error(dev, dma_addr); + return ops->mapping_error(dev, dma_addr); } static inline int @@ -168,39 +92,7 @@ dma_set_mask(struct device *dev, u64 mask) return 0; } -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - consistent_sync(vaddr, size, direction); -} - -/* Not supported for now */ -static inline int dma_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size) -{ - return -EINVAL; -} - -static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) -{ - return -EINVAL; -} - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - return NULL; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ -} +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); #endif /* _XTENSA_DMA_MAPPING_H */ diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h index ea36674c6ec5..8e090c709046 100644 --- a/arch/xtensa/include/asm/irqflags.h +++ b/arch/xtensa/include/asm/irqflags.h @@ -6,6 +6,7 @@ * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. 
*/ #ifndef _XTENSA_IRQFLAGS_H @@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void) static inline unsigned long arch_local_irq_save(void) { unsigned long flags; - asm volatile("rsil %0, "__stringify(LOCKLEVEL) +#if XTENSA_FAKE_NMI +#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL + unsigned long tmp; + + asm volatile("rsr %0, ps\t\n" + "extui %1, %0, 0, 4\t\n" + "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n" + "rsil %0, "__stringify(LOCKLEVEL)"\n" + "1:" + : "=a" (flags), "=a" (tmp) :: "memory"); +#else + asm volatile("rsr %0, ps\t\n" + "or %0, %0, %1\t\n" + "xsr %0, ps\t\n" + "rsync" + : "=&a" (flags) : "a" (LOCKLEVEL) : "memory"); +#endif +#else + asm volatile("rsil %0, "__stringify(LOCKLEVEL) : "=a" (flags) :: "memory"); +#endif return flags; } diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index b61bdf0eea25..83e2e4bc01ba 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -1,11 +1,10 @@ /* - * include/asm-xtensa/processor.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2008 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_PROCESSOR_H @@ -45,6 +44,14 @@ #define STACK_TOP_MAX STACK_TOP /* + * General exception cause assigned to fake NMI. Fake NMI needs to be handled + * differently from other interrupts, but it uses common kernel entry/exit + * code. + */ + +#define EXCCAUSE_MAPPED_NMI 62 + +/* * General exception cause assigned to debug exceptions. Debug exceptions go * to their own vector, rather than the general exception vectors (user, * kernel, double); and their specific causes are reported via DEBUGCAUSE @@ -65,10 +72,30 @@ #define VALID_DOUBLE_EXCEPTION_ADDRESS 64 +#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno) +#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL + +#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level) +#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK) + +#define IS_POW2(v) (((v) & ((v) - 1)) == 0) + +#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT) + /* LOCKLEVEL defines the interrupt level that masks all * general-purpose interrupts. 
*/ +#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \ + defined(XCHAL_PROFILING_INTERRUPT) && \ + PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \ + XCHAL_EXCM_LEVEL > 1 && \ + IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)) +#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1) +#else #define LOCKLEVEL XCHAL_EXCM_LEVEL +#endif +#define TOPLEVEL XCHAL_EXCM_LEVEL +#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL) /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE * registers diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h index 6a05fcb0a20d..fe06e8ed162b 100644 --- a/arch/xtensa/include/asm/stacktrace.h +++ b/arch/xtensa/include/asm/stacktrace.h @@ -33,4 +33,12 @@ void walk_stackframe(unsigned long *sp, int (*fn)(struct stackframe *frame, void *data), void *data); +void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth, + int (*kfn)(struct stackframe *frame, void *data), + int (*ufn)(struct stackframe *frame, void *data), + void *data); +void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, + int (*ufn)(struct stackframe *frame, void *data), + void *data); + #endif /* _XTENSA_STACKTRACE_H */ diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 677bfcf4ee5d..28f33a8b7f5f 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -25,30 +25,39 @@ static inline void spill_registers(void) { #if XCHAL_NUM_AREGS > 16 __asm__ __volatile__ ( - " call12 1f\n" + " call8 1f\n" " _j 2f\n" " retw\n" " .align 4\n" "1:\n" +#if XCHAL_NUM_AREGS == 32 + " _entry a1, 32\n" + " addi a8, a0, 3\n" + " _entry a1, 16\n" + " mov a12, a12\n" + " retw\n" +#else " _entry a1, 48\n" - " addi a12, a0, 3\n" -#if XCHAL_NUM_AREGS > 32 - " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" + " call12 1f\n" + " retw\n" + " .align 4\n" + "1:\n" + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n" " _entry a1, 48\n" " mov a12, a0\n" " .endr\n" -#endif - " _entry a1, 48\n" + " _entry a1, 16\n" #if XCHAL_NUM_AREGS % 12 == 0 - " mov a8, a8\n" -#elif XCHAL_NUM_AREGS % 12 == 4 " mov a12, a12\n" -#elif XCHAL_NUM_AREGS % 12 == 8 +#elif XCHAL_NUM_AREGS % 12 == 4 " mov a4, a4\n" +#elif XCHAL_NUM_AREGS % 12 == 8 + " mov a8, a8\n" #endif " retw\n" +#endif "2:\n" - : : : "a12", "a13", "memory"); + : : : "a8", "a9", "memory"); #else __asm__ __volatile__ ( " mov a12, a12\n" diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index d3a0f0fd56dd..50137bc9e150 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_SMP) += smp.o mxhead.o +obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o AFLAGS_head.o += -mtext-section-literals @@ -27,10 +28,11 @@ AFLAGS_head.o += -mtext-section-literals # # Replicate rules in scripts/Makefile.build -sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \ - -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \ - -e 's/\*(\(\.text .*\))/*(.literal \1)/g' \ - -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g' +sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \ + -e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \ + -e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \ + -e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \ + -e 
's/\.{text}/.text/g' quiet_cmd__cpp_lds_S = LDS $@ cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \ diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 82bbfa5a05b3..504130357597 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1,6 +1,4 @@ /* - * arch/xtensa/kernel/entry.S - * * Low-level exception handling * * This file is subject to the terms and conditions of the GNU General Public @@ -8,6 +6,7 @@ * for more details. * * Copyright (C) 2004 - 2008 by Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * * Chris Zankel <chris@zankel.net> * @@ -75,6 +74,27 @@ #endif .endm + + .macro irq_save flags tmp +#if XTENSA_FAKE_NMI +#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL + rsr \flags, ps + extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + bgei \tmp, LOCKLEVEL, 99f + rsil \tmp, LOCKLEVEL +99: +#else + movi \tmp, LOCKLEVEL + rsr \flags, ps + or \flags, \flags, \tmp + xsr \flags, ps + rsync +#endif +#else + rsil \flags, LOCKLEVEL +#endif + .endm + /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ /* @@ -122,6 +142,7 @@ _user_exception: /* Save SAR and turn off single stepping */ movi a2, 0 + wsr a2, depc # terminate user stack trace with 0 rsr a3, sar xsr a2, icountlevel s32i a3, a1, PT_SAR @@ -301,7 +322,18 @@ _kernel_exception: s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 + _bnei a2, 1, 1f + + /* Copy spill slots of a0 and a1 to imitate movsp + * in order to keep exception stack continuous + */ + l32i a3, a1, PT_SIZE + l32i a0, a1, PT_SIZE + 4 + s32e a3, a1, -16 + s32e a0, a1, -12 1: + l32i a0, a1, PT_AREG0 # restore saved a0 + wsr a0, depc #ifdef KERNEL_STACK_OVERFLOW_CHECK @@ -340,75 +372,88 @@ common_exception: /* It is now save to restore the EXC_TABLE_FIXUP variable. */ - rsr a0, exccause + rsr a2, exccause movi a3, 0 - rsr a2, excsave1 - s32i a0, a1, PT_EXCCAUSE - s32i a3, a2, EXC_TABLE_FIXUP - - /* All unrecoverable states are saved on stack, now, and a1 is valid, - * so we can allow exceptions and interrupts (*) again. - * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) + rsr a0, excsave1 + s32i a2, a1, PT_EXCCAUSE + s32i a3, a0, EXC_TABLE_FIXUP + + /* All unrecoverable states are saved on stack, now, and a1 is valid. + * Now we can allow exceptions again. In case we've got an interrupt + * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts, + * otherwise it's left unchanged. * - * (*) We only allow interrupts if they were previously enabled and - * we're not handling an IRQ + * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) */ rsr a3, ps - addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT - movi a2, LOCKLEVEL + s32i a3, a1, PT_PS # save ps + +#if XTENSA_FAKE_NMI + /* Correct PS needs to be saved in the PT_PS: + * - in case of exception or level-1 interrupt it's in the PS, + * and is already saved. + * - in case of medium level interrupt it's in the excsave2.
+ */ + movi a0, EXCCAUSE_MAPPED_NMI + extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + beq a2, a0, .Lmedium_level_irq + bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception + beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0 + +.Lmedium_level_irq: + rsr a0, excsave2 + s32i a0, a1, PT_PS # save medium-level interrupt ps + bgei a3, LOCKLEVEL, .Lexception + +.Llevel1_irq: + movi a3, LOCKLEVEL + +.Lexception: + movi a0, 1 << PS_WOE_BIT + or a3, a3, a0 +#else + addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT + movi a0, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL - moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt + moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt movi a2, 1 << PS_WOE_BIT or a3, a3, a2 - rsr a0, exccause - xsr a3, ps + rsr a2, exccause +#endif - s32i a3, a1, PT_PS # save ps + /* restore return address (or 0 if return to userspace) */ + rsr a0, depc + wsr a3, ps + rsync # PS.WOE => rsync => overflow /* Save lbeg, lend */ - rsr a2, lbeg + rsr a4, lbeg rsr a3, lend - s32i a2, a1, PT_LBEG + s32i a4, a1, PT_LBEG s32i a3, a1, PT_LEND /* Save SCOMPARE1 */ #if XCHAL_HAVE_S32C1I - rsr a2, scompare1 - s32i a2, a1, PT_SCOMPARE1 + rsr a3, scompare1 + s32i a3, a1, PT_SCOMPARE1 #endif /* Save optional registers. */ - save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT + save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT -#ifdef CONFIG_TRACE_IRQFLAGS - l32i a4, a1, PT_DEPC - /* Double exception means we came here with an exception - * while PS.EXCM was set, i.e. interrupts disabled. - */ - bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - l32i a4, a1, PT_EXCCAUSE - bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f - /* We came here with an interrupt means interrupts were enabled - * and we've just disabled them. - */ - movi a4, trace_hardirqs_off - callx4 a4 -1: -#endif - /* Go to second-level dispatcher. Set up parameters to pass to the * exception handler and call the exception handler. */ rsr a4, excsave1 mov a6, a1 # pass stack frame - mov a7, a0 # pass EXCCAUSE - addx4 a4, a0, a4 + mov a7, a2 # pass EXCCAUSE + addx4 a4, a2, a4 l32i a4, a4, EXC_TABLE_DEFAULT # load handler /* Call the second-level handler */ @@ -419,8 +464,17 @@ common_exception: .global common_exception_return common_exception_return: +#if XTENSA_FAKE_NMI + l32i a2, a1, PT_EXCCAUSE + movi a3, EXCCAUSE_MAPPED_NMI + beq a2, a3, .LNMIexit +#endif 1: - rsil a2, LOCKLEVEL + irq_save a2, a3 +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_off + callx4 a4 +#endif /* Jump if we are returning from kernel exceptions. */ @@ -445,6 +499,10 @@ common_exception_return: /* Call do_signal() */ +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_on + callx4 a4 +#endif rsil a2, 0 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) mov a6, a1 @@ -453,6 +511,10 @@ common_exception_return: 3: /* Reschedule */ +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_on + callx4 a4 +#endif rsil a2, 0 movi a4, schedule # void schedule (void) callx4 a4 @@ -471,6 +533,12 @@ common_exception_return: j 1b #endif +#if XTENSA_FAKE_NMI +.LNMIexit: + l32i a3, a1, PT_PS + _bbci.l a3, PS_UM_BIT, 4f +#endif + 5: #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC @@ -481,16 +549,8 @@ common_exception_return: 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS - l32i a4, a1, PT_DEPC - /* Double exception means we came here with an exception - * while PS.EXCM was set, i.e. interrupts disabled. 
- */ - bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - l32i a4, a1, PT_EXCCAUSE - bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f - /* We came here with an interrupt means interrupts were enabled - * and we'll reenable them on return. - */ + extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + bgei a4, LOCKLEVEL, 1f movi a4, trace_hardirqs_on callx4 a4 1: @@ -568,12 +628,13 @@ user_exception_exit: * (if we have restored WSBITS-1 frames). */ +2: #if XCHAL_HAVE_THREADPTR l32i a3, a1, PT_THREADPTR wur a3, threadptr #endif -2: j common_exception_exit + j common_exception_exit /* This is the kernel exception exit. * We avoided to do a MOVSP when we entered the exception, but we @@ -1561,6 +1622,13 @@ ENTRY(fast_second_level_miss) rfde 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 + bnez a0, 8b + + /* Even more unlikely case active_mm == 0. + * We can get here with NMI in the middle of context_switch that + * touches vmalloc area. + */ + movi a0, init_mm j 8b #if (DCACHE_WAY_SIZE > PAGE_SIZE) @@ -1820,7 +1888,7 @@ ENDPROC(system_call) mov a12, a0 .endr #endif - _entry a1, 48 + _entry a1, 16 #if XCHAL_NUM_AREGS % 12 == 0 mov a8, a8 #elif XCHAL_NUM_AREGS % 12 == 4 @@ -1844,7 +1912,7 @@ ENDPROC(system_call) ENTRY(_switch_to) - entry a1, 16 + entry a1, 48 mov a11, a3 # and 'next' (a3) @@ -1864,10 +1932,8 @@ ENTRY(_switch_to) /* Disable ints while we manipulate the stack pointer. */ - rsil a14, LOCKLEVEL - rsr a3, excsave1 + irq_save a14, a3 rsync - s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ /* Switch CPENABLE */ @@ -1888,9 +1954,7 @@ ENTRY(_switch_to) */ rsr a3, excsave1 # exc_table - movi a6, 0 addi a7, a5, PT_REGS_OFFSET - s32i a6, a3, EXC_TABLE_FIXUP s32i a7, a3, EXC_TABLE_KSTK /* restore context of the task 'next' */ diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 3eee94f621eb..6df31cacc4b8 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -28,7 +28,7 @@ #include <asm/uaccess.h> #include <asm/platform.h> -atomic_t irq_err_count; +DECLARE_PER_CPU(unsigned long, nmi_count); asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { @@ -57,11 +57,16 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) int arch_show_interrupts(struct seq_file *p, int prec) { + unsigned cpu __maybe_unused; #ifdef CONFIG_SMP show_ipi_list(p, prec); #endif - seq_printf(p, "%*s: ", prec, "ERR"); - seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); +#if XTENSA_FAKE_NMI + seq_printf(p, "%*s:", prec, "NMI"); + for_each_online_cpu(cpu) + seq_printf(p, " %10lu", per_cpu(nmi_count, cpu)); + seq_puts(p, " Non-maskable interrupts\n"); +#endif return 0; } @@ -106,6 +111,12 @@ int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_set_chip_and_handler_name(irq, irq_chip, handle_percpu_irq, "timer"); irq_clear_status_flags(irq, IRQ_LEVEL); +#ifdef XCHAL_INTTYPE_MASK_PROFILING + } else if (mask & XCHAL_INTTYPE_MASK_PROFILING) { + irq_set_chip_and_handler_name(irq, irq_chip, + handle_percpu_irq, "profiling"); + irq_set_status_flags(irq, IRQ_LEVEL); +#endif } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ /* XCHAL_INTTYPE_MASK_NMI */ irq_set_chip_and_handler_name(irq, irq_chip, diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index e8b76b8e4b29..fb75ebf1463a 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -1,6 +1,4 @@ /* - * arch/xtensa/kernel/pci-dma.c - * * DMA coherent memory allocation. * * This program is free software; you can redistribute it and/or modify it @@ -9,6 +7,7 @@ * option) any later version. 
* * Copyright (C) 2002 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * * Based on version for i386. * @@ -25,13 +24,107 @@ #include <asm/io.h> #include <asm/cacheflush.h> +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_BIDIRECTIONAL: + __flush_invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_FROM_DEVICE: + __invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_TO_DEVICE: + __flush_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_NONE: + BUG(); + break; + } +} +EXPORT_SYMBOL(dma_cache_sync); + +static void xtensa_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + void *vaddr; + + switch (dir) { + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + vaddr = bus_to_virt(dma_handle); + __invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_NONE: + BUG(); + break; + + default: + break; + } +} + +static void xtensa_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + void *vaddr; + + switch (dir) { + case DMA_BIDIRECTIONAL: + case DMA_TO_DEVICE: + vaddr = bus_to_virt(dma_handle); + __flush_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_NONE: + BUG(); + break; + + default: + break; + } +} + +static void xtensa_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_sync_single_for_cpu(dev, sg_dma_address(s), + sg_dma_len(s), dir); + } +} + +static void xtensa_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_sync_single_for_device(dev, sg_dma_address(s), + sg_dma_len(s), dir); + } +} + /* * Note: We assume that the full memory space is always mapped to 'kseg' * Otherwise we have to use page attributes (not implemented). 
*/ -void * -dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) +static void *xtensa_dma_alloc(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t flag, + struct dma_attrs *attrs) { unsigned long ret; unsigned long uncached = 0; @@ -52,20 +145,15 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); + uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; + *handle = virt_to_bus((void *)ret); + __invalidate_dcache_range(ret, size); - if (ret != 0) { - memset((void*) ret, 0, size); - uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR; - *handle = virt_to_bus((void*)ret); - __flush_invalidate_dcache_range(ret, size); - } - - return (void*)uncached; + return (void *)uncached; } -EXPORT_SYMBOL(dma_alloc_coherent); -void dma_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) +static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle, struct dma_attrs *attrs) { unsigned long addr = (unsigned long)vaddr + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; @@ -75,24 +163,79 @@ void dma_free_coherent(struct device *hwdev, size_t size, free_pages(addr, get_order(size)); } -EXPORT_SYMBOL(dma_free_coherent); +static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + dma_addr_t dma_handle = page_to_phys(page) + offset; + + BUG_ON(PageHighMem(page)); + xtensa_sync_single_for_device(dev, dma_handle, size, dir); + return dma_handle; +} -void consistent_sync(void *vaddr, size_t size, int direction) +static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { - switch (direction) { - case PCI_DMA_NONE: - BUG(); - case PCI_DMA_FROMDEVICE: /* invalidate only */ - __invalidate_dcache_range((unsigned long)vaddr, - (unsigned long)size); - break; + xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); +} - case PCI_DMA_TODEVICE: /* writeback only */ - case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ - __flush_invalidate_dcache_range((unsigned long)vaddr, - (unsigned long)size); - break; +static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + } + return nents; +} + +static void xtensa_unmap_sg(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_unmap_page(dev, sg_dma_address(s), + sg_dma_len(s), dir, attrs); } } -EXPORT_SYMBOL(consistent_sync); + +int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} + +struct dma_map_ops xtensa_dma_map_ops = { + .alloc = xtensa_dma_alloc, + .free = xtensa_dma_free, + .map_page = xtensa_map_page, + .unmap_page = xtensa_unmap_page, + .map_sg = xtensa_map_sg, + .unmap_sg = xtensa_unmap_sg, + .sync_single_for_cpu = xtensa_sync_single_for_cpu, + .sync_single_for_device = xtensa_sync_single_for_device, + .sync_sg_for_cpu = xtensa_sync_sg_for_cpu, + .sync_sg_for_device = xtensa_sync_sg_for_device, + 
.mapping_error = xtensa_dma_mapping_error, +}; +EXPORT_SYMBOL(xtensa_dma_map_ops); + +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) + +static int __init xtensa_dma_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + return 0; +} +fs_initcall(xtensa_dma_init); diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index b848cc3dc913..d27b4dcf221f 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -210,10 +210,6 @@ subsys_initcall(pcibios_init); void pcibios_fixup_bus(struct pci_bus *bus) { - if (bus->parent) { - /* This is a subordinate bridge */ - pci_read_bridge_bases(bus); - } } void pcibios_set_master(struct pci_dev *dev) diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c new file mode 100644 index 000000000000..54f01188c29c --- /dev/null +++ b/arch/xtensa/kernel/perf_event.c @@ -0,0 +1,454 @@ +/* + * Xtensa Performance Monitor Module driver + * See Tensilica Debug User's Guide for PMU registers documentation. + * + * Copyright (C) 2015 Cadence Design Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +#include <asm/processor.h> +#include <asm/stacktrace.h> + +/* Global control/status for all perf counters */ +#define XTENSA_PMU_PMG 0x1000 +/* Perf counter values */ +#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4) +/* Perf counter control registers */ +#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4) +/* Perf counter status registers */ +#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4) + +#define XTENSA_PMU_PMG_PMEN 0x1 + +#define XTENSA_PMU_COUNTER_MASK 0xffffffffULL +#define XTENSA_PMU_COUNTER_MAX 0x7fffffff + +#define XTENSA_PMU_PMCTRL_INTEN 0x00000001 +#define XTENSA_PMU_PMCTRL_KRNLCNT 0x00000008 +#define XTENSA_PMU_PMCTRL_TRACELEVEL 0x000000f0 +#define XTENSA_PMU_PMCTRL_SELECT_SHIFT 8 +#define XTENSA_PMU_PMCTRL_SELECT 0x00001f00 +#define XTENSA_PMU_PMCTRL_MASK_SHIFT 16 +#define XTENSA_PMU_PMCTRL_MASK 0xffff0000 + +#define XTENSA_PMU_MASK(select, mask) \ + (((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \ + ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \ + XTENSA_PMU_PMCTRL_TRACELEVEL | \ + XTENSA_PMU_PMCTRL_INTEN) + +#define XTENSA_PMU_PMSTAT_OVFL 0x00000001 +#define XTENSA_PMU_PMSTAT_INTASRT 0x00000010 + +struct xtensa_pmu_events { + /* Array of events currently on this core */ + struct perf_event *event[XCHAL_NUM_PERF_COUNTERS]; + /* Bitmap of used hardware counters */ + unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)]; +}; +static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events); + +static const u32 xtensa_hw_ctl[] = { + [PERF_COUNT_HW_CPU_CYCLES] = XTENSA_PMU_MASK(0, 0x1), + [PERF_COUNT_HW_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0xffff), + [PERF_COUNT_HW_CACHE_REFERENCES] = XTENSA_PMU_MASK(10, 0x1), + [PERF_COUNT_HW_CACHE_MISSES] = XTENSA_PMU_MASK(12, 0x1), + /* Taken and non-taken branches + taken loop ends */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0x490), + /* Instruction-related + other global stall cycles */ + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XTENSA_PMU_MASK(4, 0x1ff), + /* Data-related global stall cycles */ + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = XTENSA_PMU_MASK(3, 0x1ff), +}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x + 
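To make the XTENSA_PMU_MASK() encoding above concrete, here is one of the xtensa_hw_ctl[] entries expanded by hand. The field values follow directly from the #defines above; only the annotations on the right are added for illustration:

/*
 * XTENSA_PMU_MASK(2, 0x490) -- the PERF_COUNT_HW_BRANCH_INSTRUCTIONS entry --
 * packs "event select 2" and event mask 0x490 into a single PMCTRL value:
 *
 *   (2     <<  8) = 0x00000200   SELECT field
 *   (0x490 << 16) = 0x04900000   MASK field (taken/non-taken branches, loop ends)
 *   TRACELEVEL    = 0x000000f0   driver default: all trace-level bits set
 *   INTEN         = 0x00000001   raise the profiling interrupt on overflow
 *                   ----------
 *   PMCTRL value  = 0x049002f1
 */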
+static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(10, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(10, 0x2), + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(11, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(11, 0x2), + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(8, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(8, 0x2), + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(9, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(9, 0x8), + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(7, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(7, 0x8), + }, + }, +}; + +static int xtensa_pmu_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + int ret; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + + if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) || + cache_op >= C(OP_MAX) || + cache_result >= C(RESULT_MAX)) + return -EINVAL; + + ret = xtensa_cache_ctl[cache_type][cache_op][cache_result]; + + if (ret == 0) + return -EINVAL; + + return ret; +} + +static inline uint32_t xtensa_pmu_read_counter(int idx) +{ + return get_er(XTENSA_PMU_PM(idx)); +} + +static inline void xtensa_pmu_write_counter(int idx, uint32_t v) +{ + set_er(v, XTENSA_PMU_PM(idx)); +} + +static void xtensa_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + uint64_t prev_raw_count, new_raw_count; + int64_t delta; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = xtensa_pmu_read_counter(event->hw.idx); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static bool xtensa_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + bool rc = false; + s64 left; + + if (!is_sampling_event(event)) { + left = XTENSA_PMU_COUNTER_MAX; + } else { + s64 period = hwc->sample_period; + + left = local64_read(&hwc->period_left); + if (left <= -period) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + rc = true; + } else if (left <= 0) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + rc = true; + } + if (left > XTENSA_PMU_COUNTER_MAX) + left = XTENSA_PMU_COUNTER_MAX; + } + + local64_set(&hwc->prev_count, -left); + xtensa_pmu_write_counter(idx, -left); + perf_event_update_userpage(event); + + return rc; +} + +static void xtensa_pmu_enable(struct pmu *pmu) +{ + set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); +} + +static void xtensa_pmu_disable(struct pmu *pmu) +{ + set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); +} + +static int xtensa_pmu_event_init(struct perf_event *event) +{ + int ret; + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) || + xtensa_hw_ctl[event->attr.config] == 0) + return -EINVAL; + event->hw.config = xtensa_hw_ctl[event->attr.config]; + return 0; + + case PERF_TYPE_HW_CACHE: + ret = xtensa_pmu_cache_event(event->attr.config); + if (ret < 0) + return ret; + event->hw.config = ret; + return 0; + + case PERF_TYPE_RAW: + /* Not 'previous counter' select */ + if ((event->attr.config 
& XTENSA_PMU_PMCTRL_SELECT) == + (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT)) + return -EINVAL; + event->hw.config = (event->attr.config & + (XTENSA_PMU_PMCTRL_KRNLCNT | + XTENSA_PMU_PMCTRL_TRACELEVEL | + XTENSA_PMU_PMCTRL_SELECT | + XTENSA_PMU_PMCTRL_MASK)) | + XTENSA_PMU_PMCTRL_INTEN; + return 0; + + default: + return -ENOENT; + } +} + +/* + * Starts/Stops a counter present on the PMU. The PMI handler + * should stop the counter when perf_event_overflow() returns + * !0. ->start() will be used to continue. + */ +static void xtensa_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + xtensa_perf_event_set_period(event, hwc, idx); + } + + hwc->state = 0; + + set_er(hwc->config, XTENSA_PMU_PMCTRL(idx)); +} + +static void xtensa_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(hwc->state & PERF_HES_STOPPED)) { + set_er(0, XTENSA_PMU_PMCTRL(idx)); + set_er(get_er(XTENSA_PMU_PMSTAT(idx)), + XTENSA_PMU_PMSTAT(idx)); + hwc->state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + xtensa_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +/* + * Adds/Removes a counter to/from the PMU, can be done inside + * a transaction, see the ->*_txn() methods. + */ +static int xtensa_pmu_add(struct perf_event *event, int flags) +{ + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (__test_and_set_bit(idx, ev->used_mask)) { + idx = find_first_zero_bit(ev->used_mask, + XCHAL_NUM_PERF_COUNTERS); + if (idx == XCHAL_NUM_PERF_COUNTERS) + return -EAGAIN; + + __set_bit(idx, ev->used_mask); + hwc->idx = idx; + } + ev->event[idx] = event; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + xtensa_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + return 0; +} + +static void xtensa_pmu_del(struct perf_event *event, int flags) +{ + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + + xtensa_pmu_stop(event, PERF_EF_UPDATE); + __clear_bit(event->hw.idx, ev->used_mask); + perf_event_update_userpage(event); +} + +static void xtensa_pmu_read(struct perf_event *event) +{ + xtensa_perf_event_update(event, &event->hw, event->hw.idx); +} + +static int callchain_trace(struct stackframe *frame, void *data) +{ + struct perf_callchain_entry *entry = data; + + perf_callchain_store(entry, frame->pc); + return 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + xtensa_backtrace_kernel(regs, PERF_MAX_STACK_DEPTH, + callchain_trace, NULL, entry); +} + +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + xtensa_backtrace_user(regs, PERF_MAX_STACK_DEPTH, + callchain_trace, entry); +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned i; + + local_irq_save(flags); + pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(), + get_er(XTENSA_PMU_PMG)); + for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) + pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n", + i, get_er(XTENSA_PMU_PM(i)), + i, get_er(XTENSA_PMU_PMCTRL(i)), + i, get_er(XTENSA_PMU_PMSTAT(i))); + local_irq_restore(flags); +} + +irqreturn_t 
xtensa_pmu_irq_handler(int irq, void *dev_id) +{ + irqreturn_t rc = IRQ_NONE; + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + unsigned i; + + for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS); + i < XCHAL_NUM_PERF_COUNTERS; + i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) { + uint32_t v = get_er(XTENSA_PMU_PMSTAT(i)); + struct perf_event *event = ev->event[i]; + struct hw_perf_event *hwc = &event->hw; + u64 last_period; + + if (!(v & XTENSA_PMU_PMSTAT_OVFL)) + continue; + + set_er(v, XTENSA_PMU_PMSTAT(i)); + xtensa_perf_event_update(event, hwc, i); + last_period = hwc->last_period; + if (xtensa_perf_event_set_period(event, hwc, i)) { + struct perf_sample_data data; + struct pt_regs *regs = get_irq_regs(); + + perf_sample_data_init(&data, 0, last_period); + if (perf_event_overflow(event, &data, regs)) + xtensa_pmu_stop(event, 0); + } + + rc = IRQ_HANDLED; + } + return rc; +} + +static struct pmu xtensa_pmu = { + .pmu_enable = xtensa_pmu_enable, + .pmu_disable = xtensa_pmu_disable, + .event_init = xtensa_pmu_event_init, + .add = xtensa_pmu_add, + .del = xtensa_pmu_del, + .start = xtensa_pmu_start, + .stop = xtensa_pmu_stop, + .read = xtensa_pmu_read, +}; + +static void xtensa_pmu_setup(void) +{ + unsigned i; + + set_er(0, XTENSA_PMU_PMG); + for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) { + set_er(0, XTENSA_PMU_PMCTRL(i)); + set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i)); + } +} + +static int xtensa_pmu_notifier(struct notifier_block *self, + unsigned long action, void *data) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + xtensa_pmu_setup(); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static int __init xtensa_pmu_init(void) +{ + int ret; + int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT); + + perf_cpu_notifier(xtensa_pmu_notifier); +#if XTENSA_FAKE_NMI + enable_irq(irq); +#else + ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU, + "pmu", NULL); + if (ret < 0) + return ret; +#endif + + ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW); + if (ret) + free_irq(irq, NULL); + + return ret; +} +early_initcall(xtensa_pmu_init); diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c index 7d2c317bd98b..7538d802b65a 100644 --- a/arch/xtensa/kernel/stacktrace.c +++ b/arch/xtensa/kernel/stacktrace.c @@ -1,11 +1,12 @@ /* - * arch/xtensa/kernel/stacktrace.c + * Kernel and userspace stack tracing. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2013 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #include <linux/export.h> #include <linux/sched.h> @@ -13,6 +14,170 @@ #include <asm/stacktrace.h> #include <asm/traps.h> +#include <asm/uaccess.h> + +#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS) + +/* Address of common_exception_return, used to check the + * transition from kernel to user space. + */ +extern int common_exception_return; + +/* A struct that maps to the part of the frame containing the a0 and + * a1 registers. 
+ */ +struct frame_start { + unsigned long a0; + unsigned long a1; +}; + +void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, + int (*ufn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long windowstart = regs->windowstart; + unsigned long windowbase = regs->windowbase; + unsigned long a0 = regs->areg[0]; + unsigned long a1 = regs->areg[1]; + unsigned long pc = regs->pc; + struct stackframe frame; + int index; + + if (!depth--) + return; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + + /* Two steps: + * + * 1. Look through the register window for the + * previous PCs in the call trace. + * + * 2. Look on the stack. + */ + + /* Step 1. */ + /* Rotate WINDOWSTART to move the bit corresponding to + * the current window to the bit #0. + */ + windowstart = (windowstart << WSBITS | windowstart) >> windowbase; + + /* Look for bits that are set, they correspond to + * valid windows. + */ + for (index = WSBITS - 1; (index > 0) && depth; depth--, index--) + if (windowstart & (1 << index)) { + /* Get the PC from a0 and a1. */ + pc = MAKE_PC_FROM_RA(a0, pc); + /* Read a0 and a1 from the + * corresponding position in AREGs. + */ + a0 = regs->areg[index * 4]; + a1 = regs->areg[index * 4 + 1]; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + } + + /* Step 2. */ + /* We are done with the register window, we need to + * look through the stack. + */ + if (!depth) + return; + + /* Start from the a1 register. */ + /* a1 = regs->areg[1]; */ + while (a0 != 0 && depth--) { + struct frame_start frame_start; + /* Get the location for a1, a0 for the + * previous frame from the current a1. + */ + unsigned long *psp = (unsigned long *)a1; + + psp -= 4; + + /* Check if the region is OK to access. */ + if (!access_ok(VERIFY_READ, psp, sizeof(frame_start))) + return; + /* Copy a1, a0 from user space stack frame. */ + if (__copy_from_user_inatomic(&frame_start, psp, + sizeof(frame_start))) + return; + + pc = MAKE_PC_FROM_RA(a0, pc); + a0 = frame_start.a0; + a1 = frame_start.a1; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + } +} +EXPORT_SYMBOL(xtensa_backtrace_user); + +void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth, + int (*kfn)(struct stackframe *frame, void *data), + int (*ufn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ? + regs->depc : regs->pc; + unsigned long sp_start, sp_end; + unsigned long a0 = regs->areg[0]; + unsigned long a1 = regs->areg[1]; + + sp_start = a1 & ~(THREAD_SIZE - 1); + sp_end = sp_start + THREAD_SIZE; + + /* Spill the register window to the stack first. */ + spill_registers(); + + /* Read the stack frames one by one and create the PC + * from the a0 and a1 registers saved there. 
+ */ + while (a1 > sp_start && a1 < sp_end && depth--) { + struct stackframe frame; + unsigned long *psp = (unsigned long *)a1; + + frame.pc = pc; + frame.sp = a1; + + if (kernel_text_address(pc) && kfn(&frame, data)) + return; + + if (pc == (unsigned long)&common_exception_return) { + regs = (struct pt_regs *)a1; + if (user_mode(regs)) { + if (ufn == NULL) + return; + xtensa_backtrace_user(regs, depth, ufn, data); + return; + } + a0 = regs->areg[0]; + a1 = regs->areg[1]; + continue; + } + + sp_start = a1; + + pc = MAKE_PC_FROM_RA(a0, pc); + a0 = *(psp - 4); + a1 = *(psp - 3); + } +} +EXPORT_SYMBOL(xtensa_backtrace_kernel); + +#endif void walk_stackframe(unsigned long *sp, int (*fn)(struct stackframe *frame, void *data), diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 9d2f45f010ef..42d441f7898b 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -62,6 +62,7 @@ extern void fast_coprocessor(void); extern void do_illegal_instruction (struct pt_regs*); extern void do_interrupt (struct pt_regs*); +extern void do_nmi(struct pt_regs *); extern void do_unaligned_user (struct pt_regs*); extern void do_multihit (struct pt_regs*, unsigned long); extern void do_page_fault (struct pt_regs*, unsigned long); @@ -146,6 +147,9 @@ COPROCESSOR(6), #if XTENSA_HAVE_COPROCESSOR(7) COPROCESSOR(7), #endif +#if XTENSA_FAKE_NMI +{ EXCCAUSE_MAPPED_NMI, 0, do_nmi }, +#endif { EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, { -1, -1, 0 } @@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) extern void do_IRQ(int, struct pt_regs *); +#if XTENSA_FAKE_NMI + +irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id); + +DEFINE_PER_CPU(unsigned long, nmi_count); + +void do_nmi(struct pt_regs *regs) +{ + struct pt_regs *old_regs; + + if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL) + trace_hardirqs_off(); + + old_regs = set_irq_regs(regs); + nmi_enter(); + ++*this_cpu_ptr(&nmi_count); + xtensa_pmu_irq_handler(0, NULL); + nmi_exit(); + set_irq_regs(old_regs); +} +#endif + void do_interrupt(struct pt_regs *regs) { static const unsigned int_level_mask[] = { @@ -211,8 +237,11 @@ void do_interrupt(struct pt_regs *regs) XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL7_MASK, }; - struct pt_regs *old_regs = set_irq_regs(regs); + struct pt_regs *old_regs; + + trace_hardirqs_off(); + old_regs = set_irq_regs(regs); irq_enter(); for (;;) { diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 1b397a902292..abcdb527f18a 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector) wsr a0, excsave2 rsr a0, epc\level wsr a0, epc1 + .if \level <= LOCKLEVEL movi a0, EXCCAUSE_LEVEL1_INTERRUPT + .else + movi a0, EXCCAUSE_MAPPED_NMI + .endif wsr a0, exccause rsr a0, eps\level # branch to user or kernel vector @@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4) .align 4 _SimulateUserKernelVectorException: addi a0, a0, (1 << PS_EXCM_BIT) +#if !XTENSA_FAKE_NMI wsr a0, ps +#endif bbsi.l a0, PS_UM_BIT, 1f # branch if user mode - rsr a0, excsave2 # restore a0 + xsr a0, excsave2 # restore a0 j _KernelExceptionVector # simulate kernel vector exception -1: rsr a0, excsave2 # restore a0 +1: xsr a0, excsave2 # restore a0 j _UserExceptionVector # simulate user vector exception #endif diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 83a44a33cfa1..c9784c1b18d8 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -15,6 +15,7 @@ #include <linux/mm.h> #include <linux/module.h> 
#include <linux/hardirq.h> +#include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> @@ -142,6 +143,12 @@ good_area: } up_read(&mm->mmap_sem); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + if (flags & VM_FAULT_MAJOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + else if (flags & VM_FAULT_MINOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + return; /* Something tried to access memory that isn't in our memory map.. diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c index 5f03a593d84f..8f952034e161 100644 --- a/arch/xtensa/oprofile/backtrace.c +++ b/arch/xtensa/oprofile/backtrace.c @@ -2,168 +2,26 @@ * @file backtrace.c * * @remark Copyright 2008 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * @remark Read the file COPYING * */ #include <linux/oprofile.h> -#include <linux/sched.h> -#include <linux/mm.h> #include <asm/ptrace.h> -#include <asm/uaccess.h> -#include <asm/traps.h> +#include <asm/stacktrace.h> -/* Address of common_exception_return, used to check the - * transition from kernel to user space. - */ -extern int common_exception_return; - -/* A struct that maps to the part of the frame containing the a0 and - * a1 registers. - */ -struct frame_start { - unsigned long a0; - unsigned long a1; -}; - -static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth) -{ - unsigned long windowstart = regs->windowstart; - unsigned long windowbase = regs->windowbase; - unsigned long a0 = regs->areg[0]; - unsigned long a1 = regs->areg[1]; - unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc); - int index; - - /* First add the current PC to the trace. */ - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - - /* Two steps: - * - * 1. Look through the register window for the - * previous PCs in the call trace. - * - * 2. Look on the stack. - */ - - /* Step 1. */ - /* Rotate WINDOWSTART to move the bit corresponding to - * the current window to the bit #0. - */ - windowstart = (windowstart << WSBITS | windowstart) >> windowbase; - - /* Look for bits that are set, they correspond to - * valid windows. - */ - for (index = WSBITS - 1; (index > 0) && depth; depth--, index--) - if (windowstart & (1 << index)) { - /* Read a0 and a1 from the - * corresponding position in AREGs. - */ - a0 = regs->areg[index * 4]; - a1 = regs->areg[index * 4 + 1]; - /* Get the PC from a0 and a1. */ - pc = MAKE_PC_FROM_RA(a0, pc); - - /* Add the PC to the trace. */ - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - } - - /* Step 2. */ - /* We are done with the register window, we need to - * look through the stack. - */ - if (depth > 0) { - /* Start from the a1 register. */ - /* a1 = regs->areg[1]; */ - while (a0 != 0 && depth--) { - - struct frame_start frame_start; - /* Get the location for a1, a0 for the - * previous frame from the current a1. - */ - unsigned long *psp = (unsigned long *)a1; - psp -= 4; - - /* Check if the region is OK to access. */ - if (!access_ok(VERIFY_READ, psp, sizeof(frame_start))) - return; - /* Copy a1, a0 from user space stack frame. 
*/ - if (__copy_from_user_inatomic(&frame_start, psp, - sizeof(frame_start))) - return; - - a0 = frame_start.a0; - a1 = frame_start.a1; - pc = MAKE_PC_FROM_RA(a0, pc); - - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - } - } -} - -static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth) +static int xtensa_backtrace_cb(struct stackframe *frame, void *data) { - unsigned long pc = regs->pc; - unsigned long *psp; - unsigned long sp_start, sp_end; - unsigned long a0 = regs->areg[0]; - unsigned long a1 = regs->areg[1]; - - sp_start = a1 & ~(THREAD_SIZE-1); - sp_end = sp_start + THREAD_SIZE; - - /* Spill the register window to the stack first. */ - spill_registers(); - - /* Read the stack frames one by one and create the PC - * from the a0 and a1 registers saved there. - */ - while (a1 > sp_start && a1 < sp_end && depth--) { - pc = MAKE_PC_FROM_RA(a0, pc); - - /* Add the PC to the trace. */ - oprofile_add_trace(pc); - if (pc == (unsigned long) &common_exception_return) { - regs = (struct pt_regs *)a1; - if (user_mode(regs)) { - pc = regs->pc; - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - return xtensa_backtrace_user(regs, depth); - } - a0 = regs->areg[0]; - a1 = regs->areg[1]; - continue; - } - - psp = (unsigned long *)a1; - - a0 = *(psp - 4); - a1 = *(psp - 3); - - if (a1 <= (unsigned long)psp) - return; - - } - return; + oprofile_add_trace(frame->pc); + return 0; } void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth) { if (user_mode(regs)) - xtensa_backtrace_user(regs, depth); + xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL); else - xtensa_backtrace_kernel(regs, depth); + xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb, + xtensa_backtrace_cb, NULL); } diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 8ab021b1f141..976a38594537 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -105,13 +105,17 @@ static char *split_if_spec(char *str, ...) va_start(ap, str); while ((arg = va_arg(ap, char**)) != NULL) { - if (*str == '\0') + if (*str == '\0') { + va_end(ap); return NULL; + } end = strchr(str, ','); if (end != str) *arg = str; - if (end == NULL) + if (end == NULL) { + va_end(ap); return NULL; + } *end++ = '\0'; str = end; } |
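A minimal user-space sketch of the pattern the final hunk enforces in split_if_spec(): once va_start() has run, every return path, including the early error exits, must call va_end() before returning. The helper name, the caller example, and the reconstructed loop tail below are assumptions for illustration only, not the kernel function itself.

#include <stdarg.h>
#include <stddef.h>
#include <string.h>

/* Split a comma-separated spec into the caller-supplied char * slots.
 * Returns the unconsumed tail of the string, or NULL on a malformed spec.
 * Note that both early exits release the va_list before returning.
 */
static char *split_spec(char *str, ...)
{
	va_list ap;
	char **arg;

	va_start(ap, str);
	while ((arg = va_arg(ap, char **)) != NULL) {
		char *end;

		if (*str == '\0') {
			va_end(ap);	/* early exit: release ap first */
			return NULL;
		}
		end = strchr(str, ',');
		if (end != str)
			*arg = str;
		if (end == NULL) {
			va_end(ap);	/* early exit: release ap first */
			return NULL;
		}
		*end++ = '\0';
		str = end;
	}
	va_end(ap);			/* normal exit */
	return str;
}

For example, split_spec(buf, &mac, &name, (char **)NULL) would fill two fields from buf and return a pointer to whatever follows the second comma, or NULL if either field is missing; with the early va_end() calls in place, no exit path leaks the va_list.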