Diffstat (limited to 'arch')
221 files changed, 1744 insertions, 1360 deletions
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 612c5eca71bc..7c08b225261c 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -23,7 +23,7 @@ alpha_read_fp_reg (unsigned long reg)
 
 	if (unlikely(reg >= 32))
 		return 0;
-	preempt_enable();
+	preempt_disable();
 	if (current_thread_info()->status & TS_SAVED_FP)
 		val = current_thread_info()->fp[reg];
 	else switch (reg) {
@@ -133,7 +133,7 @@ alpha_read_fp_reg_s (unsigned long reg)
 
 	if (unlikely(reg >= 32))
 		return 0;
-	preempt_enable();
+	preempt_disable();
 	if (current_thread_info()->status & TS_SAVED_FP) {
 		LDT(0, current_thread_info()->fp[reg]);
 		STS(0, val);
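The alpha hunks above are a one-word fix with real consequences: the functions meant to disable preemption before reading the current thread's lazily-saved FP state, but called preempt_enable() instead, unbalancing the preempt count. A minimal C sketch of the intended bracket follows; thread_fp_reg() is a hypothetical stand-in for the current_thread_info() access, not the real alpha helper.

#include <linux/preempt.h>

/* Hypothetical stand-in for the current_thread_info()->fp[] lookup. */
extern unsigned long thread_fp_reg(unsigned long reg);

static unsigned long read_fp_reg_sketch(unsigned long reg)
{
	unsigned long val;

	preempt_disable();		/* the fix: disable, not enable */
	val = thread_fp_reg(reg);	/* per-thread state; task must not migrate */
	preempt_enable();		/* balanced re-enable on the way out */

	return val;
}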
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 2ef651a78fa2..726ecabcef09 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -107,7 +107,7 @@ ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
+KBSS_SZ = $(shell echo $$(($$($(NM) vmlinux | \
 	sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
 	       -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi
index 94944cc21931..dd03e3860f97 100644
--- a/arch/arm/boot/dts/e60k02.dtsi
+++ b/arch/arm/boot/dts/e60k02.dtsi
@@ -311,6 +311,7 @@
 
 &usbotg1 {
 	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg1>;
 	disable-over-current;
 	srp-disable;
 	hnp-disable;
diff --git a/arch/arm/boot/dts/e70k02.dtsi b/arch/arm/boot/dts/e70k02.dtsi
index ace3eb8a97b8..4e1bf080eaca 100644
--- a/arch/arm/boot/dts/e70k02.dtsi
+++ b/arch/arm/boot/dts/e70k02.dtsi
@@ -321,6 +321,7 @@
 
 &usbotg1 {
 	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg1>;
 	disable-over-current;
 	srp-disable;
 	hnp-disable;
diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
index da1399057634..815119c12bd4 100644
--- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
@@ -625,6 +625,7 @@
 
 &usbotg1 {
 	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg1>;
 	disable-over-current;
 	srp-disable;
 	hnp-disable;
diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi
index bf64ba84b358..fde8a19aac0f 100644
--- a/arch/arm/boot/dts/imx6ull-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi
@@ -33,15 +33,9 @@
 		self-powered;
 		type = "micro";
 
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-				usb_dr_connector: endpoint {
-					remote-endpoint = <&usb1_drd_sw>;
-				};
+		port {
+			usb_dr_connector: endpoint {
+				remote-endpoint = <&usb1_drd_sw>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/imx7d-remarkable2.dts b/arch/arm/boot/dts/imx7d-remarkable2.dts
index 8b2f11e85e05..427f8d04ec89 100644
--- a/arch/arm/boot/dts/imx7d-remarkable2.dts
+++ b/arch/arm/boot/dts/imx7d-remarkable2.dts
@@ -118,8 +118,6 @@
 		reg = <0x62>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_epdpmic>;
-		#address-cells = <1>;
-		#size-cells = <0>;
 		#thermal-sensor-cells = <0>;
 		epd-pwr-good-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
index de2fb1c01b6e..b82381229adf 100644
--- a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
+++ b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
@@ -27,6 +27,16 @@
 	};
 
 	reserved-memory {
+		sbl_region: sbl@2f00000 {
+			reg = <0x02f00000 0x100000>;
+			no-map;
+		};
+
+		external_image_region: external-image@3100000 {
+			reg = <0x03100000 0x200000>;
+			no-map;
+		};
+
 		adsp_region: adsp@3300000 {
 			reg = <0x03300000 0x1400000>;
 			no-map;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 2ca76b69add7..511ca864c1b2 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -942,7 +942,7 @@
 		status = "disabled";
 	};
 
-	spdif: sound@ff88b0000 {
+	spdif: sound@ff8b0000 {
 		compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
 		reg = <0x0 0xff8b0000 0x0 0x10000>;
 		#sound-dai-cells = <0>;
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 6dc6fed12af8..8d002c6e6cb3 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -76,7 +76,7 @@ CONFIG_RFKILL=y
 CONFIG_RFKILL_INPUT=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
-CONFIG_PCI_IMX6=y
+CONFIG_PCI_IMX6_HOST=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 06b48ce23e1c..505a306e0271 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -244,19 +244,6 @@ THUMB(	fpreg	.req	r7	)
 	.endm
 #endif
 
-	.macro	local_bh_disable, ti, tmp
-	ldr	\tmp, [\ti, #TI_PREEMPT]
-	add	\tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
-	str	\tmp, [\ti, #TI_PREEMPT]
-	.endm
-
-	.macro	local_bh_enable_ti, ti, tmp
-	get_thread_info \ti
-	ldr	\tmp, [\ti, #TI_PREEMPT]
-	sub	\tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
-	str	\tmp, [\ti, #TI_PREEMPT]
-	.endm
-
 #define USERL(l, x...)				\
 9999:	x;					\
 	.pushsection __ex_table,"a";		\
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 14eecaaf295f..e4c2677cc1e9 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -116,7 +116,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 			tocopy = n;
 
 		ua_flags = uaccess_save_and_enable();
-		memcpy((void *)to, from, tocopy);
+		__memcpy((void *)to, from, tocopy);
 		uaccess_restore(ua_flags);
 
 		to += tocopy;
 		from += tocopy;
@@ -178,7 +178,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 			tocopy = n;
 
 		ua_flags = uaccess_save_and_enable();
-		memset((void *)addr, 0, tocopy);
+		__memset((void *)addr, 0, tocopy);
 		uaccess_restore(ua_flags);
 
 		addr += tocopy;
 		n -= tocopy;
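The uaccess_with_memcpy change above swaps the instrumented memcpy()/memset() for the raw __memcpy()/__memset() aliases while the user-access window is open, so no out-of-line wrapper runs with user access enabled. A hedged sketch of one iteration of that copy loop; the uaccess_save_and_enable()/uaccess_restore() pair is the ARM API visible in the hunk, the rest is illustrative.

#include <linux/uaccess.h>

/* One chunk of the loop: open the window, raw copy, close the window. */
static void copy_chunk(void __user *to, const void *from, unsigned long tocopy)
{
	unsigned int ua_flags;

	ua_flags = uaccess_save_and_enable();	/* open user access */
	__memcpy((void *)to, from, tocopy);	/* raw, uninstrumented copy */
	uaccess_restore(ua_flags);		/* close it before anything else runs */
}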
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 9a89264cdcc0..7483ef8bccda 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,18 +22,7 @@
 @  IRQs enabled.
 @
 ENTRY(do_vfp)
-	local_bh_disable r10, r4
-	ldr	r4, .LCvfp
-	ldr	r11, [r10, #TI_CPU]	@ CPU number
-	add	r10, r10, #TI_VFPSTATE	@ r10 = workspace
-	ldr	pc, [r4]		@ call VFP entry point
+	mov	r1, r10
+	mov	r3, r9
+	b	vfp_entry
 ENDPROC(do_vfp)
-
-ENTRY(vfp_null_entry)
-	local_bh_enable_ti r10, r4
-	ret	lr
-ENDPROC(vfp_null_entry)
-
-	.align	2
-.LCvfp:
-	.word	vfp_vector
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 26c4f61ecfa3..4d8478264d82 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -6,9 +6,9 @@
  * Written by Deep Blue Solutions Limited.
  *
  * This code is called from the kernel's undefined instruction trap.
- * r9 holds the return address for successful handling.
+ * r1 holds the thread_info pointer
+ * r3 holds the return address for successful handling.
  * lr holds the return address for unrecognised instructions.
- * r10 points at the start of the private FP workspace in the thread structure
  * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
 #include <linux/init.h>
@@ -69,13 +69,15 @@
 @  VFP hardware support entry point.
 @
 @  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@  r1  = thread_info pointer
 @  r2  = PC value to resume execution after successful emulation
-@  r9  = normal "successful" return address
-@  r10 = vfp_state union
-@  r11 = CPU number
+@  r3  = normal "successful" return address
 @  lr  = unrecognised instruction return address
 @  IRQs enabled.
 ENTRY(vfp_support_entry)
+	ldr	r11, [r1, #TI_CPU]	@ CPU number
+	add	r10, r1, #TI_VFPSTATE	@ r10 = workspace
+
 	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10
 
 	.fpu	vfpv2
@@ -85,9 +87,9 @@ ENTRY(vfp_support_entry)
 	bne	look_for_VFP_exceptions	@ VFP is already enabled
 
 	DBGSTR1	"enable %x", r10
-	ldr	r3, vfp_current_hw_state_address
+	ldr	r9, vfp_current_hw_state_address
 	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
-	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
+	ldr	r4, [r9, r11, lsl #2]	@ vfp_current_hw_state pointer
 	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
 	cmp	r4, r10			@ this thread owns the hw context?
 #ifndef CONFIG_SMP
@@ -146,7 +148,7 @@ vfp_reload_hw:
 #endif
 
 	DBGSTR1	"load state %p", r10
-	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
+	str	r10, [r9, r11, lsl #2]	@ update the vfp_current_hw_state pointer
 	@ Load the saved state back into the VFP
 	VFPFLDMIA r10, r5		@ reload the working registers while
 					@ FPEXC is in a safe state
@@ -175,9 +177,12 @@ vfp_hw_state_valid:
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
-	local_bh_enable_ti r10, r4
-	ret	r9			@ we think we have handled things
+	mov	lr, r3			@ we think we have handled things
+local_bh_enable_and_ret:
+	adr	r0, .
+	mov	r1, #SOFTIRQ_DISABLE_OFFSET
+	b	__local_bh_enable_ip	@ tail call
 
 look_for_VFP_exceptions:
 	@ Check for synchronous or asynchronous exception
@@ -200,13 +205,12 @@ skip:
 	@ not recognised by VFP
 
 	DBGSTR	"not VFP"
-	local_bh_enable_ti r10, r4
-	ret	lr
+	b	local_bh_enable_and_ret
 
 process_exception:
 	DBGSTR	"bounce"
 	mov	r2, sp			@ nothing stacked - regdump is at TOS
-	mov	lr, r9			@ setup for a return to the user code.
+	mov	lr, r3			@ setup for a return to the user code.
 
 	@ Now call the C code to package up the bounce to the support code
 	@ r0 holds the trigger instruction
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 01bc48d73847..349dcb944a93 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -32,10 +32,9 @@
 /*
  * Our undef handlers (in entry.S)
  */
-asmlinkage void vfp_support_entry(void);
-asmlinkage void vfp_null_entry(void);
+asmlinkage void vfp_support_entry(u32, void *, u32, u32);
 
-asmlinkage void (*vfp_vector)(void) = vfp_null_entry;
+static bool have_vfp __ro_after_init;
 
 /*
  * Dual-use variable.
@@ -645,6 +644,25 @@ static int vfp_starting_cpu(unsigned int unused)
 	return 0;
 }
 
+/*
+ * Entered with:
+ *
+ *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ *  r1  = thread_info pointer
+ *  r2  = PC value to resume execution after successful emulation
+ *  r3  = normal "successful" return address
+ *  lr  = unrecognised instruction return address
+ */
+asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc,
+			  u32 resume_return_address)
+{
+	if (unlikely(!have_vfp))
+		return;
+
+	local_bh_disable();
+	vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
+}
+
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
@@ -798,7 +816,6 @@ static int __init vfp_init(void)
 	vfpsid = fmrx(FPSID);
 	barrier();
 	unregister_undef_hook(&vfp_detect_hook);
-	vfp_vector = vfp_null_entry;
 
 	pr_info("VFP support v0.3: ");
 	if (VFP_arch) {
@@ -883,7 +900,7 @@ static int __init vfp_init(void)
 					  "arm/vfp:starting", vfp_starting_cpu,
 					  vfp_dying_cpu);
 
-	vfp_vector = vfp_support_entry;
+	have_vfp = true;
 
 	thread_register_notifier(&vfp_notifier_block);
 	vfp_pm_init();
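The three VFP patches above replace hand-rolled assembly that poked TI_PREEMPT directly with the generic softirq API: do_vfp now branches to a C vfp_entry() that takes local_bh_disable(), and the assembly handler ends the section by tail-calling __local_bh_enable_ip() with the same offset. A sketch of that split pairing, with do_hw_work() as a hypothetical stand-in for the assembly body:

#include <linux/bottom_half.h>
#include <linux/kernel.h>
#include <linux/preempt.h>

/* Hypothetical stand-in for the asm handler the C wrapper hands off to. */
extern void do_hw_work(void);

static void handle_fp_trap_sketch(void)
{
	local_bh_disable();	/* C side: softirqs must not touch the VFP state */
	do_hw_work();
	/* the asm side closes the section by tail-calling the same helper: */
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
}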
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 123a56f7f818..feb27a0ccfb4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -1571,15 +1571,20 @@
 			dmc: bus@38000 {
 				compatible = "simple-bus";
-				reg = <0x0 0x38000 0x0 0x400>;
 				#address-cells = <2>;
 				#size-cells = <2>;
-				ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
+				ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
 
 				canvas: video-lut@48 {
 					compatible = "amlogic,canvas";
 					reg = <0x0 0x48 0x0 0x14>;
 				};
+
+				pmu: pmu@80 {
+					reg = <0x0 0x80 0x0 0x40>,
+					      <0x0 0xc00 0x0 0x40>;
+					interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
+				};
 			};
 
 			usb2_phy1: phy@3a000 {
@@ -1705,12 +1710,6 @@
 			};
 		};
 
-		pmu: pmu@ff638000 {
-			reg = <0x0 0xff638000 0x0 0x100>,
-			      <0x0 0xff638c00 0x0 0x100>;
-			interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
-		};
-
 		aobus: bus@ff800000 {
 			compatible = "simple-bus";
 			reg = <0x0 0xff800000 0x0 0x100000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
index af9194eca556..73eb6061c73e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
@@ -56,14 +56,10 @@
 };
 
 &enetc_port2 {
-	nvmem-cells = <&base_mac_address 2>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
 &enetc_port3 {
-	nvmem-cells = <&base_mac_address 3>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
@@ -84,8 +80,6 @@
 	managed = "in-band-status";
 	phy-handle = <&qsgmii_phy0>;
 	phy-mode = "qsgmii";
-	nvmem-cells = <&base_mac_address 4>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
@@ -94,8 +88,6 @@
 	managed = "in-band-status";
 	phy-handle = <&qsgmii_phy1>;
 	phy-mode = "qsgmii";
-	nvmem-cells = <&base_mac_address 5>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
@@ -104,8 +96,6 @@
 	managed = "in-band-status";
 	phy-handle = <&qsgmii_phy2>;
 	phy-mode = "qsgmii";
-	nvmem-cells = <&base_mac_address 6>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
@@ -114,8 +104,6 @@
 	managed = "in-band-status";
 	phy-handle = <&qsgmii_phy3>;
 	phy-mode = "qsgmii";
-	nvmem-cells = <&base_mac_address 7>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
index 1f34c7553459..7cd29ab970d9 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
@@ -55,7 +55,5 @@
 &enetc_port1 {
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	nvmem-cells = <&base_mac_address 0>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
index aac41192caa1..113b1df74bf8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
@@ -36,14 +36,10 @@
 };
 
 &enetc_port2 {
-	nvmem-cells = <&base_mac_address 2>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
 &enetc_port3 {
-	nvmem-cells = <&base_mac_address 3>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
@@ -56,8 +52,6 @@
 	managed = "in-band-status";
 	phy-handle = <&phy0>;
 	phy-mode = "sgmii";
-	nvmem-cells = <&base_mac_address 0>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
@@ -66,8 +60,6 @@
 	managed = "in-band-status";
 	phy-handle = <&phy1>;
 	phy-mode = "sgmii";
-	nvmem-cells = <&base_mac_address 1>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
index a4421db3784e..9b5e92fb753e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
@@ -43,7 +43,5 @@
 &enetc_port1 {
 	phy-handle = <&phy1>;
 	phy-mode = "rgmii-id";
-	nvmem-cells = <&base_mac_address 1>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
index 8b65af4a7147..4ab17b984b03 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
@@ -92,8 +92,6 @@
 	phy-handle = <&phy0>;
 	phy-mode = "sgmii";
 	managed = "in-band-status";
-	nvmem-cells = <&base_mac_address 0>;
-	nvmem-cell-names = "mac-address";
 	status = "okay";
 };
 
@@ -156,21 +154,6 @@
 				label = "bootloader environment";
 			};
 		};
-
-		otp-1 {
-			compatible = "user-otp";
-
-			nvmem-layout {
-				compatible = "kontron,sl28-vpd";
-
-				serial_number: serial-number {
-				};
-
-				base_mac_address: base-mac-address {
-					#nvmem-cell-cells = <1>;
-				};
-			};
-		};
 	};
 };
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
index 1f3d225e64ec..06b94bbc2b97 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
@@ -117,7 +117,7 @@ lsio_subsys: bus@5d000000 {
 			interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>,
 				 <&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>;
-			clock-names = "fspi", "fspi_en";
+			clock-names = "fspi_en", "fspi";
 			power-domains = <&pd IMX_SC_R_FSPI_0>;
 			status = "disabled";
 		};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
index 1bcf228a22b8..852420349c01 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
@@ -121,8 +121,6 @@
 	phy-handle = <&ethphy0>;
 	nvmem-cells = <&fec_mac1>;
 	nvmem-cell-names = "mac-address";
-	snps,reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
-	snps,reset-delays-us = <10 20 200000>;
 	status = "okay";
 
 	mdio {
@@ -136,6 +134,9 @@
 			eee-broken-1000t;
 			qca,disable-smarteee;
 			qca,disable-hibernation-mode;
+			reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
+			reset-assert-us = <20>;
+			reset-deassert-us = <200000>;
 			vddio-supply = <&vddio0>;
 
 			vddio0: vddio-regulator {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index d1a6390976a9..3f9dfd4d3884 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -194,7 +194,7 @@
 		rohm,reset-snvs-powered;
 
 		#clock-cells = <0>;
-		clocks = <&osc_32k 0>;
+		clocks = <&osc_32k>;
 		clock-output-names = "clk-32k-out";
 
 		regulators {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
index 6357078185ed..0e8f0d7161ad 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
@@ -247,7 +247,7 @@
 		compatible = "wlf,wm8960";
 		reg = <0x1a>;
 		clocks = <&clk IMX8MM_CLK_SAI1_ROOT>;
-		clock-names = "mclk1";
+		clock-names = "mclk";
 		wlf,shared-lrclk;
 		#sound-dai-cells = <0>;
 	};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 88321b5b0693..6f0811587142 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -99,7 +99,7 @@
 		compatible = "regulator-fixed";
 		enable-active-high;
 		gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
-		off-on-delay = <500000>;
+		off-on-delay-us = <500000>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_reg_eth>;
 		regulator-always-on;
@@ -139,7 +139,7 @@
 		enable-active-high;
 		/* Verdin SD_1_PWR_EN (SODIMM 76) */
 		gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
-		off-on-delay = <100000>;
+		off-on-delay-us = <100000>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
 		regulator-max-microvolt = <3300000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index ed9ac6c5047c..9e0ddd6b7a32 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -296,6 +296,7 @@
 			sai2: sai@30020000 {
 				compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
 				reg = <0x30020000 0x10000>;
+				#sound-dai-cells = <0>;
 				interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
 					 <&clk IMX8MN_CLK_DUMMY>,
@@ -310,6 +311,7 @@
 			sai3: sai@30030000 {
 				compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
 				reg = <0x30030000 0x10000>;
+				#sound-dai-cells = <0>;
 				interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
 					 <&clk IMX8MN_CLK_DUMMY>,
@@ -324,6 +326,7 @@
 			sai5: sai@30050000 {
 				compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
 				reg = <0x30050000 0x10000>;
+				#sound-dai-cells = <0>;
 				interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
 					 <&clk IMX8MN_CLK_DUMMY>,
@@ -340,6 +343,7 @@
 			sai6: sai@30060000 {
 				compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
 				reg = <0x30060000 0x10000>;
+				#sound-dai-cells = <0>;
 				interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
 					 <&clk IMX8MN_CLK_DUMMY>,
@@ -397,6 +401,7 @@
 			sai7: sai@300b0000 {
 				compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
 				reg = <0x300b0000 0x10000>;
+				#sound-dai-cells = <0>;
 				interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
 					 <&clk IMX8MN_CLK_DUMMY>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
index 361426c0da0a..c29622529200 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
@@ -10,7 +10,7 @@
 		compatible = "regulator-fixed";
 		enable-active-high;
 		gpio = <&gpio_expander_21 4 GPIO_ACTIVE_HIGH>; /* ETH_PWR_EN */
-		off-on-delay = <500000>;
+		off-on-delay-us = <500000>;
 		regulator-max-microvolt = <3300000>;
 		regulator-min-microvolt = <3300000>;
 		regulator-name = "+V3.3_ETH";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
index 0dd6180a8e39..1608775da0ad 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
@@ -87,7 +87,7 @@
 		compatible = "regulator-fixed";
 		enable-active-high;
 		gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
-		off-on-delay = <500000>;
+		off-on-delay-us = <500000>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_reg_eth>;
 		regulator-always-on;
@@ -128,7 +128,7 @@
 		enable-active-high;
 		/* Verdin SD_1_PWR_EN (SODIMM 76) */
 		gpio = <&gpio4 22 GPIO_ACTIVE_HIGH>;
-		off-on-delay = <100000>;
+		off-on-delay-us = <100000>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
 		regulator-max-microvolt = <3300000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index a19224fe1a6a..a237275ee017 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -1128,11 +1128,11 @@
 		lcdif2: display-controller@32e90000 {
 			compatible = "fsl,imx8mp-lcdif";
-			reg = <0x32e90000 0x238>;
+			reg = <0x32e90000 0x10000>;
 			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT>,
-				 <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>,
-				 <&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
+				 <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
+				 <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
 			clock-names = "pix", "axi", "disp_axi";
 			assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
 					  <&clk IMX8MP_VIDEO_PLL1>;
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index 2076f9c9983a..41efd97dd6d6 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -164,6 +164,8 @@
 			lpi2c1: i2c@44340000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x44340000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C1_GATE>,
 					 <&clk IMX93_CLK_BUS_AON>;
@@ -174,6 +176,8 @@
 			lpi2c2: i2c@44350000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x44350000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C2_GATE>,
 					 <&clk IMX93_CLK_BUS_AON>;
@@ -343,6 +347,8 @@
 			lpi2c3: i2c@42530000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x42530000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C3_GATE>,
 					 <&clk IMX93_CLK_BUS_WAKEUP>;
@@ -353,6 +359,8 @@
 			lpi2c4: i2c@42540000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x42540000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C4_GATE>,
 					 <&clk IMX93_CLK_BUS_WAKEUP>;
@@ -455,6 +463,8 @@
 			lpi2c5: i2c@426b0000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x426b0000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C5_GATE>,
 					 <&clk IMX93_CLK_BUS_WAKEUP>;
@@ -465,6 +475,8 @@
 			lpi2c6: i2c@426c0000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x426c0000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C6_GATE>,
 					 <&clk IMX93_CLK_BUS_WAKEUP>;
@@ -475,6 +487,8 @@
 			lpi2c7: i2c@426d0000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x426d0000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C7_GATE>,
 					 <&clk IMX93_CLK_BUS_WAKEUP>;
@@ -485,6 +499,8 @@
 			lpi2c8: i2c@426e0000 {
 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
 				reg = <0x426e0000 0x10000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX93_CLK_LPI2C8_GATE>,
 					 <&clk IMX93_CLK_BUS_WAKEUP>;
@@ -580,9 +596,9 @@
 			eqos: ethernet@428a0000 {
 				compatible = "nxp,imx93-dwmac-eqos", "snps,dwmac-5.10a";
 				reg = <0x428a0000 0x10000>;
-				interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-				interrupt-names = "eth_wake_irq", "macirq";
+				interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "macirq", "eth_wake_irq";
 				clocks = <&clk IMX93_CLK_ENET_QOS_GATE>,
 					 <&clk IMX93_CLK_ENET_QOS_GATE>,
 					 <&clk IMX93_CLK_ENET_TIMER2>,
@@ -595,7 +611,7 @@
 					 <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
 				assigned-clock-rates = <100000000>, <250000000>;
 				intf_mode = <&wakeupmix_gpr 0x28>;
-				clk_csr = <0>;
+				snps,clk-csr = <0>;
 				status = "disabled";
 			};
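The imx93 hunk above reorders the eqos interrupts so "macirq" comes first, matching the order the dwmac binding expects; a driver that looks interrupts up by name is indifferent to the order itself. A hedged sketch of the by-name lookup such a driver typically performs (the helper names are real platform APIs; the wiring function is illustrative):

#include <linux/platform_device.h>

/* Hedged sketch: fetch the MAC IRQs by name rather than by index. */
static int wire_up_irqs(struct platform_device *pdev, int *macirq, int *wake_irq)
{
	*macirq = platform_get_irq_byname(pdev, "macirq");
	if (*macirq < 0)
		return *macirq;		/* the main MAC interrupt is mandatory */

	/* the wake interrupt may legitimately be absent, so probe it as optional */
	*wake_irq = platform_get_irq_byname_optional(pdev, "eth_wake_irq");
	if (*wake_irq < 0)
		*wake_irq = 0;		/* proceed without wake support */

	return 0;
}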
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 133dbe5b429d..7096b999b33f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -22,7 +22,7 @@
 		#address-cells = <2>;
 		#size-cells = <2>;
 
-		ranges = <0x0 0x0 0x0 0x0 0x0 0x40000000>;
+		ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
 
 		apbmisc: misc@100000 {
 			compatible = "nvidia,tegra194-misc";
diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
index 8fe8eda7654d..f1748cff8a33 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
@@ -20,7 +20,7 @@
 		#address-cells = <2>;
 		#size-cells = <2>;
 
-		ranges = <0x0 0x0 0x0 0x0 0x0 0x40000000>;
+		ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
 
 		misc@100000 {
 			compatible = "nvidia,tegra234-misc";
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
index ca3f96646b90..5cf07caf4103 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
@@ -62,11 +62,11 @@
 	perst-gpios = <&tlmm 58 GPIO_ACTIVE_LOW>;
 };
 
-&pcie_phy0 {
+&pcie_qmp0 {
 	status = "okay";
 };
 
-&pcie_phy1 {
+&pcie_qmp1 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
index 651a231554e0..1b8379ba87f9 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
@@ -48,11 +48,11 @@
 	perst-gpios = <&tlmm 61 GPIO_ACTIVE_LOW>;
 };
 
-&pcie_phy0 {
+&pcie_qmp0 {
 	status = "okay";
 };
 
-&pcie_phy1 {
+&pcie_qmp1 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8916-thwc-uf896.dts b/arch/arm64/boot/dts/qcom/msm8916-thwc-uf896.dts
index c492db856190..82e260375174 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-thwc-uf896.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-thwc-uf896.dts
@@ -33,7 +33,3 @@
 &gpio_leds_default {
 	pins = "gpio81", "gpio82", "gpio83";
 };
-
-&sim_ctrl_default {
-	pins = "gpio1", "gpio2";
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-thwc-ufi001c.dts b/arch/arm64/boot/dts/qcom/msm8916-thwc-ufi001c.dts
index 700cf81cbf8c..8433c9710b1c 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-thwc-ufi001c.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-thwc-ufi001c.dts
@@ -25,6 +25,11 @@
 	gpios = <&msmgpio 20 GPIO_ACTIVE_HIGH>;
 };
 
+&mpss {
+	pinctrl-0 = <&sim_ctrl_default>;
+	pinctrl-names = "default";
+};
+
 &button_default {
 	pins = "gpio37";
 	bias-pull-down;
@@ -34,6 +39,25 @@
 	pins = "gpio20", "gpio21", "gpio22";
 };
 
-&sim_ctrl_default {
-	pins = "gpio1", "gpio2";
+/* This selects the external SIM card slot by default */
+&msmgpio {
+	sim_ctrl_default: sim-ctrl-default-state {
+		esim-sel-pins {
+			pins = "gpio0", "gpio3";
+			bias-disable;
+			output-low;
+		};
+
+		sim-en-pins {
+			pins = "gpio1";
+			bias-disable;
+			output-low;
+		};
+
+		sim-sel-pins {
+			pins = "gpio2";
+			bias-disable;
+			output-high;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi b/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
index 790a9696da9d..cdf34b74fa8f 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
@@ -92,9 +92,6 @@
 };
 
 &mpss {
-	pinctrl-0 = <&sim_ctrl_default>;
-	pinctrl-names = "default";
-
 	status = "okay";
 };
 
@@ -240,11 +237,4 @@
 		drive-strength = <2>;
 		bias-disable;
 	};
-
-	sim_ctrl_default: sim-ctrl-default-state {
-		function = "gpio";
-		drive-strength = <2>;
-		bias-disable;
-		output-low;
-	};
 };
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index aa0a7bd7307c..dd924331b0ee 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -1012,7 +1012,7 @@
 		left_spkr: speaker@0,3 {
 			compatible = "sdw10217211000";
 			reg = <0 3>;
-			powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_LOW>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrLeft";
 			#sound-dai-cells = <0>;
@@ -1021,7 +1021,7 @@
 		right_spkr: speaker@0,4 {
 			compatible = "sdw10217211000";
 			reg = <0 4>;
-			powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_LOW>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrRight";
 			#sound-dai-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sa8540p-ride.dts b/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
index 3ccb5ffdb3ca..24fa449d48a6 100644
--- a/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
+++ b/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
@@ -241,7 +241,7 @@
 };
 
 &remoteproc_nsp0 {
-	firmware-name = "qcom/sa8540p/cdsp.mbn";
+	firmware-name = "qcom/sa8540p/cdsp0.mbn";
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/qcom/sc7280-herobrine.dtsi b/arch/arm64/boot/dts/qcom/sc7280-herobrine.dtsi
index b6137816f2f3..313083ec1f39 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-herobrine.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-herobrine.dtsi
@@ -464,7 +464,7 @@ ap_i2c_tpm: &i2c14 {
 
 &mdss_dp_out {
 	data-lanes = <0 1>;
-	link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+	link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000>;
 };
 
 &mdss_mdp {
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index bdcb74925313..8f4ab6bd2886 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -2131,6 +2131,8 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&pcie1_clkreq_n>;
 
+			dma-coherent;
+
 			iommus = <&apps_smmu 0x1c80 0x1>;
 
 			iommu-map = <0x0 &apps_smmu 0x1c80 0x1>,
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index 98e71b933437..99c6d6574559 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -370,6 +370,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+			regulator-always-on;
 		};
 
 		vreg_s11b: smps11 {
@@ -377,6 +378,7 @@
 			regulator-min-microvolt = <1272000>;
 			regulator-max-microvolt = <1272000>;
 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+			regulator-always-on;
 		};
 
 		vreg_s12b: smps12 {
@@ -384,6 +386,7 @@
 			regulator-min-microvolt = <984000>;
 			regulator-max-microvolt = <984000>;
 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+			regulator-always-on;
 		};
 
 		vreg_l3b: ldo3 {
@@ -441,6 +444,7 @@
 			regulator-min-microvolt = <3008000>;
 			regulator-max-microvolt = <3960000>;
 			regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+			regulator-always-on;
 		};
 	};
 
@@ -772,75 +776,88 @@
 	pmic-die-temp@3 {
 		reg = <PMK8350_ADC7_DIE_TEMP>;
 		qcom,pre-scaling = <1 1>;
+		label = "pmk8350_die_temp";
 	};
 
 	xo-therm@44 {
 		reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "pmk8350_xo_therm";
 	};
 
 	pmic-die-temp@103 {
 		reg = <PM8350_ADC7_DIE_TEMP(1)>;
 		qcom,pre-scaling = <1 1>;
+		label = "pmc8280_1_die_temp";
 	};
 
 	sys-therm@144 {
 		reg = <PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm1";
 	};
 
 	sys-therm@145 {
 		reg = <PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm2";
 	};
 
 	sys-therm@146 {
 		reg = <PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm3";
 	};
 
 	sys-therm@147 {
 		reg = <PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm4";
 	};
 
 	pmic-die-temp@303 {
 		reg = <PM8350_ADC7_DIE_TEMP(3)>;
 		qcom,pre-scaling = <1 1>;
+		label = "pmc8280_2_die_temp";
 	};
 
 	sys-therm@344 {
 		reg = <PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm5";
 	};
 
 	sys-therm@345 {
 		reg = <PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm6";
 	};
 
 	sys-therm@346 {
 		reg = <PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm7";
 	};
 
 	sys-therm@347 {
 		reg = <PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
 		qcom,hw-settle-time = <200>;
 		qcom,ratiometric;
+		label = "sys_therm8";
 	};
 
 	pmic-die-temp@403 {
 		reg = <PMR735A_ADC7_DIE_TEMP>;
 		qcom,pre-scaling = <1 1>;
+		label = "pmr735a_die_temp";
 	};
 };
 
@@ -884,9 +901,9 @@
 		"VA DMIC0", "MIC BIAS1",
 		"VA DMIC1", "MIC BIAS1",
 		"VA DMIC2", "MIC BIAS3",
-		"TX DMIC0", "MIC BIAS1",
-		"TX DMIC1", "MIC BIAS2",
-		"TX DMIC2", "MIC BIAS3",
+		"VA DMIC0", "VA MIC BIAS1",
+		"VA DMIC1", "VA MIC BIAS1",
+		"VA DMIC2", "VA MIC BIAS3",
 		"TX SWR_ADC1", "ADC2_OUTPUT";
 
 	wcd-playback-dai-link {
@@ -937,7 +954,7 @@
 	va-dai-link {
 		link-name = "VA Capture";
 		cpu {
-			sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+			sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
 		};
 
 		platform {
@@ -1062,7 +1079,7 @@
 
 	vdd-micb-supply = <&vreg_s10b>;
 
-	qcom,dmic-sample-rate = <600000>;
+	qcom,dmic-sample-rate = <4800000>;
 
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
index df7d28f7ae60..be446eba4fa7 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
@@ -59,8 +59,9 @@
 		#size-cells = <0>;
 
 		pmk8280_pon: pon@1300 {
-			compatible = "qcom,pm8998-pon";
-			reg = <0x1300>;
+			compatible = "qcom,pmk8350-pon";
+			reg = <0x1300>, <0x800>;
+			reg-names = "hlos", "pbs";
 
 			pmk8280_pon_pwrkey: pwrkey {
 				compatible = "qcom,pmk8350-pwrkey";
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index 0d02599d8867..42bfa9fa5b96 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -2504,12 +2504,12 @@
 			qcom,ports-sinterval-low = /bits/ 8 <0x03 0x1f 0x1f 0x07 0x00>;
 			qcom,ports-offset1 = /bits/ 8 <0x00 0x00 0x0B 0x01 0x00>;
 			qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x0B 0x00 0x00>;
-			qcom,ports-hstart = /bits/ 8 <0xff 0x03 0xff 0xff 0xff>;
-			qcom,ports-hstop = /bits/ 8 <0xff 0x06 0xff 0xff 0xff>;
+			qcom,ports-hstart = /bits/ 8 <0xff 0x03 0x00 0xff 0xff>;
+			qcom,ports-hstop = /bits/ 8 <0xff 0x06 0x0f 0xff 0xff>;
 			qcom,ports-word-length = /bits/ 8 <0x01 0x07 0x04 0xff 0xff>;
-			qcom,ports-block-pack-mode = /bits/ 8 <0xff 0x00 0x01 0xff 0xff>;
+			qcom,ports-block-pack-mode = /bits/ 8 <0xff 0xff 0x01 0xff 0xff>;
 			qcom,ports-lane-control = /bits/ 8 <0x01 0x00 0x00 0x00 0x00>;
-			qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff 0x00>;
+			qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff 0xff>;
 
 			#sound-dai-cells = <1>;
 			#address-cells = <2>;
@@ -2600,7 +2600,7 @@
 				     <&intc GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "core", "wake";
 
-			clocks = <&vamacro>;
+			clocks = <&txmacro>;
 			clock-names = "iface";
 			label = "TX";
 
 			#sound-dai-cells = <1>;
@@ -2609,15 +2609,15 @@
 			qcom,din-ports = <4>;
 			qcom,dout-ports = <0>;
 
-			qcom,ports-sinterval-low = /bits/ 8 <0x01 0x03 0x03 0x03>;
-			qcom,ports-offset1 = /bits/ 8 <0x01 0x00 0x02 0x01>;
+			qcom,ports-sinterval-low = /bits/ 8 <0x01 0x01 0x03 0x03>;
+			qcom,ports-offset1 = /bits/ 8 <0x01 0x00 0x02 0x00>;
 			qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x00 0x00>;
 			qcom,ports-block-pack-mode = /bits/ 8 <0xff 0xff 0xff 0xff>;
 			qcom,ports-hstart = /bits/ 8 <0xff 0xff 0xff 0xff>;
 			qcom,ports-hstop = /bits/ 8 <0xff 0xff 0xff 0xff>;
-			qcom,ports-word-length = /bits/ 8 <0xff 0x00 0xff 0xff>;
+			qcom,ports-word-length = /bits/ 8 <0xff 0xff 0xff 0xff>;
 			qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff>;
-			qcom,ports-lane-control = /bits/ 8 <0x00 0x01 0x00 0x00>;
+			qcom,ports-lane-control = /bits/ 8 <0x00 0x01 0x00 0x01>;
 
 			status = "disabled";
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 67d2a663ce75..5c688cb6a7ce 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -753,7 +753,7 @@
 		left_spkr: speaker@0,3 {
 			compatible = "sdw10217211000";
 			reg = <0 3>;
-			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_LOW>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrLeft";
 			#sound-dai-cells = <0>;
@@ -761,7 +761,7 @@
 
 		right_spkr: speaker@0,4 {
 			compatible = "sdw10217211000";
-			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_LOW>;
 			reg = <0 4>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrRight";
diff --git a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
index 9850140514ba..41f59e32af64 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
@@ -662,7 +662,7 @@
 		left_spkr: speaker@0,3 {
 			compatible = "sdw10217211000";
 			reg = <0 3>;
-			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_LOW>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrLeft";
 			#sound-dai-cells = <0>;
@@ -670,7 +670,7 @@
 
 		right_spkr: speaker@0,4 {
 			compatible = "sdw10217211000";
-			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_LOW>;
 			reg = <0 4>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrRight";
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index 4d6ec815b78b..fbd67d2c8d78 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -1078,6 +1078,7 @@
 			dma-names = "tx", "rx";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index 31b88c738510..068ee4f72485 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -1209,6 +1209,7 @@
 			clock-names = "xo";
 
 			power-domains = <&rpmpd SM6375_VDDCX>;
+			power-domain-names = "cx";
 
 			memory-region = <&pil_cdsp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index fd20096cfc6e..13e0ce828606 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -1826,7 +1826,7 @@
 				      "slave_q2a",
 				      "tbu";
 
-			iommus = <&apps_smmu 0x1d80 0x7f>;
+			iommus = <&apps_smmu 0x1d80 0x3f>;
 			iommu-map = <0x0 &apps_smmu 0x1d80 0x1>,
 				    <0x100 &apps_smmu 0x1d81 0x1>;
 
@@ -1925,7 +1925,7 @@
 			assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
 			assigned-clock-rates = <19200000>;
 
-			iommus = <&apps_smmu 0x1e00 0x7f>;
+			iommus = <&apps_smmu 0x1e00 0x3f>;
 			iommu-map = <0x0 &apps_smmu 0x1e00 0x1>,
 				    <0x100 &apps_smmu 0x1e01 0x1>;
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index e54cdc8bc31f..4c9de236676d 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -764,7 +764,7 @@
 		left_spkr: speaker@0,3 {
 			compatible = "sdw10217211000";
 			reg = <0 3>;
-			powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_LOW>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrLeft";
 			#sound-dai-cells = <0>;
@@ -773,7 +773,7 @@
 		right_spkr: speaker@0,4 {
 			compatible = "sdw10217211000";
 			reg = <0 4>;
-			powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+			powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_LOW>;
 			#thermal-sensor-cells = <0>;
 			sound-name-prefix = "SpkrRight";
 			#sound-dai-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish.dts b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish.dts
index acaa99c5ff8b..a85d47f7a9e8 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish.dts
@@ -625,6 +625,6 @@
 };
 
 &venus {
-	firmware-name = "qcom/sm8250/elish/venus.mbn";
+	firmware-name = "qcom/sm8250/xiaomi/elish/venus.mbn";
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index 1c97e28da6ad..1a5a612d4234 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -1664,6 +1664,7 @@
 			power-domains = <&gcc UFS_PHY_GDSC>;
 
 			iommus = <&apps_smmu 0xe0 0x0>;
+			dma-coherent;
 
 			clock-names =
 				"core_clk",
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index 1a744a33bcf4..b285b1530c10 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -2143,8 +2143,8 @@
 				 <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
 				 <&vamacro>;
 			clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
-			assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
-					  <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+			assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+					  <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
 			assigned-clock-rates = <19200000>, <19200000>;
 
 			#clock-cells = <0>;
@@ -4003,6 +4003,7 @@
 			power-domains = <&gcc UFS_PHY_GDSC>;
 
 			iommus = <&apps_smmu 0xe0 0x0>;
+			dma-coherent;
 
 			interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
 					<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index ff4d342c0725..5d0888398b3c 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -66,7 +66,7 @@
 
 		CPU0: cpu@0 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-a510";
 			reg = <0 0>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
@@ -89,7 +89,7 @@
 
 		CPU1: cpu@100 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-a510";
 			reg = <0 0x100>;
 			enable-method = "psci";
 			next-level-cache = <&L2_100>;
@@ -108,7 +108,7 @@
 
 		CPU2: cpu@200 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-a510";
 			reg = <0 0x200>;
 			enable-method = "psci";
 			next-level-cache = <&L2_200>;
@@ -127,7 +127,7 @@
 
 		CPU3: cpu@300 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-a715";
 			reg = <0 0x300>;
 			enable-method = "psci";
 			next-level-cache = <&L2_300>;
@@ -146,7 +146,7 @@
 
 		CPU4: cpu@400 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-a715";
 			reg = <0 0x400>;
 			enable-method = "psci";
 			next-level-cache = <&L2_400>;
@@ -165,7 +165,7 @@
 
 		CPU5: cpu@500 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-a710";
 			reg = <0 0x500>;
 			enable-method = "psci";
 			next-level-cache = <&L2_500>;
@@ -184,7 +184,7 @@
 
 		CPU6: cpu@600 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-a710";
 			reg = <0 0x600>;
 			enable-method = "psci";
 			next-level-cache = <&L2_600>;
@@ -203,7 +203,7 @@
 
 		CPU7: cpu@700 {
 			device_type = "cpu";
-			compatible = "qcom,kryo";
+			compatible = "arm,cortex-x3";
 			reg = <0 0x700>;
 			enable-method = "psci";
 			next-level-cache = <&L2_700>;
@@ -1905,6 +1905,7 @@
 			required-opps = <&rpmhpd_opp_nom>;
 
 			iommus = <&apps_smmu 0x60 0x0>;
+			dma-coherent;
 
 			interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
 					<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
@@ -1997,7 +1998,7 @@
 		lpass_tlmm: pinctrl@6e80000 {
 			compatible = "qcom,sm8550-lpass-lpi-pinctrl";
 			reg = <0 0x06e80000 0 0x20000>,
-			      <0 0x0725a000 0 0x10000>;
+			      <0 0x07250000 0 0x10000>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-ranges = <&lpass_tlmm 0 0 23>;
@@ -2691,7 +2692,7 @@
 			pins = "gpio28", "gpio29";
 			function = "qup1_se0";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c1_data_clk: qup-i2c1-data-clk-state {
@@ -2699,7 +2700,7 @@
 			pins = "gpio32", "gpio33";
 			function = "qup1_se1";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c2_data_clk: qup-i2c2-data-clk-state {
@@ -2707,7 +2708,7 @@
 			pins = "gpio36", "gpio37";
 			function = "qup1_se2";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c3_data_clk: qup-i2c3-data-clk-state {
@@ -2715,7 +2716,7 @@
 			pins = "gpio40", "gpio41";
 			function = "qup1_se3";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c4_data_clk: qup-i2c4-data-clk-state {
@@ -2723,7 +2724,7 @@
 			pins = "gpio44", "gpio45";
 			function = "qup1_se4";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c5_data_clk: qup-i2c5-data-clk-state {
@@ -2731,7 +2732,7 @@
 			pins = "gpio52", "gpio53";
 			function = "qup1_se5";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c6_data_clk: qup-i2c6-data-clk-state {
@@ -2739,7 +2740,7 @@
 			pins = "gpio48", "gpio49";
 			function = "qup1_se6";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c8_data_clk: qup-i2c8-data-clk-state {
@@ -2747,14 +2748,14 @@
 				pins = "gpio57";
 				function = "qup2_se0_l1_mira";
 				drive-strength = <2>;
-				bias-pull-up;
+				bias-pull-up = <2200>;
 			};
 
 			sda-pins {
 				pins = "gpio56";
 				function = "qup2_se0_l0_mira";
 				drive-strength = <2>;
-				bias-pull-up;
+				bias-pull-up = <2200>;
 			};
 		};
 
@@ -2763,7 +2764,7 @@
 			pins = "gpio60", "gpio61";
 			function = "qup2_se1";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c10_data_clk: qup-i2c10-data-clk-state {
@@ -2771,7 +2772,7 @@
 			pins = "gpio64", "gpio65";
 			function = "qup2_se2";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c11_data_clk: qup-i2c11-data-clk-state {
@@ -2779,7 +2780,7 @@
 			pins = "gpio68", "gpio69";
 			function = "qup2_se3";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c12_data_clk: qup-i2c12-data-clk-state {
@@ -2787,7 +2788,7 @@
 			pins = "gpio2", "gpio3";
 			function = "qup2_se4";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c13_data_clk: qup-i2c13-data-clk-state {
@@ -2795,7 +2796,7 @@
 			pins = "gpio80", "gpio81";
 			function = "qup2_se5";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_i2c15_data_clk: qup-i2c15-data-clk-state {
@@ -2803,7 +2804,7 @@
 			pins = "gpio72", "gpio106";
 			function = "qup2_se7";
 			drive-strength = <2>;
-			bias-pull-up;
+			bias-pull-up = <2200>;
 		};
 
 		qup_spi0_cs: qup-spi0-cs-state {
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-anbernic-rg351m.dts b/arch/arm64/boot/dts/rockchip/rk3326-anbernic-rg351m.dts
index 61b31688b469..ce318e05f0a6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-anbernic-rg351m.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-anbernic-rg351m.dts
@@ -24,6 +24,8 @@
 
 &internal_display {
 	compatible = "elida,kd35t133";
+	iovcc-supply = <&vcc_lcd>;
+	vdd-supply = <&vcc_lcd>;
 };
 
 &pwm0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go.dtsi b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go.dtsi
index 04eba432fb0e..80fc53c807a4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go.dtsi
@@ -235,10 +235,8 @@
 		internal_display: panel@0 {
 			reg = <0>;
 			backlight = <&backlight>;
-			iovcc-supply = <&vcc_lcd>;
 			reset-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
 			rotation = <270>;
-			vdd-supply = <&vcc_lcd>;
 
 			port {
 				mipi_in_panel: endpoint {
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2-v11.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2-v11.dts
index 139c898e590e..d94ac81eb4e6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2-v11.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2-v11.dts
@@ -83,6 +83,8 @@
 
 &internal_display {
 	compatible = "elida,kd35t133";
+	iovcc-supply = <&vcc_lcd>;
+	vdd-supply = <&vcc_lcd>;
 };
 
 &rk817 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
index 4702183b673c..aa6f5b12206b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
@@ -59,6 +59,8 @@
 
 &internal_display {
 	compatible = "elida,kd35t133";
+	iovcc-supply = <&vcc_lcd>;
+	vdd-supply = <&vcc_lcd>;
 };
 
 &rk817_charger {
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
index 083452c67711..e47d1398aeca 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
@@ -61,7 +61,6 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&bl_en>;
 		pwms = <&pwm0 0 1000000 PWM_POLARITY_INVERTED>;
-		pwm-delay-us = <10000>;
 	};
 
 	emmc_pwrseq: emmc-pwrseq {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
index ee6095baba4d..5c1929d41cc0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
@@ -198,7 +198,6 @@
 		power-supply = <&pp3300_disp>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&bl_en>;
-		pwm-delay-us = <10000>;
 	};
 
 	gpio_keys: gpio-keys {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index a47d9f758611..c5e7de60c121 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -167,7 +167,6 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&bl_en>;
 		pwms = <&pwm1 0 1000000 0>;
-		pwm-delay-us = <10000>;
 	};
 
 	dmic: dmic {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 194e48c755f6..ddd45de97950 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -50,19 +50,9 @@
 		pinctrl-0 = <&panel_en_pin>;
 		power-supply = <&vcc3v3_panel>;
 
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				panel_in_edp: endpoint@0 {
-					reg = <0>;
-					remote-endpoint = <&edp_out_panel>;
-				};
+		port {
+			panel_in_edp: endpoint {
+				remote-endpoint = <&edp_out_panel>;
 			};
 		};
 	};
@@ -943,7 +933,7 @@
 	disable-wp;
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
-	sd-uhs-sdr104;
+	sd-uhs-sdr50;
 	vmmc-supply = <&vcc3v0_sd>;
 	vqmmc-supply = <&vcc_sdio>;
 	status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
index 78157521e944..bca2b50e0a93 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -647,16 +647,10 @@
 		avdd-supply = <&avdd>;
 		backlight = <&backlight>;
 		dvdd-supply = <&vcc3v3_s0>;
 
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-
-				mipi_in_panel: endpoint {
-					remote-endpoint = <&mipi_out_panel>;
-				};
+		port {
+			mipi_in_panel: endpoint {
+				remote-endpoint = <&mipi_out_panel>;
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 1881b4b71f91..40e7c4a70055 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -552,7 +552,7 @@
 		      <0x0 0xfff10000 0 0x10000>, /* GICH */
 		      <0x0 0xfff20000 0 0x10000>; /* GICV */
 		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
-		its: interrupt-controller@fee20000 {
+		its: msi-controller@fee20000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
 			#msi-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353x.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353x.dtsi
index 65a80d1f6d91..9a0e217f069f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353x.dtsi
@@ -16,8 +16,10 @@
 };
 
 &cru {
-	assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
-	assigned-clock-rates = <1200000000>, <200000000>, <241500000>;
+	assigned-clocks = <&pmucru CLK_RTC_32K>, <&cru PLL_GPLL>,
+			  <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
+	assigned-clock-rates = <32768>, <1200000000>,
+			       <200000000>, <241500000>;
 };
 
 &gpio_keys_control {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
index b4b2df821cba..c763c7f3b1b3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
@@ -105,8 +105,10 @@
 };
 
 &cru {
-	assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
-	assigned-clock-rates = <1200000000>, <200000000>, <500000000>;
+	assigned-clocks = <&pmucru CLK_RTC_32K>, <&cru PLL_GPLL>,
+			  <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
+	assigned-clock-rates = <32768>, <1200000000>,
+			       <200000000>, <500000000>;
 };
 
 &dsi_dphy0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
index ce7165d7f1a1..102e448bc026 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
@@ -598,7 +598,7 @@
 	non-removable;
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
-	sd-uhs-sdr104;
+	sd-uhs-sdr50;
 	vmmc-supply = <&vcc3v3_sys>;
 	vqmmc-supply = <&vcc_1v8>;
 	status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
index 005cde61b4b2..a506948b5572 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
@@ -222,6 +222,7 @@
 			cache-size = <131072>;
 			cache-line-size = <64>;
 			cache-sets = <512>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -230,6 +231,7 @@
 			cache-size = <131072>;
 			cache-line-size = <64>;
 			cache-sets = <512>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -238,6 +240,7 @@
 			cache-size = <131072>;
 			cache-line-size = <64>;
 			cache-sets = <512>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -246,6 +249,7 @@
 			cache-size = <131072>;
 			cache-line-size = <64>;
 			cache-sets = <512>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -254,6 +258,7 @@
 			cache-size = <524288>;
 			cache-line-size = <64>;
 			cache-sets = <1024>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -262,6 +267,7 @@
 			cache-size = <524288>;
 			cache-line-size = <64>;
 			cache-sets = <1024>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -270,6 +276,7 @@
 			cache-size = <524288>;
 			cache-line-size = <64>;
 			cache-sets = <1024>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -278,6 +285,7 @@
 			cache-size = <524288>;
 			cache-line-size = <64>;
 			cache-sets = <1024>;
+			cache-level = <2>;
 			next-level-cache = <&l3_cache>;
 		};
 
@@ -286,6 +294,7 @@
 			cache-size = <3145728>;
 			cache-line-size = <64>;
 			cache-sets = <4096>;
+			cache-level = <3>;
 		};
 	};
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a1892a8f6032..3dd691c85ca0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -193,6 +193,9 @@ struct kvm_arch {
 	/* Interrupt controller */
 	struct vgic_dist	vgic;
 
+	/* Timers */
+	struct arch_timer_vm_data timer_data;
+
 	/* Mandated version of PSCI */
 	u32 psci_version;
 
@@ -573,9 +576,22 @@ struct kvm_vcpu_arch {
 	({							\
 		__build_check_flag(v, flagset, f, m);		\
 								\
-		v->arch.flagset & (m);				\
+		READ_ONCE(v->arch.flagset) & (m);		\
 	})
 
+/*
+ * Note that the set/clear accessors must be preempt-safe in order to
+ * avoid nesting them with load/put which also manipulate flags...
+ */
+#ifdef __KVM_NVHE_HYPERVISOR__
+/* the nVHE hypervisor is always non-preemptible */
+#define __vcpu_flags_preempt_disable()
+#define __vcpu_flags_preempt_enable()
+#else
+#define __vcpu_flags_preempt_disable()	preempt_disable()
+#define __vcpu_flags_preempt_enable()	preempt_enable()
+#endif
+
 #define __vcpu_set_flag(v, flagset, f, m)			\
 	do {							\
 		typeof(v->arch.flagset) *fset;			\
@@ -583,9 +599,11 @@ struct kvm_vcpu_arch {
 		__build_check_flag(v, flagset, f, m);		\
 								\
 		fset = &v->arch.flagset;			\
+		__vcpu_flags_preempt_disable();			\
 		if (HWEIGHT(m) > 1)				\
 			*fset &= ~(m);				\
 		*fset |= (f);					\
+		__vcpu_flags_preempt_enable();			\
 	} while (0)
 
 #define __vcpu_clear_flag(v, flagset, f, m)			\
@@ -595,7 +613,9 @@ struct kvm_vcpu_arch {
 		__build_check_flag(v, flagset, f, m);		\
 								\
 		fset = &v->arch.flagset;			\
+		__vcpu_flags_preempt_disable();			\
 		*fset &= ~(m);					\
+		__vcpu_flags_preempt_enable();			\
 	} while (0)
 
 #define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
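The kvm_host.h hunk above hardens the vcpu flag accessors in two ways: reads go through READ_ONCE() so they cannot tear against a concurrent update, and read-modify-write updates are bracketed with preempt_disable() so a vcpu load/put on the same CPU cannot interleave with them. A stripped-down sketch of the same rule, with a plain u64 standing in for v->arch.flagset:

#include <linux/compiler.h>
#include <linux/preempt.h>

/* 'flags' stands in for v->arch.flagset; masks mirror the macros above. */
static inline u64 get_flag(const u64 *flags, u64 mask)
{
	return READ_ONCE(*flags) & mask;	/* one untorn load */
}

static inline void set_flag(u64 *flags, u64 mask, u64 val)
{
	preempt_disable();			/* no vcpu load/put may nest here */
	*flags = (*flags & ~mask) | val;	/* the RMW is now CPU-local */
	preempt_enable();
}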
.L_head // SizeOfHeaders .long 0 // CheckSum .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem - .short 0 // DllCharacteristics + .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics .quad 0 // SizeOfStackReserve .quad 0 // SizeOfStackCommit .quad 0 // SizeOfHeapReserve diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index ca6eadeb7d1a..f531da6b362e 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -29,7 +29,6 @@ menuconfig KVM select KVM_MMIO select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_XFER_TO_GUEST_WORK - select SRCU select KVM_VFIO select HAVE_KVM_EVENTFD select HAVE_KVM_IRQFD diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 00610477ec7b..e1af4301b913 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -84,14 +84,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt) static u64 timer_get_offset(struct arch_timer_context *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + if (ctxt->offset.vm_offset) + return *ctxt->offset.vm_offset; - switch(arch_timer_ctx_index(ctxt)) { - case TIMER_VTIMER: - return __vcpu_sys_reg(vcpu, CNTVOFF_EL2); - default: - return 0; - } + return 0; } static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) @@ -128,15 +124,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) { - struct kvm_vcpu *vcpu = ctxt->vcpu; - - switch(arch_timer_ctx_index(ctxt)) { - case TIMER_VTIMER: - __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset; - break; - default: + if (!ctxt->offset.vm_offset) { WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt)); + return; } + + WRITE_ONCE(*ctxt->offset.vm_offset, offset); } u64 kvm_phys_timer_read(void) @@ -765,25 +758,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) return 0; } -/* Make the updates of cntvoff for all vtimer contexts atomic */ -static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) -{ - unsigned long i; - struct kvm *kvm = vcpu->kvm; - struct kvm_vcpu *tmp; - - mutex_lock(&kvm->lock); - kvm_for_each_vcpu(i, tmp, kvm) - timer_set_offset(vcpu_vtimer(tmp), cntvoff); - - /* - * When called from the vcpu create path, the CPU being created is not - * included in the loop above, so we just set it here as well. - */ - timer_set_offset(vcpu_vtimer(vcpu), cntvoff); - mutex_unlock(&kvm->lock); -} - void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); @@ -791,10 +765,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); vtimer->vcpu = vcpu; + vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset; ptimer->vcpu = vcpu; /* Synchronize cntvoff across all vtimers of a VM. 
*/ - update_vtimer_cntvoff(vcpu, kvm_phys_timer_read()); + timer_set_offset(vtimer, kvm_phys_timer_read()); timer_set_offset(ptimer, 0); hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); @@ -840,7 +815,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) break; case KVM_REG_ARM_TIMER_CNT: timer = vcpu_vtimer(vcpu); - update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value); + timer_set_offset(timer, kvm_phys_timer_read() - value); break; case KVM_REG_ARM_TIMER_CVAL: timer = vcpu_vtimer(vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3bd732eaf087..4b2e16e696a8 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -220,6 +220,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VCPU_ATTRIBUTES: case KVM_CAP_PTP_KVM: case KVM_CAP_ARM_SYSTEM_SUSPEND: + case KVM_CAP_IRQFD_RESAMPLE: r = 1; break; case KVM_CAP_SET_GUEST_DEBUG2: @@ -1889,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits) return ret; } +static u64 get_hyp_id_aa64pfr0_el1(void) +{ + /* + * Track whether the system isn't affected by spectre/meltdown in the + * hypervisor's view of id_aa64pfr0_el1, used for protected VMs. + * Although this is per-CPU, we make it global for simplicity, e.g., not + * to have to worry about vcpu migration. + * + * Unlike for non-protected VMs, userspace cannot override this for + * protected VMs. + */ + u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3)); + + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), + arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), + arm64_get_meltdown_state() == SPECTRE_UNAFFECTED); + + return val; +} + static void kvm_hyp_init_symbols(void) { - kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1(); kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1); kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1); diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 07edfc7524c9..37440e1dda93 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -33,11 +33,14 @@ * Allow for protected VMs: * - Floating-point and Advanced SIMD * - Data Independent Timing + * - Spectre/Meltdown Mitigation */ #define PVM_ID_AA64PFR0_ALLOW (\ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 08d2b004f4b7..edd969a1f36b 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val, static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) { - const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); u64 set_mask = 0; u64 allow_mask = PVM_ID_AA64PFR0_ALLOW; set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val, 
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED); - /* Spectre and Meltdown mitigation in KVM */ - set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), - (u64)kvm->arch.pfr0_csv2); - set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), - (u64)kvm->arch.pfr0_csv3); - return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask; } diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 64c086c02c60..c4b4678bc4a4 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -44,7 +44,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val) feature = smccc_get_arg1(vcpu); switch (feature) { case KVM_PTP_VIRT_COUNTER: - cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2); + cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset; break; case KVM_PTP_PHYS_COUNTER: cycles = systime_snapshot.cycles; @@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) u64 val; int wa_level; + if (KVM_REG_SIZE(reg->id) != sizeof(val)) + return -ENOENT; if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 7113587222ff..3b9d4d24c361 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -666,14 +666,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr) CONFIG_PGTABLE_LEVELS), .mm_ops = &kvm_user_mm_ops, }; + unsigned long flags; kvm_pte_t pte = 0; /* Keep GCC quiet... */ u32 level = ~0; int ret; + /* + * Disable IRQs so that we hazard against a concurrent + * teardown of the userspace page tables (which relies on + * IPI-ing threads). + */ + local_irq_save(flags); ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level); - VM_BUG_ON(ret); - VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS); - VM_BUG_ON(!(pte & PTE_VALID)); + local_irq_restore(flags); + + if (ret) + return ret; + + /* + * Not seeing an error, but not updating level? Something went + * deeply wrong... + */ + if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS)) + return -EFAULT; + + /* Oops, the userspace PTs are gone... Replay the fault */ + if (!kvm_pte_valid(pte)) + return -EAGAIN; return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)); } @@ -1079,7 +1098,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, * * Returns the size of the mapping. */ -static unsigned long +static long transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap) @@ -1091,8 +1110,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, * sure that the HVA and IPA are sufficiently aligned and that the * block map is contained within the memslot. */ - if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) && - get_user_mapping_size(kvm, hva) >= PMD_SIZE) { + if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { + int sz = get_user_mapping_size(kvm, hva); + + if (sz < 0) + return sz; + + if (sz < PMD_SIZE) + return PAGE_SIZE; + /* * The address we faulted on is backed by a transparent huge * page. 
However, because we map the compound huge page and @@ -1192,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, { int ret = 0; bool write_fault, writable, force_pte = false; - bool exec_fault; + bool exec_fault, mte_allowed; bool device = false; unsigned long mmu_seq; struct kvm *kvm = vcpu->kvm; @@ -1203,7 +1229,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_pfn_t pfn; bool logging_active = memslot_is_logging(memslot); unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); - unsigned long vma_pagesize, fault_granule; + long vma_pagesize, fault_granule; enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; struct kvm_pgtable *pgt; @@ -1218,6 +1244,20 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } /* + * Permission faults just need to update the existing leaf entry, + * and so normally don't require allocations from the memcache. The + * only exception to this is when dirty logging is enabled at runtime + * and a write fault needs to collapse a block entry into a table. + */ + if (fault_status != ESR_ELx_FSC_PERM || + (logging_active && write_fault)) { + ret = kvm_mmu_topup_memory_cache(memcache, + kvm_mmu_cache_min_pages(kvm)); + if (ret) + return ret; + } + + /* * Let's check if we will get back a huge page backed by hugetlbfs, or * get block mapping for device MMIO region. */ @@ -1269,37 +1309,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, fault_ipa &= ~(vma_pagesize - 1); gfn = fault_ipa >> PAGE_SHIFT; - mmap_read_unlock(current->mm); + mte_allowed = kvm_vma_mte_allowed(vma); - /* - * Permission faults just need to update the existing leaf entry, - * and so normally don't require allocations from the memcache. The - * only exception to this is when dirty logging is enabled at runtime - * and a write fault needs to collapse a block entry into a table. - */ - if (fault_status != ESR_ELx_FSC_PERM || - (logging_active && write_fault)) { - ret = kvm_mmu_topup_memory_cache(memcache, - kvm_mmu_cache_min_pages(kvm)); - if (ret) - return ret; - } + /* Don't use the VMA after the unlock -- it may have vanished */ + vma = NULL; - mmu_seq = vcpu->kvm->mmu_invalidate_seq; /* - * Ensure the read of mmu_invalidate_seq happens before we call - * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk - * the page we just got a reference to gets unmapped before we have a - * chance to grab the mmu_lock, which ensure that if the page gets - * unmapped afterwards, the call to kvm_unmap_gfn will take it away - * from us again properly. This smp_rmb() interacts with the smp_wmb() - * in kvm_mmu_notifier_invalidate_<page|range_end>. + * Read mmu_invalidate_seq so that KVM can detect if the results of + * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to + * acquiring kvm->mmu_lock. * - * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is - * used to avoid unnecessary overhead introduced to locate the memory - * slot because it's always fixed even @gfn is adjusted for huge pages. + * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs + * with the smp_wmb() in kvm_mmu_invalidate_end(). 
*/ - smp_rmb(); + mmu_seq = vcpu->kvm->mmu_invalidate_seq; + mmap_read_unlock(current->mm); pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, write_fault, &writable, NULL); @@ -1350,11 +1374,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &pfn, &fault_ipa); + + if (vma_pagesize < 0) { + ret = vma_pagesize; + goto out_unlock; + } } if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) { /* Check the VMM hasn't introduced a new disallowed VMA */ - if (kvm_vma_mte_allowed(vma)) { + if (mte_allowed) { sanitise_mte_tags(kvm, pfn, vma_pagesize); } else { ret = -EFAULT; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 24908400e190..5eca0cdd961d 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -538,7 +538,8 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) if (!kvm_pmu_is_3p5(vcpu)) val &= ~ARMV8_PMU_PMCR_LP; - __vcpu_sys_reg(vcpu, PMCR_EL0) = val; + /* The reset bits don't indicate any state, and shouldn't be saved. */ + __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P); if (val & ARMV8_PMU_PMCR_E) { kvm_pmu_enable_counter_mask(vcpu, @@ -557,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) for_each_set_bit(i, &mask, 32) kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true); } + kvm_vcpu_pmu_restore_guest(vcpu); } static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 53749d3a0996..34688918c811 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_supports_32bit_el0()) val |= ARMV8_PMU_PMCR_LC; kvm_pmu_handle_pmcr(vcpu, val); - kvm_vcpu_pmu_restore_guest(vcpu); } else { /* PMCR.P & PMCR.C are RAZ */ val = __vcpu_sys_reg(vcpu, PMCR_EL0) @@ -856,6 +855,22 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) return true; } +static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 *val) +{ + u64 idx; + + if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) + /* PMCCNTR_EL0 */ + idx = ARMV8_PMU_CYCLE_IDX; + else + /* PMEVCNTRn_EL0 */ + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); + + *val = kvm_pmu_get_counter_value(vcpu, idx); + return 0; +} + static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -1072,7 +1087,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, /* Macro to expand the PMEVCNTRn_EL0 register */ #define PMU_PMEVCNTR_EL0(n) \ { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \ - .reset = reset_pmevcntr, \ + .reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \ .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), } /* Macro to expand the PMEVTYPERn_EL0 register */ @@ -1982,7 +1997,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { PMU_SYS_REG(SYS_PMCEID1_EL0), .access = access_pmceid, .reset = NULL }, { PMU_SYS_REG(SYS_PMCCNTR_EL0), - .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 }, + .access = access_pmu_evcntr, .reset = reset_unknown, + .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr}, { PMU_SYS_REG(SYS_PMXEVTYPER_EL0), .access = access_pmu_evtyper, .reset = NULL }, { PMU_SYS_REG(SYS_PMXEVCNTR_EL0), diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index a6acb94ea3d6..c2edadb8ec6a 100644 --- 
a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -281,4 +281,8 @@ /* DMB */ #define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH) +/* ADR */ +#define A64_ADR(Rd, offset) \ + aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR) + #endif /* _BPF_JIT_H */ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 62f805f427b7..b26da8efa616 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1900,7 +1900,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, restore_args(ctx, args_off, nargs); /* call original func */ emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx); - emit(A64_BLR(A64_R(10)), ctx); + emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx); + emit(A64_RET(A64_R(10)), ctx); /* store return value */ emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx); /* reserve a nop for bpf_tramp_image_put */ diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 7fd51257e0ed..3ddde336e6a5 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -447,6 +447,22 @@ config ARCH_IOREMAP protection support. However, you can enable LoongArch DMW-based ioremap() for better performance. +config ARCH_WRITECOMBINE + bool "Enable WriteCombine (WUC) for ioremap()" + help + LoongArch maintains cache coherency in hardware, but when paired + with LS7A chipsets the WUC attribute (Weak-ordered UnCached, which + is similar to WriteCombine) is outside the scope of the cache coherency + mechanism for PCIe devices (this is a PCIe protocol violation, which + may be fixed in newer chipsets). + + This means WUC can only be used for write-only memory regions now, so + this option is disabled by default, making WUC silently fall back to + SUC for ioremap(). You can enable this option if the kernel is guaranteed + to run on hardware without this bug. + + You can override this setting via the writecombine=on/off boot parameter.
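As a rough usage sketch (not part of this patch; the device name, base address, and size below are hypothetical), the point of routing ioremap_wc() through pgprot_wc is that a write-only mapping keeps working whether the WUC attribute or the SUC fallback is in effect:

/*
 * Minimal sketch: map a device region write-combined. With
 * ARCH_WRITECOMBINE=n (or writecombine=off), ioremap_wc() silently
 * degrades to SUC; this stays correct because the mapping is only
 * ever written, never read.
 */
#include <linux/io.h>

#define DEMO_FB_BASE	0x1e000000UL	/* hypothetical MMIO base */
#define DEMO_FB_SIZE	0x100000UL	/* hypothetical region size */

static void __iomem *demo_map_fb(void)
{
	void __iomem *fb = ioremap_wc(DEMO_FB_BASE, DEMO_FB_SIZE);

	if (fb)
		writel(0, fb);	/* writes only; avoid readl() on WUC mappings */
	return fb;
}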
+ config ARCH_STRICT_ALIGN bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT default y diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h index 4198753aa1d0..976a810352c6 100644 --- a/arch/loongarch/include/asm/acpi.h +++ b/arch/loongarch/include/asm/acpi.h @@ -41,8 +41,11 @@ extern void loongarch_suspend_enter(void); static inline unsigned long acpi_get_wakeup_address(void) { +#ifdef CONFIG_SUSPEND extern void loongarch_wakeup_start(void); return (unsigned long)loongarch_wakeup_start; +#endif + return 0UL; } #endif /* _ASM_LOONGARCH_ACPI_H */ diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index 8fb699b4d40a..5c9c03bdf915 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -71,9 +71,9 @@ extern unsigned long vm_map_base; #define _ATYPE32_ int #define _ATYPE64_ __s64 #ifdef CONFIG_64BIT -#define _CONST64_(x) x ## L +#define _CONST64_(x) x ## UL #else -#define _CONST64_(x) x ## LL +#define _CONST64_(x) x ## ULL #endif #endif diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h index 0051b526ac6d..c60796869b2b 100644 --- a/arch/loongarch/include/asm/bootinfo.h +++ b/arch/loongarch/include/asm/bootinfo.h @@ -13,7 +13,6 @@ const char *get_system_type(void); extern void init_environ(void); extern void memblock_init(void); extern void platform_init(void); -extern void plat_swiotlb_setup(void); extern int __init init_numa_memory(void); struct loongson_board_info { diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h index b07974218393..f6177f133477 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -42,6 +42,7 @@ #define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU) #define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX) #define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX) +#define cpu_has_crc32 cpu_opt(LOONGARCH_CPU_CRC32) #define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX) #define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO) #define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ) diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h index c3da91759472..88773d849e33 100644 --- a/arch/loongarch/include/asm/cpu.h +++ b/arch/loongarch/include/asm/cpu.h @@ -78,25 +78,26 @@ enum cpu_type_enum { #define CPU_FEATURE_FPU 3 /* CPU has FPU */ #define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */ #define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */ -#define CPU_FEATURE_COMPLEX 6 /* CPU has Complex instructions */ -#define CPU_FEATURE_CRYPTO 7 /* CPU has Crypto instructions */ -#define CPU_FEATURE_LVZ 8 /* CPU has Virtualization extension */ -#define CPU_FEATURE_LBT_X86 9 /* CPU has X86 Binary Translation */ -#define CPU_FEATURE_LBT_ARM 10 /* CPU has ARM Binary Translation */ -#define CPU_FEATURE_LBT_MIPS 11 /* CPU has MIPS Binary Translation */ -#define CPU_FEATURE_TLB 12 /* CPU has TLB */ -#define CPU_FEATURE_CSR 13 /* CPU has CSR */ -#define CPU_FEATURE_WATCH 14 /* CPU has watchpoint registers */ -#define CPU_FEATURE_VINT 15 /* CPU has vectored interrupts */ -#define CPU_FEATURE_CSRIPI 16 /* CPU has CSR-IPI */ -#define CPU_FEATURE_EXTIOI 17 /* CPU has EXT-IOI */ -#define CPU_FEATURE_PREFETCH 18 /* CPU has prefetch instructions */ -#define CPU_FEATURE_PMP 19 /* CPU has perfermance counter */ -#define CPU_FEATURE_SCALEFREQ 20 /* CPU supports cpufreq scaling */ -#define CPU_FEATURE_FLATMODE 21 /* CPU has flat mode 
*/ -#define CPU_FEATURE_EIODECODE 22 /* CPU has EXTIOI interrupt pin decode mode */ -#define CPU_FEATURE_GUESTID 23 /* CPU has GuestID feature */ -#define CPU_FEATURE_HYPERVISOR 24 /* CPU has hypervisor (running in VM) */ +#define CPU_FEATURE_CRC32 6 /* CPU has CRC32 instructions */ +#define CPU_FEATURE_COMPLEX 7 /* CPU has Complex instructions */ +#define CPU_FEATURE_CRYPTO 8 /* CPU has Crypto instructions */ +#define CPU_FEATURE_LVZ 9 /* CPU has Virtualization extension */ +#define CPU_FEATURE_LBT_X86 10 /* CPU has X86 Binary Translation */ +#define CPU_FEATURE_LBT_ARM 11 /* CPU has ARM Binary Translation */ +#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */ +#define CPU_FEATURE_TLB 13 /* CPU has TLB */ +#define CPU_FEATURE_CSR 14 /* CPU has CSR */ +#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */ +#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */ +#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */ +#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */ +#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */ +#define CPU_FEATURE_PMP 20 /* CPU has performance counter */ +#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */ +#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */ +#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */ +#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ +#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) @@ -104,6 +105,7 @@ enum cpu_type_enum { #define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU) #define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX) #define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX) +#define LOONGARCH_CPU_CRC32 BIT_ULL(CPU_FEATURE_CRC32) #define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX) #define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO) #define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ) diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 402a7d9e3a53..545e2708fbf7 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -54,8 +54,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, * @offset: bus address of the memory * @size: size of the resource to map */ +extern pgprot_t pgprot_wc; + #define ioremap_wc(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC)) + ioremap_prot((offset), (size), pgprot_val(pgprot_wc)) #define ioremap_cache(offset, size) \ ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 65b7dcdea16d..83da5d29e2d1 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -117,7 +117,7 @@ static inline u32 read_cpucfg(u32 reg) #define CPUCFG1_EP BIT(22) #define CPUCFG1_RPLV BIT(23) #define CPUCFG1_HUGEPG BIT(24) -#define CPUCFG1_IOCSRBRD BIT(25) +#define CPUCFG1_CRC32 BIT(25) #define CPUCFG1_MSGINT BIT(26) #define LOONGARCH_CPUCFG2 0x2 @@ -423,9 +423,9 @@ static __always_inline void iocsr_write64(u64 val, u32 reg) #define CSR_ASID_ASID_WIDTH 10 #define CSR_ASID_ASID (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT) -#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[47] = 0 */ +#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[VALEN-1] = 0 */ -#define LOONGARCH_CSR_PGDH 0x1a /* Page table base
address when VA[47] = 1 */ +#define LOONGARCH_CSR_PGDH 0x1a /* Page table base address when VA[VALEN-1] = 1 */ #define LOONGARCH_CSR_PGD 0x1b /* Page table base */ diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h index 438f09d4ccf4..88554f92e010 100644 --- a/arch/loongarch/include/asm/module.lds.h +++ b/arch/loongarch/include/asm/module.lds.h @@ -2,8 +2,8 @@ /* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ SECTIONS { . = ALIGN(4); - .got : { BYTE(0) } - .plt : { BYTE(0) } - .plt.idx : { BYTE(0) } - .ftrace_trampoline : { BYTE(0) } + .got 0 : { BYTE(0) } + .plt 0 : { BYTE(0) } + .plt.idx 0 : { BYTE(0) } + .ftrace_trampoline 0 : { BYTE(0) } } diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h index cc48ed262021..82d811b5c6e9 100644 --- a/arch/loongarch/include/uapi/asm/ptrace.h +++ b/arch/loongarch/include/uapi/asm/ptrace.h @@ -47,11 +47,12 @@ struct user_fp_state { }; struct user_watch_state { - uint16_t dbg_info; + uint64_t dbg_info; struct { uint64_t addr; uint64_t mask; uint32_t ctrl; + uint32_t pad; } dbg_regs[8]; }; diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 3a3fce2d7846..5adf0f736c6d 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -60,7 +60,7 @@ static inline void set_elf_platform(int cpu, const char *plat) /* MAP BASE */ unsigned long vm_map_base; -EXPORT_SYMBOL_GPL(vm_map_base); +EXPORT_SYMBOL(vm_map_base); static void cpu_probe_addrbits(struct cpuinfo_loongarch *c) { @@ -94,13 +94,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH; - elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32; + elf_hwcap = HWCAP_LOONGARCH_CPUCFG; config = read_cpucfg(LOONGARCH_CPUCFG1); if (config & CPUCFG1_UAL) { c->options |= LOONGARCH_CPU_UAL; elf_hwcap |= HWCAP_LOONGARCH_UAL; } + if (config & CPUCFG1_CRC32) { + c->options |= LOONGARCH_CPU_CRC32; + elf_hwcap |= HWCAP_LOONGARCH_CRC32; + } + config = read_cpucfg(LOONGARCH_CPUCFG2); if (config & CPUCFG2_LAM) { diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index 5c67cc4fd56d..0d82907b5404 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -76,6 +76,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_fpu) seq_printf(m, " fpu"); if (cpu_has_lsx) seq_printf(m, " lsx"); if (cpu_has_lasx) seq_printf(m, " lasx"); + if (cpu_has_crc32) seq_printf(m, " crc32"); if (cpu_has_complex) seq_printf(m, " complex"); if (cpu_has_crypto) seq_printf(m, " crypto"); if (cpu_has_lvz) seq_printf(m, " lvz"); diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 06bceae7d104..5fcffb452367 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -391,10 +391,10 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, return 0; } -static int ptrace_hbp_get_resource_info(unsigned int note_type, u16 *info) +static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info) { u8 num; - u16 reg = 0; + u64 reg = 0; switch (note_type) { case NT_LOONGARCH_HW_BREAK: @@ -524,15 +524,16 @@ static int ptrace_hbp_set_addr(unsigned int note_type, return modify_user_hw_breakpoint(bp, &attr); } -#define PTRACE_HBP_CTRL_SZ sizeof(u32) #define PTRACE_HBP_ADDR_SZ sizeof(u64) #define PTRACE_HBP_MASK_SZ sizeof(u64) +#define 
PTRACE_HBP_CTRL_SZ sizeof(u32) +#define PTRACE_HBP_PAD_SZ sizeof(u32) static int hw_break_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { - u16 info; + u64 info; u32 ctrl; u64 addr, mask; int ret, idx = 0; @@ -545,7 +546,7 @@ static int hw_break_get(struct task_struct *target, membuf_write(&to, &info, sizeof(info)); - /* (address, ctrl) registers */ + /* (address, mask, ctrl) registers */ while (to.left) { ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); if (ret) @@ -562,6 +563,7 @@ static int hw_break_get(struct task_struct *target, membuf_store(&to, addr); membuf_store(&to, mask); membuf_store(&to, ctrl); + membuf_zero(&to, sizeof(u32)); idx++; } @@ -582,7 +584,7 @@ static int hw_break_set(struct task_struct *target, offset = offsetof(struct user_watch_state, dbg_regs); user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); - /* (address, ctrl) registers */ + /* (address, mask, ctrl) registers */ limit = regset->n * regset->size; while (count && offset < limit) { if (count < PTRACE_HBP_ADDR_SZ) @@ -602,7 +604,7 @@ static int hw_break_set(struct task_struct *target, break; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask, - offset, offset + PTRACE_HBP_ADDR_SZ); + offset, offset + PTRACE_HBP_MASK_SZ); if (ret) return ret; @@ -611,8 +613,8 @@ static int hw_break_set(struct task_struct *target, return ret; offset += PTRACE_HBP_MASK_SZ; - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask, - offset, offset + PTRACE_HBP_MASK_SZ); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, + offset, offset + PTRACE_HBP_CTRL_SZ); if (ret) return ret; @@ -620,6 +622,11 @@ static int hw_break_set(struct task_struct *target, if (ret) return ret; offset += PTRACE_HBP_CTRL_SZ; + + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + offset, offset + PTRACE_HBP_PAD_SZ); + offset += PTRACE_HBP_PAD_SZ; + idx++; } diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index bae84ccf6d36..4444b13418f0 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -160,6 +160,27 @@ static void __init smbios_parse(void) dmi_walk(find_tokens, NULL); } +#ifdef CONFIG_ARCH_WRITECOMBINE +pgprot_t pgprot_wc = PAGE_KERNEL_WUC; +#else +pgprot_t pgprot_wc = PAGE_KERNEL_SUC; +#endif + +EXPORT_SYMBOL(pgprot_wc); + +static int __init setup_writecombine(char *p) +{ + if (!strcmp(p, "on")) + pgprot_wc = PAGE_KERNEL_WUC; + else if (!strcmp(p, "off")) + pgprot_wc = PAGE_KERNEL_SUC; + else + pr_warn("Unknown writecombine setting \"%s\".\n", p); + + return 0; +} +early_param("writecombine", setup_writecombine); + static int usermem __initdata; static int __init early_parse_mem(char *p) @@ -368,8 +389,8 @@ static void __init arch_mem_init(char **cmdline_p) /* * In order to reduce the possibility of kernel panic when failed to * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate - * low memory as small as possible before plat_swiotlb_setup(), so - * make sparse_init() using top-down allocation. + * low memory as small as possible before swiotlb_init(), so make + * sparse_init() using top-down allocation. 
*/ memblock_set_bottom_up(false); sparse_init(); diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c index 3a690f96f00c..2463d2fea21f 100644 --- a/arch/loongarch/kernel/stacktrace.c +++ b/arch/loongarch/kernel/stacktrace.c @@ -30,7 +30,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, regs->regs[1] = 0; for (unwind_start(&state, task, regs); - !unwind_done(&state); unwind_next_frame(&state)) { + !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); if (!addr || !consume_entry(cookie, addr)) break; diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c index a463d6961344..ba324ba76fa1 100644 --- a/arch/loongarch/kernel/unwind.c +++ b/arch/loongarch/kernel/unwind.c @@ -28,5 +28,6 @@ bool default_next_frame(struct unwind_state *state) } while (!get_stack_info(state->sp, state->task, info)); + state->error = true; return false; } diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c index 9095fde8e55d..55afc27320e1 100644 --- a/arch/loongarch/kernel/unwind_prologue.c +++ b/arch/loongarch/kernel/unwind_prologue.c @@ -211,7 +211,7 @@ static bool next_frame(struct unwind_state *state) pc = regs->csr_era; if (user_mode(regs) || !__kernel_text_address(pc)) - return false; + goto out; state->first = true; state->pc = pc; @@ -226,6 +226,8 @@ static bool next_frame(struct unwind_state *state) } while (!get_stack_info(state->sp, state->task, info)); +out: + state->error = true; return false; } diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index e018aed34586..3b7d8129570b 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -41,7 +41,7 @@ * don't have to care about aliases on other CPUs. 
*/ unsigned long empty_zero_page, zero_page_mask; -EXPORT_SYMBOL_GPL(empty_zero_page); +EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(zero_page_mask); void setup_zero_pages(void) @@ -270,7 +270,7 @@ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; #endif #ifndef __PAGETABLE_PMD_FOLDED pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss; -EXPORT_SYMBOL_GPL(invalid_pmd_table); +EXPORT_SYMBOL(invalid_pmd_table); #endif pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; EXPORT_SYMBOL(invalid_pte_table); diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index 288003a9f0ca..d586df48ecc6 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -1022,6 +1022,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext emit_atomic(insn, ctx); break; + /* Speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + default: pr_err("bpf_jit: unknown opcode %02x\n", code); return -EINVAL; diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S index 90da899c06a1..e2fc3b4e31f0 100644 --- a/arch/loongarch/power/suspend_asm.S +++ b/arch/loongarch/power/suspend_asm.S @@ -80,6 +80,10 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) JUMP_VIRT_ADDR t0, t1 + /* Enable PG */ + li.w t0, 0xb0 # PLV=0, IE=0, PG=1 + csrwr t0, LOONGARCH_CSR_CRMD + la.pcrel t0, acpi_saved_sp ld.d sp, t0, 0 SETUP_WAKEUP diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index 3a2bb2e8fdad..fbff1cea62ca 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -326,16 +326,16 @@ void __init setup_arch(char **cmdline_p) panic("No configuration setup"); } -#ifdef CONFIG_BLK_DEV_INITRD - if (m68k_ramdisk.size) { + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size); + + paging_init(); + + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) { initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); initrd_end = initrd_start + m68k_ramdisk.size; pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end); } -#endif - - paging_init(); #ifdef CONFIG_NATFEAT nf_init(); diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 5c8cba0efc63..a700807c9b6d 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -30,6 +30,7 @@ #include <linux/init.h> #include <linux/ptrace.h> #include <linux/kallsyms.h> +#include <linux/extable.h> #include <asm/setup.h> #include <asm/fpu.h> @@ -545,7 +546,8 @@ static inline void bus_error030 (struct frame *fp) errorcode |= 2; if (mmusr & (MMU_I | MMU_WP)) { - if (ssw & 4) { + /* We might have an exception table for this PC */ + if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) { pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? 
"read" : "write", fp->un.fmtb.daddr, diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 2a375637e007..911301224078 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -437,7 +437,7 @@ void __init paging_init(void) } min_addr = m68k_memory[0].addr; - max_addr = min_addr + m68k_memory[0].size; + max_addr = min_addr + m68k_memory[0].size - 1; memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0, MEMBLOCK_NONE); for (i = 1; i < m68k_num_memory;) { @@ -452,21 +452,21 @@ void __init paging_init(void) } memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i, MEMBLOCK_NONE); - addr = m68k_memory[i].addr + m68k_memory[i].size; + addr = m68k_memory[i].addr + m68k_memory[i].size - 1; if (addr > max_addr) max_addr = addr; i++; } m68k_memoffset = min_addr - PAGE_OFFSET; - m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6; + m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6; module_fixup(NULL, __start_fixup, __stop_fixup); flush_icache(); - high_memory = phys_to_virt(max_addr); + high_memory = phys_to_virt(max_addr) + 1; min_low_pfn = availmem >> PAGE_SHIFT; - max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT; + max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1; /* Reserve kernel text/data/bss and the memory allocated in head.S */ memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr); diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c index 33788668cbdb..3779e7855bd7 100644 --- a/arch/mips/bmips/dma.c +++ b/arch/mips/bmips/dma.c @@ -5,6 +5,8 @@ #include <asm/bmips.h> #include <asm/io.h> +bool bmips_rac_flush_disable; + void arch_sync_dma_for_cpu_all(void) { void __iomem *cbr = BMIPS_GET_CBR(); @@ -15,6 +17,9 @@ void arch_sync_dma_for_cpu_all(void) boot_cpu_type() != CPU_BMIPS4380) return; + if (unlikely(bmips_rac_flush_disable)) + return; + /* Flush stale data out of the readahead cache */ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG); diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c index e95b3f78e7cd..549a6392a3d2 100644 --- a/arch/mips/bmips/setup.c +++ b/arch/mips/bmips/setup.c @@ -35,6 +35,8 @@ #define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c)) #define BCM6328_TP1_DISABLED BIT(9) +extern bool bmips_rac_flush_disable; + static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000; struct bmips_quirk { @@ -104,6 +106,12 @@ static void bcm6358_quirks(void) * disable SMP for now */ bmips_smp_enabled = 0; + + /* + * RAC flush causes kernel panics on BCM6358 when booting from TP1 + * because the bootloader is not initializing it properly. 
+ */ + bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)); } static void bcm6368_quirks(void) diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 89a1511d2ee4..edf9634aa8ee 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -284,6 +284,7 @@ CONFIG_IXGB=m CONFIG_SKGE=m CONFIG_SKY2=m CONFIG_MYRI10GE=m +CONFIG_FEALNX=m CONFIG_NATSEMI=m CONFIG_NS83820=m CONFIG_S2IO=m diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 52cbde60edf5..9ff55cb80a64 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -15,6 +15,8 @@ #define EMITS_PT_NOTE #endif +#define RUNTIME_DISCARD_EXIT + #include <asm-generic/vmlinux.lds.h> #undef mips diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 29e51649203b..a8cdba75f98d 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -26,7 +26,6 @@ config KVM select HAVE_KVM_VCPU_ASYNC_IOCTL select KVM_MMIO select MMU_NOTIFIER - select SRCU select INTERVAL_TREE select KVM_GENERIC_HARDWARE_ENABLING help diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts index 73f8c998c64d..d4f5f159d6f2 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts @@ -10,7 +10,6 @@ / { model = "fsl,T1040RDB-REV-A"; - compatible = "fsl,T1040RDB-REV-A"; }; &seville_port0 { diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts index b6733e7e6580..dd3aab81e9de 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts @@ -180,6 +180,9 @@ }; &seville_port8 { - ethernet = <&enet0>; + status = "okay"; +}; + +&seville_port9 { status = "okay"; }; diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index f58eb820eb5e..ad0ab33336b8 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -686,6 +686,7 @@ seville_port8: port@8 { reg = <8>; phy-mode = "internal"; + ethernet = <&enet0>; status = "disabled"; fixed-link { @@ -697,6 +698,7 @@ seville_port9: port@9 { reg = <9>; phy-mode = "internal"; + ethernet = <&enet1>; status = "disabled"; fixed-link { diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 110258277959..f73c98be56c8 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -461,6 +461,7 @@ CONFIG_MV643XX_ETH=m CONFIG_SKGE=m CONFIG_SKY2=m CONFIG_MYRI10GE=m +CONFIG_FEALNX=m CONFIG_NATSEMI=m CONFIG_NS83820=m CONFIG_PCMCIA_AXNET=m diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 2bbc0fcce04a..5e26c7f2c25a 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -148,6 +148,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, */ } +static inline bool __pte_protnone(unsigned long pte) +{ + return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE); +} + static inline bool __pte_flags_need_flush(unsigned long oldval, unsigned long newval) { @@ -164,8 +169,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval, /* * We do not expect kernel mappings or non-PTEs or not-present PTEs. 
*/ - VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED); - VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED); + VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED); + VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED); VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE)); VM_WARN_ON_ONCE(!(newval & _PAGE_PTE)); VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT)); diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 92a968202ba7..365d2720097c 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -2,7 +2,7 @@ #ifndef __ASM_KASAN_H #define __ASM_KASAN_H -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN) && !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) #define _GLOBAL_KASAN(fn) _GLOBAL(__##fn) #define _GLOBAL_TOC_KASAN(fn) _GLOBAL_TOC(__##fn) #define EXPORT_SYMBOL_KASAN(fn) EXPORT_SYMBOL(__##fn) diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index 2aa0e31e6884..60ba22770f51 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -30,11 +30,17 @@ extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); void memcpy_flushcache(void *dest, const void *src, size_t size); +#ifdef CONFIG_KASAN +/* __mem variants are used by KASAN to implement instrumented meminstrinsics. */ +#ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX +#define __memset memset +#define __memcpy memcpy +#define __memmove memmove +#else /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */ void *__memset(void *s, int c, __kernel_size_t count); void *__memcpy(void *to, const void *from, __kernel_size_t n); void *__memmove(void *to, const void *from, __kernel_size_t n); - -#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) +#ifndef __SANITIZE_ADDRESS__ /* * For files that are not instrumented (e.g. mm/slub.c) we * should use not instrumented version of mem* functions. @@ -46,8 +52,9 @@ void *__memmove(void *to, const void *from, __kernel_size_t n); #ifndef __NO_FORTIFY #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ #endif - -#endif +#endif /* !__SANITIZE_ADDRESS__ */ +#endif /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */ +#endif /* CONFIG_KASAN */ #ifdef CONFIG_PPC64 #ifndef CONFIG_KASAN diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 5a319863f289..69623b9045d5 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -13,8 +13,13 @@ # If you really need to reference something from prom_init.o add # it to the list below: -grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null -if [ $? -eq 0 ] +has_renamed_memintrinsics() +{ + grep -q "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} && \ + ! 
grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" ${KCONFIG_CONFIG} +} + +if has_renamed_memintrinsics then MEM_FUNCS="__memcpy __memset" else diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c index 2087a785f05f..5fff0d04b23f 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-view.c +++ b/arch/powerpc/kernel/ptrace/ptrace-view.c @@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, static int ppr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { + if (!target->thread.regs) + return -EINVAL; + return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64)); } @@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { + if (!target->thread.regs) + return -EINVAL; + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.regs->ppr, 0, sizeof(u64)); } diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index a9f57dad6d91..902611954200 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -22,7 +22,6 @@ config KVM select PREEMPT_NOTIFIERS select HAVE_KVM_EVENTFD select HAVE_KVM_VCPU_ASYNC_IOCTL - select SRCU select KVM_VFIO select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 4c5405fc5538..d23e25e8432d 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -576,6 +576,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; #endif +#ifdef CONFIG_HAVE_KVM_IRQFD + case KVM_CAP_IRQFD_RESAMPLE: + r = !xive_enabled(); + break; +#endif + case KVM_CAP_PPC_ALLOC_HTAB: r = hv_enabled; break; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 2bef19cc1b98..af46aa88422b 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma } /* - * Check for a read fault. This could be caused by a read on an - * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page. + * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as + * defined in protection_map[]. Read faults can only be caused by + * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix. */ - if (unlikely(!(vma->vm_flags & VM_READ))) + if (unlikely(!vma_is_accessible(vma))) return true; + + if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))) + return true; + /* * We should ideally do the vma pkey access check here. But in the * fault path, handle_mm_fault() also does the same check. To avoid diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b44ce71917d7..16cfe56be05b 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -366,6 +366,7 @@ void update_numa_distance(struct device_node *node) WARN(numa_distance_table[nid][nid] == -1, "NUMA distance details for node %d not provided\n", nid); } +EXPORT_SYMBOL_GPL(update_numa_distance); /* * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... 
domainidN} diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index b481c5c8bae1..21b22bf16ce6 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -7,6 +7,7 @@ config PPC_PSERIES select OF_DYNAMIC select FORCE_PCI select PCI_MSI + select GENERIC_ALLOCATOR select PPC_XICS select PPC_XIVE_SPAPR select PPC_ICP_NATIVE diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 2f8385523a13..1a53e048ceb7 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -1428,6 +1428,13 @@ static int papr_scm_probe(struct platform_device *pdev) return -ENODEV; } + /* + * open firmware platform device create won't update the NUMA + * distance table. For PAPR SCM devices we use numa_map_to_online_node() + * to find the nearest online NUMA node and that requires correct + * distance table information. + */ + update_numa_distance(dn); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c index 559112312810..513180467562 100644 --- a/arch/powerpc/platforms/pseries/vas.c +++ b/arch/powerpc/platforms/pseries/vas.c @@ -856,6 +856,13 @@ int pseries_vas_dlpar_cpu(void) { int new_nr_creds, rc; + /* + * NX-GZIP is not enabled. Nothing to do for DLPAR event + */ + if (!copypaste_feat) + return 0; + + rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat, (u64)virt_to_phys(&hv_cop_caps)); @@ -1012,6 +1019,7 @@ static int __init pseries_vas_init(void) * Linux supports user space COPY/PASTE only with Radix */ if (!radix_enabled()) { + copypaste_feat = false; pr_err("API is supported only with radix page tables\n"); return -ENOTSUPP; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 73c620c2a3a1..e753a6bd4888 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1275,7 +1275,7 @@ static int xmon_batch_next_cpu(void) while (!cpumask_empty(&xmon_batch_cpus)) { cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus, xmon_batch_start_cpu, true); - if (cpu == nr_cpumask_bits) + if (cpu >= nr_cpu_ids) break; if (xmon_batch_start_cpu == -1) xmon_batch_start_cpu = cpu; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index c5e42cc37604..eb7f29a412f8 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -126,6 +126,7 @@ config RISCV select OF_IRQ select PCI_DOMAINS_GENERIC if PCI select PCI_MSI if PCI + select RISCV_ALTERNATIVE if !XIP_KERNEL select RISCV_INTC select RISCV_TIMER if RISCV_SBI select SIFIVE_PLIC @@ -401,9 +402,8 @@ config RISCV_ISA_C config RISCV_ISA_SVPBMT bool "SVPBMT extension support" depends on 64BIT && MMU - depends on !XIP_KERNEL + depends on RISCV_ALTERNATIVE default y - select RISCV_ALTERNATIVE help Adds support to dynamically detect the presence of the SVPBMT ISA-extension (Supervisor-mode: page-based memory types) and @@ -428,8 +428,8 @@ config TOOLCHAIN_HAS_ZBB config RISCV_ISA_ZBB bool "Zbb extension support for bit manipulation instructions" depends on TOOLCHAIN_HAS_ZBB - depends on !XIP_KERNEL && MMU - select RISCV_ALTERNATIVE + depends on MMU + depends on RISCV_ALTERNATIVE default y help Adds support to dynamically detect the presence of the ZBB @@ -443,9 +443,9 @@ config RISCV_ISA_ZBB config RISCV_ISA_ZICBOM bool "Zicbom extension support for non-coherent DMA operation" - depends on !XIP_KERNEL && MMU + depends on MMU + depends on 
RISCV_ALTERNATIVE default y - select RISCV_ALTERNATIVE select RISCV_DMA_NONCOHERENT help Adds support to dynamically detect the presence of the ZICBOM @@ -464,6 +464,28 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause) depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600 +config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI + def_bool y + # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc + depends on AS_IS_GNU && AS_VERSION >= 23800 + help + Newer binutils versions default to ISA spec version 20191213 which + moves some instructions from the I extension to the Zicsr and Zifencei + extensions. + +config TOOLCHAIN_NEEDS_OLD_ISA_SPEC + def_bool y + depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI + # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16 + depends on CC_IS_CLANG && CLANG_VERSION < 170000 + help + Certain versions of clang do not support zicsr and zifencei via -march + but newer versions of binutils require it for the reasons noted in the + help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This + option causes an older ISA spec compatible with these older versions + of clang to be passed to GAS, which has the same result as passing zicsr + and zifencei to -march. + config FPU bool "FPU support" default y diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas index 69621ae6d647..0c8f4652cd82 100644 --- a/arch/riscv/Kconfig.erratas +++ b/arch/riscv/Kconfig.erratas @@ -2,8 +2,7 @@ menu "CPU errata selection" config ERRATA_SIFIVE bool "SiFive errata" - depends on !XIP_KERNEL - select RISCV_ALTERNATIVE + depends on RISCV_ALTERNATIVE help All SiFive errata Kconfig depend on this Kconfig. Disabling this Kconfig will disable all SiFive errata. Please say "Y" @@ -35,8 +34,7 @@ config ERRATA_SIFIVE_CIP_1200 config ERRATA_THEAD bool "T-HEAD errata" - depends on !XIP_KERNEL - select RISCV_ALTERNATIVE + depends on RISCV_ALTERNATIVE help All T-HEAD errata Kconfig depend on this Kconfig. Disabling this Kconfig will disable all T-HEAD errata. Please say "Y" diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 6203c3378922..b05e833a022d 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -57,10 +57,12 @@ riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c -# Newer binutils versions default to ISA spec version 20191213 which moves some -# instructions from the I extension to the Zicsr and Zifencei extensions. -toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei) -riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei +ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC +KBUILD_CFLAGS += -Wa,-misa-spec=2.2 +KBUILD_AFLAGS += -Wa,-misa-spec=2.2 +else +riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei +endif # Check if the toolchain supports Zihintpause extension riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause @@ -84,6 +86,13 @@ endif # Avoid generating .eh_frame sections. KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables +# The RISC-V attributes frequently cause compatibility issues and provide no +# information, so just turn them off. 
+KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute) +KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute) +KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr) +KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr) + KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi index 07e2e2649604..f87c5164d9cf 100644 --- a/arch/riscv/boot/dts/canaan/k210.dtsi +++ b/arch/riscv/boot/dts/canaan/k210.dtsi @@ -259,7 +259,6 @@ <&sysclk K210_CLK_APB0>; clock-names = "ssi_clk", "pclk"; resets = <&sysrst K210_RST_SPI2>; - spi-max-frequency = <25000000>; }; i2s0: i2s@50250000 { diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c index da55cb247e89..31d2ebea4286 100644 --- a/arch/riscv/errata/sifive/errata.c +++ b/arch/riscv/errata/sifive/errata.c @@ -111,7 +111,7 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin, mutex_lock(&text_mutex); patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt), alt->alt_len); - mutex_lock(&text_mutex); + mutex_unlock(&text_mutex); cpu_apply_errata |= tmp; } } diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 5c3e7b97fcc6..0a55099bb734 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h @@ -22,6 +22,14 @@ */ enum fixed_addresses { FIX_HOLE, + /* + * The fdt fixmap mapping must be PMD aligned and will be mapped + * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit. + */ + FIX_FDT_END, + FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1, + + /* Below fixmaps will be mapped using fixmap_pte */ FIX_PTE, FIX_PMD, FIX_PUD, diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 9e73922e1e2e..d47d87c2d7e3 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop #endif -#endif +#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* _ASM_RISCV_FTRACE_H */ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index e3021b2590de..6263a0de1c6a 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -57,18 +57,31 @@ struct riscv_isa_ext_data { unsigned int isa_ext_id; }; +unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); + +#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) + +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); +#define riscv_isa_extension_available(isa_bitmap, ext) \ + __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) + static __always_inline bool riscv_has_extension_likely(const unsigned long ext) { compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); - asm_volatile_goto( - ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) - : - : [ext] "i" (ext) - : - : l_no); + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + asm_volatile_goto( + ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) + : + : [ext] "i" (ext) + : + : l_no); + } else { + if (!__riscv_isa_extension_available(NULL, ext)) + goto l_no; + } return true; l_no: @@ -81,26 +94,23 @@ riscv_has_extension_unlikely(const unsigned long ext) compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); - asm_volatile_goto( - ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) - : - : 
[ext] "i" (ext) - : - : l_yes); + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + asm_volatile_goto( + ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) + : + : [ext] "i" (ext) + : + : l_yes); + } else { + if (__riscv_isa_extension_available(NULL, ext)) + goto l_yes; + } return false; l_yes: return true; } -unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); - -#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) - -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); -#define riscv_isa_extension_available(isa_bitmap, ext) \ - __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) - #endif #endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 5ff1f19fd45c..0099dc116168 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -19,8 +19,6 @@ typedef struct { #ifdef CONFIG_SMP /* A local icache flush is needed before user execution can resume. */ cpumask_t icache_stale_mask; - /* A local tlb flush is needed before user execution can resume. */ - cpumask_t tlb_stale_mask; #endif } mm_context_t; diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h index f433121774c0..63c98833d510 100644 --- a/arch/riscv/include/asm/patch.h +++ b/arch/riscv/include/asm/patch.h @@ -9,4 +9,6 @@ int patch_text_nosync(void *addr, const void *insns, size_t len); int patch_text(void *addr, u32 *insns, int ninsns); +extern int riscv_patch_in_stop_machine; + #endif /* _ASM_RISCV_PATCH_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index ab05f892d317..f641837ccf31 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -87,9 +87,13 @@ #define FIXADDR_TOP PCI_IO_START #ifdef CONFIG_64BIT -#define FIXADDR_SIZE PMD_SIZE +#define MAX_FDT_SIZE PMD_SIZE +#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M) +#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE) #else -#define FIXADDR_SIZE PGDIR_SIZE +#define MAX_FDT_SIZE PGDIR_SIZE +#define FIX_FDT_SIZE MAX_FDT_SIZE +#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE) #endif #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 907b9efd39a8..a09196f8de68 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -12,6 +12,8 @@ #include <asm/errata_list.h> #ifdef CONFIG_MMU +extern unsigned long asid_mask; + static inline void local_flush_tlb_all(void) { __asm__ __volatile__ ("sfence.vma" : : : "memory"); @@ -22,24 +24,6 @@ static inline void local_flush_tlb_page(unsigned long addr) { ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } - -static inline void local_flush_tlb_all_asid(unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma x0, %0" - : - : "r" (asid) - : "memory"); -} - -static inline void local_flush_tlb_page_asid(unsigned long addr, - unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma %0, %1" - : - : "r" (addr), "r" (asid) - : "memory"); -} - #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0) #define local_flush_tlb_page(addr) do { } while (0) diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile index 260daf3236d3..7f34f3c7c882 100644 --- a/arch/riscv/kernel/compat_vdso/Makefile +++ b/arch/riscv/kernel/compat_vdso/Makefile @@ -14,6 +14,10 @@ COMPAT_LD := $(LD) COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 COMPAT_LD_FLAGS := 
-melf32lriscv +# Disable attributes, as they're useless and break the build. +COMPAT_CC_FLAGS += $(call cc-option,-mno-riscv-attribute) +COMPAT_CC_FLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr) + # Files to link into the compat_vdso obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 5bff37af4770..03a6434a8cdd 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -15,10 +15,19 @@ void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex) { mutex_lock(&text_mutex); + + /* + * The code sequences we use for ftrace can't be patched while the + * kernel is running, so we need to use stop_machine() to modify them + * for now. This doesn't play nice with text_mutex, so we use this flag + * to elide the check. + */ + riscv_patch_in_stop_machine = true; } void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex) { + riscv_patch_in_stop_machine = false; mutex_unlock(&text_mutex); } @@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { int out; - ftrace_arch_code_modify_prepare(); + mutex_lock(&text_mutex); out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); - ftrace_arch_code_modify_post_process(); + mutex_unlock(&text_mutex); return out; } diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 8086d1a281cd..575e71d6c8ae 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -11,6 +11,7 @@ #include <asm/kprobes.h> #include <asm/cacheflush.h> #include <asm/fixmap.h> +#include <asm/ftrace.h> #include <asm/patch.h> struct patch_insn { @@ -20,6 +21,8 @@ struct patch_insn { atomic_t cpu_count; }; +int riscv_patch_in_stop_machine = false; + #ifdef CONFIG_MMU /* * The fix_to_virt(, idx) needs a const value (not a dynamic variable of @@ -60,8 +63,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len) * Before reaching here, it was expected to lock the text_mutex * already, so we don't need to give another lock here and could * ensure that it was safe between each cores. + * + * We're currently using stop_machine() for ftrace & kprobes, and while + * that ensures text_mutex is held before installing the mappings it + * does not ensure text_mutex is held by the calling thread. That's + * safe but triggers a lockdep failure, so just elide it for that + * specific case. */ - lockdep_assert_held(&text_mutex); + if (!riscv_patch_in_stop_machine) + lockdep_assert_held(&text_mutex); if (across_pages) patch_map(addr + len, FIX_TEXT_POKE1); @@ -125,6 +135,7 @@ NOKPROBE_SYMBOL(patch_text_cb); int patch_text(void *addr, u32 *insns, int ninsns) { + int ret; struct patch_insn patch = { .addr = addr, .insns = insns, @@ -132,7 +143,18 @@ int patch_text(void *addr, u32 *insns, int ninsns) .cpu_count = ATOMIC_INIT(0), }; - return stop_machine_cpuslocked(patch_text_cb, - &patch, cpu_online_mask); + /* + * kprobes takes text_mutex before calling patch_text(), but as we then + * call stop_machine(), the lockdep assertion in patch_insn_write() + * gets confused by the context in which the lock is taken. + * Instead, ensure the lock is held before calling stop_machine(), and + * set riscv_patch_in_stop_machine to skip the check in + * patch_insn_write(). 
+ */ + lockdep_assert_held(&text_mutex); + riscv_patch_in_stop_machine = true; + ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask); + riscv_patch_in_stop_machine = false; + return ret; } NOKPROBE_SYMBOL(patch_text); diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 376d2827e736..a059b73f4ddb 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -278,12 +278,8 @@ void __init setup_arch(char **cmdline_p) #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); #else - if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa)))) - unflatten_device_tree(); - else - pr_err("No DTB found in kernel mappings\n"); + unflatten_device_tree(); #endif - early_init_fdt_scan_reserved_mem(); misc_mem_init(); init_resources(); diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index bfb2afa4135f..dee66c9290cc 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -19,6 +19,7 @@ #include <asm/signal32.h> #include <asm/switch_to.h> #include <asm/csr.h> +#include <asm/cacheflush.h> extern u32 __user_rt_sigreturn[2]; @@ -181,6 +182,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, { struct rt_sigframe __user *frame; long err = 0; + unsigned long __maybe_unused addr; frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(frame, sizeof(*frame))) @@ -209,7 +211,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn, sizeof(frame->sigreturn_code))) return -EFAULT; - regs->ra = (unsigned long)&frame->sigreturn_code; + + addr = (unsigned long)&frame->sigreturn_code; + /* Make sure the two instructions are pushed to icache. */ + flush_icache_range(addr, addr + sizeof(frame->sigreturn_code)); + + regs->ra = addr; #endif /* CONFIG_MMU */ /* diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index f9a5a7c90ff0..64a9c093aef9 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -101,7 +101,7 @@ void notrace walk_stackframe(struct task_struct *task, while (!kstack_end(ksp)) { if (__kernel_text_address(pc) && unlikely(!fn(arg, pc))) break; - pc = (*ksp++) - 0x4; + pc = READ_ONCE_NOCHECK(*ksp++) - 0x4; } } diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index d5a658a047a7..5682d8c017b3 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -28,7 +28,6 @@ config KVM select KVM_XFER_TO_GUEST_WORK select HAVE_KVM_VCPU_ASYNC_IOCTL select HAVE_KVM_EVENTFD - select SRCU help Support hosting virtualized guest machines. 
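In condensed form, the text-patching protocol introduced by the ftrace.c and patch.c hunks above reduces to the sketch below. This is a simplified rendering of the new flow, not the full file: the fixmap remapping and the actual instruction copy are elided, and only the locking and flag handling are shown.

/*
 * Condensed sketch of the locking protocol from the patch.c hunks above;
 * the FIX_TEXT_POKE0/1 mapping and copy steps are elided.
 */
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	/*
	 * Under stop_machine(), text_mutex is held by the thread that
	 * initiated the patch, not by the migration thread actually
	 * running this code, so the assertion is skipped in that case.
	 */
	if (!riscv_patch_in_stop_machine)
		lockdep_assert_held(&text_mutex);

	/* ... remap the target page via fixmap and copy the insns ... */
	return 0;
}

int patch_text(void *addr, u32 *insns, int ninsns)
{
	struct patch_insn patch = {
		.addr = addr,
		.insns = insns,
		.ninsns = ninsns,
		.cpu_count = ATOMIC_INIT(0),
	};
	int ret;

	/* Callers such as kprobes must already hold text_mutex. */
	lockdep_assert_held(&text_mutex);
	riscv_patch_in_stop_machine = true;
	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
	riscv_patch_in_stop_machine = false;
	return ret;
}

The same flag is what lets ftrace_arch_code_modify_prepare() take text_mutex up front while the later stop_machine() callback still passes the lockdep check in patch_insn_write().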
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c index ad34519c8a13..3ac2ff6a65da 100644 --- a/arch/riscv/kvm/vcpu_timer.c +++ b/arch/riscv/kvm/vcpu_timer.c @@ -147,10 +147,8 @@ static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu) return; delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); - if (delta_ns) { - hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL); - t->next_set = true; - } + hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL); + t->next_set = true; } static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu) diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 80ce9caba8d2..12e22e7330e7 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator); static unsigned long asid_bits; static unsigned long num_asids; -static unsigned long asid_mask; +unsigned long asid_mask; static atomic_long_t current_version; @@ -196,16 +196,6 @@ switch_mm_fast: if (need_flush_tlb) local_flush_tlb_all(); -#ifdef CONFIG_SMP - else { - cpumask_t *mask = &mm->context.tlb_stale_mask; - - if (cpumask_test_cpu(cpu, mask)) { - cpumask_clear_cpu(cpu, mask); - local_flush_tlb_all_asid(cntx & asid_mask); - } - } -#endif } static void set_mm_noasid(struct mm_struct *mm) @@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm) local_flush_tlb_all(); } -static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +static inline void set_mm(struct mm_struct *prev, + struct mm_struct *next, unsigned int cpu) { - if (static_branch_unlikely(&use_asid_allocator)) - set_mm_asid(mm, cpu); - else - set_mm_noasid(mm); + /* + * The mm_cpumask indicates which harts' TLBs contain the virtual + * address mapping of the mm. Compared to noasid, using asid + * can't guarantee that stale TLB entries are invalidated, because + * for performance the asid mechanism doesn't flush the TLB on + * every switch_mm. So when using asid, keep the footprint of all + * CPUs in mm_cpumask() until the mm is reset. 
+ */ + cpumask_set_cpu(cpu, mm_cpumask(next)); + if (static_branch_unlikely(&use_asid_allocator)) { + set_mm_asid(next, cpu); + } else { + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + set_mm_noasid(next); + } } static int __init asids_init(void) @@ -274,7 +276,8 @@ static int __init asids_init(void) } early_initcall(asids_init); #else -static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +static inline void set_mm(struct mm_struct *prev, + struct mm_struct *next, unsigned int cpu) { /* Nothing to do here when there is no MMU */ } @@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ cpu = smp_processor_id(); - cpumask_clear_cpu(cpu, mm_cpumask(prev)); - cpumask_set_cpu(cpu, mm_cpumask(next)); - - set_mm(next, cpu); + set_mm(prev, next, cpu); flush_icache_deferred(next, cpu); } diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 460f785f6e09..d5f3e501dffb 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a no_context(regs, addr); return; } + if (pud_leaf(*pud_k)) + goto flush_tlb; /* * Since the vmalloc area is global, it is unnecessary @@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a no_context(regs, addr); return; } + if (pmd_leaf(*pmd_k)) + goto flush_tlb; /* * Make sure the actual PTE exists as well to @@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a * ordering constraint, not a cache flush; it is * necessary even after writing invalid entries. */ +flush_tlb: local_flush_tlb_page(addr); } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 478d6763a01a..0f14f4a8d179 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -57,7 +57,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] EXPORT_SYMBOL(empty_zero_page); extern char _start[]; -#define DTB_EARLY_BASE_VA PGDIR_SIZE void *_dtb_early_va __initdata; uintptr_t _dtb_early_pa __initdata; @@ -236,31 +235,22 @@ static void __init setup_bootmem(void) set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); reserve_initrd_mem(); + + /* + * No allocation should be done before reserving the memory as defined + * in the device tree, otherwise the allocation could end up in a + * reserved region. + */ + early_init_fdt_scan_reserved_mem(); + /* * If DTB is built in, no need to reserve its memblock. * Otherwise, do reserve it but avoid using * early_init_fdt_reserve_self() since __pa() does * not work for DTB pointers that are fixmap addresses */ - if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) { - /* - * In case the DTB is not located in a memory region we won't - * be able to locate it later on via the linear mapping and - * get a segfault when accessing it via __va(dtb_early_pa). - * To avoid this situation copy DTB to a memory region. - * Note that memblock_phys_alloc will also reserve DTB region. 
- */ - if (!memblock_is_memory(dtb_early_pa)) { - size_t fdt_size = fdt_totalsize(dtb_early_va); - phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE); - void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size); - - memcpy(new_dtb_early_va, dtb_early_va, fdt_size); - early_memunmap(new_dtb_early_va, fdt_size); - _dtb_early_pa = new_dtb_early_pa; - } else - memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); - } + if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) + memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); dma_contiguous_reserve(dma32_phys_limit); if (IS_ENABLED(CONFIG_64BIT)) @@ -279,9 +269,6 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); -static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); -static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE); -static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); #ifdef CONFIG_XIP_KERNEL #define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops)) @@ -626,9 +613,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp, #define trampoline_pgd_next (pgtable_l5_enabled ? \ (uintptr_t)trampoline_p4d : (pgtable_l4_enabled ? \ (uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd)) -#define early_dtb_pgd_next (pgtable_l5_enabled ? \ - (uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ? \ - (uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd)) #else #define pgd_next_t pte_t #define alloc_pgd_next(__va) pt_ops.alloc_pte(__va) @@ -636,7 +620,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp, #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \ create_pte_mapping(__nextp, __va, __pa, __sz, __prot) #define fixmap_pgd_next ((uintptr_t)fixmap_pte) -#define early_dtb_pgd_next ((uintptr_t)early_dtb_pmd) #define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0) #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0) #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0) @@ -860,32 +843,28 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early) * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR * entry. */ -static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa) +static void __init create_fdt_early_page_table(pgd_t *pgdir, + uintptr_t fix_fdt_va, + uintptr_t dtb_pa) { -#ifndef CONFIG_BUILTIN_DTB uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); - create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, - IS_ENABLED(CONFIG_64BIT) ? early_dtb_pgd_next : pa, - PGDIR_SIZE, - IS_ENABLED(CONFIG_64BIT) ? 
PAGE_TABLE : PAGE_KERNEL); - - if (pgtable_l5_enabled) - create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA, - (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE); - - if (pgtable_l4_enabled) - create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA, - (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE); +#ifndef CONFIG_BUILTIN_DTB + /* Make sure the fdt fixmap address is always aligned on PMD size */ + BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE)); - if (IS_ENABLED(CONFIG_64BIT)) { - create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA, + /* In 32-bit only, the fdt lies in its own PGD */ + if (!IS_ENABLED(CONFIG_64BIT)) { + create_pgd_mapping(early_pg_dir, fix_fdt_va, + pa, MAX_FDT_SIZE, PAGE_KERNEL); + } else { + create_pmd_mapping(fixmap_pmd, fix_fdt_va, pa, PMD_SIZE, PAGE_KERNEL); - create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE, + create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE, pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); } - dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); + dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1)); #else /* * For 64-bit kernel, __va can't be used since it would return a linear @@ -1055,7 +1034,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_kernel_page_table(early_pg_dir, true); /* Setup early mapping for FDT early scan */ - create_fdt_early_page_table(early_pg_dir, dtb_pa); + create_fdt_early_page_table(early_pg_dir, + __fix_to_virt(FIX_FDT), dtb_pa); /* * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap @@ -1097,6 +1077,16 @@ static void __init setup_vm_final(void) u64 i; /* Setup swapper PGD for fixmap */ +#if !defined(CONFIG_64BIT) + /* + * In 32-bit, the device tree lies in a pgd entry, so it must be copied + * directly in swapper_pg_dir in addition to the pgd entry that points + * to fixmap_pte. + */ + unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT)); + + set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]); +#endif create_pgd_mapping(swapper_pg_dir, FIXADDR_START, __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index ce7dfc81bb3f..ef701fa83f36 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -5,7 +5,23 @@ #include <linux/sched.h> #include <asm/sbi.h> #include <asm/mmu_context.h> -#include <asm/tlbflush.h> + +static inline void local_flush_tlb_all_asid(unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma x0, %0" + : + : "r" (asid) + : "memory"); +} + +static inline void local_flush_tlb_page_asid(unsigned long addr, + unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma %0, %1" + : + : "r" (addr), "r" (asid) + : "memory"); +} void flush_tlb_all(void) { @@ -15,7 +31,6 @@ void flush_tlb_all(void) static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { - struct cpumask *pmask = &mm->context.tlb_stale_mask; struct cpumask *cmask = mm_cpumask(mm); unsigned int cpuid; bool broadcast; @@ -27,16 +42,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, /* check if the tlbflush needs to be sent to other CPUs */ broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids; if (static_branch_unlikely(&use_asid_allocator)) { - unsigned long asid = atomic_long_read(&mm->context.id); - - /* - * TLB will be immediately flushed on harts concurrently - * executing this MM context. TLB flush on other harts - * is deferred until this MM context migrates there. 
- */ - cpumask_setall(pmask); - cpumask_clear_cpu(cpuid, pmask); - cpumask_andnot(pmask, pmask, cmask); + unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask; if (broadcast) { sbi_remote_sfence_vma_asid(cmask, start, size, asid); diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index f5a668736c79..acdc3f040195 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -10,6 +10,7 @@ #include <linux/filter.h> #include <linux/memory.h> #include <linux/stop_machine.h> +#include <asm/patch.h> #include "bpf_jit.h" #define RV_REG_TCC RV_REG_A6 diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile index d16bf715a586..5730797a6b40 100644 --- a/arch/riscv/purgatory/Makefile +++ b/arch/riscv/purgatory/Makefile @@ -84,12 +84,7 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS) CFLAGS_REMOVE_ctype.o += $(PURGATORY_CFLAGS_REMOVE) CFLAGS_ctype.o += $(PURGATORY_CFLAGS) -AFLAGS_REMOVE_entry.o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_memcpy.o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_memset.o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_strcmp.o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_strlen.o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_strncmp.o += -Wa,-gdwarf-2 +asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x)) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/s390/Makefile b/arch/s390/Makefile index b3235ab0ace8..ed646c583e4f 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -162,7 +162,7 @@ vdso_prepare: prepare0 ifdef CONFIG_EXPOLINE_EXTERN modules_prepare: expoline_prepare -expoline_prepare: +expoline_prepare: scripts $(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o endif endif diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c index 9b14045065b6..74b5cd264862 100644 --- a/arch/s390/boot/ipl_report.c +++ b/arch/s390/boot/ipl_report.c @@ -57,11 +57,19 @@ repeat: if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size && intersects(initrd_data.start, initrd_data.size, safe_addr, size)) safe_addr = initrd_data.start + initrd_data.size; + if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) { + safe_addr = (unsigned long)comps + comps->len; + goto repeat; + } for_each_rb_entry(comp, comps) if (intersects(safe_addr, size, comp->addr, comp->len)) { safe_addr = comp->addr + comp->len; goto repeat; } + if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) { + safe_addr = (unsigned long)certs + certs->len; + goto repeat; + } for_each_rb_entry(cert, certs) if (intersects(safe_addr, size, cert->addr, cert->len)) { safe_addr = cert->addr + cert->len; diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 3c68fe49042c..4ccf66d29fc2 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -23,7 +23,6 @@ CONFIG_NUMA_BALANCING=y CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y @@ -90,7 +89,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y CONFIG_ZSMALLOC_STAT=y @@ -298,7 +296,6 @@ CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m @@ -340,7 +337,6 @@ CONFIG_BRIDGE_MRP=y 
CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m @@ -351,7 +347,6 @@ CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_DRR=m CONFIG_NET_SCH_MQPRIO=m @@ -363,14 +358,11 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_PLUG=m CONFIG_NET_SCH_ETS=m CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m CONFIG_CLS_U32_PERF=y CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m @@ -584,7 +576,7 @@ CONFIG_DIAG288_WATCHDOG=m CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_HID is not set +# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_ACCESS=m @@ -828,6 +820,7 @@ CONFIG_PANIC_ON_OOPS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_WQ_WATCHDOG=y CONFIG_TEST_LOCKUP=m +CONFIG_DEBUG_PREEMPT=y CONFIG_PROVE_LOCKING=y CONFIG_LOCK_STAT=y CONFIG_DEBUG_ATOMIC_SLEEP=y @@ -843,6 +836,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300 # CONFIG_RCU_TRACE is not set CONFIG_LATENCYTOP=y CONFIG_BOOTTIME_TRACING=y +CONFIG_FPROBE=y CONFIG_FUNCTION_PROFILER=y CONFIG_STACK_TRACER=y CONFIG_IRQSOFF_TRACER=y @@ -857,6 +851,7 @@ CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m +CONFIG_SAMPLE_FTRACE_OPS=m CONFIG_DEBUG_ENTRY=y CONFIG_CIO_INJECT=y CONFIG_KUNIT=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 9ab91632f74c..693297a2e897 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -21,7 +21,6 @@ CONFIG_NUMA_BALANCING=y CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y @@ -85,7 +84,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y CONFIG_ZSMALLOC_STAT=y @@ -289,7 +287,6 @@ CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m @@ -330,7 +327,6 @@ CONFIG_BRIDGE_MRP=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m @@ -341,7 +337,6 @@ CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_DRR=m CONFIG_NET_SCH_MQPRIO=m @@ -353,14 +348,11 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_PLUG=m CONFIG_NET_SCH_ETS=m CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m CONFIG_CLS_U32_PERF=y CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m @@ -573,7 +565,7 @@ CONFIG_DIAG288_WATCHDOG=m CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_HID is not set +# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_ACCESS=m @@ -795,6 +787,7 @@ CONFIG_RCU_REF_SCALE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 
CONFIG_LATENCYTOP=y CONFIG_BOOTTIME_TRACING=y +CONFIG_FPROBE=y CONFIG_FUNCTION_PROFILER=y CONFIG_STACK_TRACER=y CONFIG_SCHED_TRACER=y @@ -805,6 +798,7 @@ CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m +CONFIG_SAMPLE_FTRACE_OPS=m CONFIG_KUNIT=m CONFIG_KUNIT_DEBUGFS=y CONFIG_LKDTM=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index a9c0c81d1de9..33a232bb68af 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -58,7 +58,7 @@ CONFIG_ZFCP=y # CONFIG_VMCP is not set # CONFIG_MONWRITER is not set # CONFIG_S390_VMUR is not set -# CONFIG_HID is not set +# CONFIG_HID_SUPPORT is not set # CONFIG_VIRTIO_MENU is not set # CONFIG_VHOST_MENU is not set # CONFIG_IOMMU_SUPPORT is not set diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index cf9659e13f03..ea244a73efad 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -474,9 +474,7 @@ long arch_ptrace(struct task_struct *child, long request, } return 0; case PTRACE_GET_LAST_BREAK: - put_user(child->thread.last_break, - (unsigned long __user *) data); - return 0; + return put_user(child->thread.last_break, (unsigned long __user *)data); case PTRACE_ENABLE_TE: if (!MACHINE_HAS_TE) return -EIO; @@ -824,9 +822,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } return 0; case PTRACE_GET_LAST_BREAK: - put_user(child->thread.last_break, - (unsigned int __user *) data); - return 0; + return put_user(child->thread.last_break, (unsigned int __user *)data); } return compat_ptrace_request(child, request, addr, data); } diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 33f4ff909476..45fdf2a9b2e3 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -31,7 +31,6 @@ config KVM select HAVE_KVM_IRQ_ROUTING select HAVE_KVM_INVALID_WAKEUPS select HAVE_KVM_NO_POLL - select SRCU select KVM_VFIO select INTERVAL_TREE select MMU_NOTIFIER diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 0ee02dae14b2..2cda8d9d7c6e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -271,10 +271,18 @@ static int handle_prog(struct kvm_vcpu *vcpu) * handle_external_interrupt - used for external interruption interceptions * @vcpu: virtual cpu * - * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if - * the new PSW does not have external interrupts disabled. In the first case, - * we've got to deliver the interrupt manually, and in the second case, we - * drop to userspace to handle the situation there. + * This interception occurs if: + * - the CPUSTAT_EXT_INT bit was already set when the external interrupt + * occurred. In this case, the interrupt needs to be injected manually to + * preserve interrupt priority. + * - the external new PSW has external interrupts enabled, which will cause an + * interruption loop. We drop to userspace in this case. + * + * The latter case can be detected by inspecting the external mask bit in the + * external new psw. + * + * Under PV, only the latter case can occur, since interrupt priorities are + * handled in the ultravisor. 
*/ static int handle_external_interrupt(struct kvm_vcpu *vcpu) { @@ -285,10 +293,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) vcpu->stat.exit_external_interrupt++; - rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t)); - if (rc) - return rc; - /* We can not handle clock comparator or timer interrupt with bad PSW */ + if (kvm_s390_pv_cpu_is_protected(vcpu)) { + newpsw = vcpu->arch.sie_block->gpsw; + } else { + rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t)); + if (rc) + return rc; + } + + /* + * Clock comparator or timer interrupt with external interrupt enabled + * will cause interrupt loop. Drop to userspace. + */ if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) && (newpsw.mask & PSW_MASK_EXT)) return -EOPNOTSUPP; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 39b36562c043..1eeb9ae57879 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -573,6 +573,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_VCPU_RESETS: case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_S390_DIAG318: + case KVM_CAP_IRQFD_RESAMPLE: r = 1; break; case KVM_CAP_SET_GUEST_DEBUG2: diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 720036fb1924..d44214072779 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -172,7 +172,7 @@ unsigned long __clear_user(void __user *to, unsigned long size) "4: slgr %0,%0\n" "5:\n" EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b) - : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) + : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2) : "a" (empty_zero_page), [spec] "d" (spec.val) : "cc", "memory", "0"); return size; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index d0846ba818ee..6b1876e4ad3f 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -539,7 +539,7 @@ static void bpf_jit_plt(void *plt, void *ret, void *target) { memcpy(plt, bpf_plt, BPF_PLT_SIZE); *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret; - *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target; + *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret; } /* @@ -2010,7 +2010,9 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, } __packed insn; char expected_plt[BPF_PLT_SIZE]; char current_plt[BPF_PLT_SIZE]; + char new_plt[BPF_PLT_SIZE]; char *plt; + char *ret; int err; /* Verify the branch to be patched. */ @@ -2032,12 +2034,15 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE); if (err < 0) return err; - bpf_jit_plt(expected_plt, (char *)ip + 6, old_addr); + ret = (char *)ip + 6; + bpf_jit_plt(expected_plt, ret, old_addr); if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE)) return -EINVAL; /* Adjust the call address. */ + bpf_jit_plt(new_plt, ret, new_addr); s390_kernel_write(plt + (bpf_plt_target - bpf_plt), - &new_addr, sizeof(void *)); + new_plt + (bpf_plt_target - bpf_plt), + sizeof(void *)); } /* Adjust the mask of the branch. 
*/ diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index ef38b1514c77..e16afacc8fd1 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start, return r; } -int zpci_setup_bus_resources(struct zpci_dev *zdev, - struct list_head *resources) +int zpci_setup_bus_resources(struct zpci_dev *zdev) { unsigned long addr, size, flags; struct resource *res; @@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev, return -ENOMEM; } zdev->bars[i].res = res; - pci_add_resource(resources, res); } zdev->has_resources = 1; @@ -590,17 +588,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev, static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) { + struct resource *res; int i; + pci_lock_rescan_remove(); for (i = 0; i < PCI_STD_NUM_BARS; i++) { - if (!zdev->bars[i].size || !zdev->bars[i].res) + res = zdev->bars[i].res; + if (!res) continue; + release_resource(res); + pci_bus_remove_resource(zdev->zbus->bus, res); zpci_free_iomap(zdev, zdev->bars[i].map_idx); - release_resource(zdev->bars[i].res); - kfree(zdev->bars[i].res); + zdev->bars[i].res = NULL; + kfree(res); } zdev->has_resources = 0; + pci_unlock_rescan_remove(); } int pcibios_device_add(struct pci_dev *pdev) diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index 6a8da1b742ae..a99926af2b69 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -41,9 +41,7 @@ static int zpci_nb_devices; */ static int zpci_bus_prepare_device(struct zpci_dev *zdev) { - struct resource_entry *window, *n; - struct resource *res; - int rc; + int rc, i; if (!zdev_enabled(zdev)) { rc = zpci_enable_device(zdev); @@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev) } if (!zdev->has_resources) { - zpci_setup_bus_resources(zdev, &zdev->zbus->resources); - resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) { - res = window->res; - pci_bus_add_resource(zdev->zbus->bus, res, 0); + zpci_setup_bus_resources(zdev); + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (zdev->bars[i].res) + pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0); } } diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index e96c9860e064..af9f0ac79a1b 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev) int zpci_alloc_domain(int domain); void zpci_free_domain(int domain); -int zpci_setup_bus_resources(struct zpci_dev *zdev, - struct list_head *resources); +int zpci_setup_bus_resources(struct zpci_dev *zdev); static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus, unsigned int devfn) diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 27aebf1e75a2..3ef7adf739c8 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -50,6 +50,7 @@ #define SR_FD 0x00008000 #define SR_MD 0x40000000 +#define SR_USER_MASK 0x00000303 // M, Q, S, T bits /* * DSP structure and data */ diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 90f495d35db2..a6bfc6f37491 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -115,6 +115,7 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) { unsigned int err = 0; + unsigned int sr = regs->sr & ~SR_USER_MASK; #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) COPY(regs[1]); @@ -130,6 +131,8 @@ 
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p COPY(sr); COPY(pc); #undef COPY + regs->sr = (regs->sr & SR_USER_MASK) | sr; + #ifdef CONFIG_SH_FPU if (boot_cpu_data.flags & CPU_HAS_FPU) { int owned_fp; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1720eabc31ec..57a14206ad8f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -283,7 +283,6 @@ config X86 select RTC_LIB select RTC_MC146818_LIB select SPARSE_IRQ - select SRCU select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select TRACE_IRQFLAGS_SUPPORT @@ -1938,7 +1937,6 @@ config X86_SGX depends on X86_64 && CPU_SUP_INTEL && X86_X2APIC depends on CRYPTO=y depends on CRYPTO_SHA256=y - select SRCU select MMU_NOTIFIER select NUMA_KEEP_MEMINFO if NUMA select XARRAY_MULTI diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index b70559b821df..2106a2bd152b 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -3,9 +3,14 @@ core-y += arch/x86/crypto/ # # Disable SSE and other FP/SIMD instructions to match normal x86 +# This is required to work around issues in older LLVM versions, but breaks +# GCC versions < 11. See: +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652 # +ifeq ($(CONFIG_CC_IS_CLANG),y) KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 +endif ifeq ($(CONFIG_X86_32),y) START := 0x8048000 diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 8c45b198b62f..bccea57dee81 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -923,6 +923,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) /* Event overflow */ handled++; + status &= ~mask; perf_sample_data_init(&data, 0, hwc->last_period); if (!x86_perf_event_set_period(event)) @@ -933,8 +934,6 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); - - status &= ~mask; } /* diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index e2975a32d443..d7da28fada87 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -8,7 +8,7 @@ #define ALT_FLAGS_SHIFT 16 -#define ALT_FLAG_NOT BIT(0) +#define ALT_FLAG_NOT (1 << 0) #define ALT_NOT(feature) ((ALT_FLAG_NOT << ALT_FLAGS_SHIFT) | (feature)) #ifndef __ASSEMBLY__ diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index cbaf174d8efd..b3af2d45bbbb 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -125,6 +125,8 @@ #define INTEL_FAM6_LUNARLAKE_M 0xBD +#define INTEL_FAM6_ARROWLAKE 0xC6 + /* "Small Core" Processors (Atom/E-Core) */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 52788f79786f..255a78d9d906 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -49,7 +49,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); * simple as possible. * Must be called with preemption disabled. */ -static void __resctrl_sched_in(void) +static inline void __resctrl_sched_in(struct task_struct *tsk) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); u32 closid = state->default_closid; @@ -61,13 +61,13 @@ static void __resctrl_sched_in(void) * Else use the closid/rmid assigned to this cpu. 
*/ if (static_branch_likely(&rdt_alloc_enable_key)) { - tmp = READ_ONCE(current->closid); + tmp = READ_ONCE(tsk->closid); if (tmp) closid = tmp; } if (static_branch_likely(&rdt_mon_enable_key)) { - tmp = READ_ONCE(current->rmid); + tmp = READ_ONCE(tsk->rmid); if (tmp) rmid = tmp; } @@ -88,17 +88,17 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } -static inline void resctrl_sched_in(void) +static inline void resctrl_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) - __resctrl_sched_in(); + __resctrl_sched_in(tsk); } void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else -static inline void resctrl_sched_in(void) {} +static inline void resctrl_sched_in(struct task_struct *tsk) {} static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} #endif /* CONFIG_X86_CPU_RESCTRL */ diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h index b8357d6ecd47..b63be696b776 100644 --- a/arch/x86/include/asm/sev-common.h +++ b/arch/x86/include/asm/sev-common.h @@ -128,8 +128,9 @@ struct snp_psc_desc { struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY]; } __packed; -/* Guest message request error code */ +/* Guest message request error codes */ #define SNP_GUEST_REQ_INVALID_LEN BIT_ULL(32) +#define SNP_GUEST_REQ_ERR_BUSY BIT_ULL(33) #define GHCB_MSR_TERM_REQ 0x100 #define GHCB_MSR_TERM_REASON_SET_POS 12 diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index cb1ee53ad3b1..770dcf75eaa9 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -261,20 +261,22 @@ enum avic_ipi_failure_cause { AVIC_IPI_FAILURE_INVALID_BACKING_PAGE, }; -#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(9, 0) +#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0) /* - * For AVIC, the max index allowed for physical APIC ID - * table is 0xff (255). + * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as + * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually. */ #define AVIC_MAX_PHYSICAL_ID 0XFEULL /* - * For x2AVIC, the max index allowed for physical APIC ID - * table is 0x1ff (511). + * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511). */ #define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL +static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID); +static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID); + #define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF) #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index d13d71af5cf6..0a49a8de9f3c 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -18,32 +18,26 @@ /* Handles exceptions in both to and from, but doesn't do access_ok */ __must_check unsigned long -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); -__must_check unsigned long -copy_user_generic_string(void *to, const void *from, unsigned len); -__must_check unsigned long -copy_user_generic_unrolled(void *to, const void *from, unsigned len); +rep_movs_alternative(void *to, const void *from, unsigned len); static __always_inline __must_check unsigned long -copy_user_generic(void *to, const void *from, unsigned len) +copy_user_generic(void *to, const void *from, unsigned long len) { - unsigned ret; - + stac(); /* - * If CPU has ERMS feature, use copy_user_enhanced_fast_string. 
- * Otherwise, if CPU has rep_good feature, use copy_user_generic_string. - * Otherwise, use copy_user_generic_unrolled. + * If CPU has FSRM feature, use 'rep movs'. + * Otherwise, use rep_movs_alternative. */ - alternative_call_2(copy_user_generic_unrolled, - copy_user_generic_string, - X86_FEATURE_REP_GOOD, - copy_user_enhanced_fast_string, - X86_FEATURE_ERMS, - ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), - "=d" (len)), - "1" (to), "2" (from), "3" (len) - : "memory", "rcx", "r8", "r9", "r10", "r11"); - return ret; + asm volatile( + "1:\n\t" + ALTERNATIVE("rep movsb", + "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM)) + "2:\n" + _ASM_EXTABLE_UA(1b, 2b) + :"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT + : : "memory", "rax", "r8", "r9", "r10", "r11"); + clac(); + return len; } static __always_inline __must_check unsigned long @@ -58,9 +52,7 @@ raw_copy_to_user(void __user *dst, const void *src, unsigned long size) return copy_user_generic((__force void *)dst, src, size); } -extern long __copy_user_nocache(void *dst, const void __user *src, - unsigned size, int zerorest); - +extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size); extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size); extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len); @@ -69,8 +61,12 @@ static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) { + long ret; kasan_check_write(dst, size); - return __copy_user_nocache(dst, src, size, 0); + stac(); + ret = __copy_user_nocache(dst, src, size); + clac(); + return ret; } static inline int @@ -85,11 +81,7 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) */ __must_check unsigned long -clear_user_original(void __user *addr, unsigned long len); -__must_check unsigned long -clear_user_rep_good(void __user *addr, unsigned long len); -__must_check unsigned long -clear_user_erms(void __user *addr, unsigned long len); +rep_stos_alternative(void __user *addr, unsigned long len); static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size) { @@ -102,16 +94,12 @@ static __always_inline __must_check unsigned long __clear_user(void __user *addr */ asm volatile( "1:\n\t" - ALTERNATIVE_3("rep stosb", - "call clear_user_erms", ALT_NOT(X86_FEATURE_FSRM), - "call clear_user_rep_good", ALT_NOT(X86_FEATURE_ERMS), - "call clear_user_original", ALT_NOT(X86_FEATURE_REP_GOOD)) + ALTERNATIVE("rep stosb", + "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS)) "2:\n" _ASM_EXTABLE_UA(1b, 2b) : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT - : "a" (0) - /* rep_good clobbers %rdx */ - : "rdx"); + : "a" (0)); clac(); diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h index 6daa9b0c8d11..a3c29b1496c8 100644 --- a/arch/x86/include/asm/xen/cpuid.h +++ b/arch/x86/include/asm/xen/cpuid.h @@ -89,11 +89,21 @@ * Sub-leaf 2: EAX: host tsc frequency in kHz */ +#define XEN_CPUID_TSC_EMULATED (1u << 0) +#define XEN_CPUID_HOST_TSC_RELIABLE (1u << 1) +#define XEN_CPUID_RDTSCP_INSTR_AVAIL (1u << 2) + +#define XEN_CPUID_TSC_MODE_DEFAULT (0) +#define XEN_CPUID_TSC_MODE_ALWAYS_EMULATE (1u) +#define XEN_CPUID_TSC_MODE_NEVER_EMULATE (2u) +#define XEN_CPUID_TSC_MODE_PVRDTSCP (3u) + /* * Leaf 5 (0x40000x04) * HVM-specific features * Sub-leaf 0: EAX: Features * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag) + * Sub-leaf 0: ECX: domain id (iff 
EAX has XEN_HVM_CPUID_DOMID_PRESENT flag) */ #define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */ #define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */ @@ -102,12 +112,16 @@ #define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */ #define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */ /* - * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be - * used to store high bits for the Destination ID. This expands the Destination - * ID field from 8 to 15 bits, allowing to target APIC IDs up 32768. + * With interrupt format set to 0 (non-remappable) bits 55:49 from the + * IO-APIC RTE and bits 11:5 from the MSI address can be used to store + * high bits for the Destination ID. This expands the Destination ID + * field from 8 to 15 bits, allowing to target APIC IDs up 32768. */ #define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5) -/* Per-vCPU event channel upcalls */ +/* + * Per-vCPU event channel upcalls work correctly with physical IRQs + * bound to event channels. + */ #define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6) /* diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 1c38174b5f01..0dac4ab5b55b 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table) pr_debug("Local APIC address 0x%08x\n", madt->address); } - if (madt->header.revision >= 5) + + /* ACPI 6.3 and newer support the online capable bit. */ + if (acpi_gbl_FADT.header.revision > 6 || + (acpi_gbl_FADT.header.revision == 6 && + acpi_gbl_FADT.minor_revision >= 3)) acpi_support_online_capable = true; default_acpi_madt_oem_check(madt->header.oem_id, @@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags) if (lapic_flags & ACPI_MADT_ENABLED) return true; - if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE)) + if (!acpi_support_online_capable || + (lapic_flags & ACPI_MADT_ONLINE_CAPABLE)) return true; return false; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 380753b14cab..1547781e505b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -880,6 +880,15 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) } } #endif + /* + * Work around Erratum 1386. The XSAVES instruction malfunctions in + * certain circumstances on Zen1/2 uarch, and not all parts have had + * updated microcode at the time of writing (March 2023). + * + * Affected parts all have no supervisor XSAVE states, meaning that + * the XSAVEC instruction (which works fine) is equivalent. 
+ */ + clear_cpu_cap(c, X86_FEATURE_XSAVES); } static void init_amd_zn(struct cpuinfo_x86 *c) @@ -920,6 +929,10 @@ static void init_amd(struct cpuinfo_x86 *c) if (c->x86 >= 0x10) set_cpu_cap(c, X86_FEATURE_REP_GOOD); + /* AMD FSRM also implies FSRS */ + if (cpu_has(c, X86_FEATURE_FSRM)) + set_cpu_cap(c, X86_FEATURE_FSRS); + /* get apicid instead of initial apic id from cpuid */ c->apicid = hard_smp_processor_id(); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 7832a69d170e..2eec60f50057 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -2355,6 +2355,7 @@ static void mce_restart(void) { mce_timer_delete_all(); on_each_cpu(mce_cpu_restart, NULL, 1); + mce_schedule_work(); } /* Toggle features for corrected errors */ diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index f36dc2f796c5..f1197366a97d 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -358,12 +358,16 @@ static void __init ms_hyperv_init_platform(void) * To mirror what Windows does we should extract CPU management * features and use the ReservedIdentityBit to detect if Linux is the * root partition. But that requires negotiating CPU management - * interface (a process to be finalized). + * interface (a process to be finalized). For now, use the privilege + * flag as the indicator for running as root. * - * For now, use the privilege flag as the indicator for running as - * root. + * Hyper-V should never specify running as root and as a Confidential + * VM. But to protect against a compromised/malicious Hyper-V trying + * to exploit root behavior to expose Confidential VM memory, ignore + * the root partition setting if also a Confidential VM. */ - if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) { + if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) && + !(ms_hyperv.priv_high & HV_ISOLATION)) { hv_root_partition = true; pr_info("Hyper-V: running as root partition\n"); } diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index eb07d4435391..b44c487727d4 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -368,7 +368,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, { struct resctrl_schema *s; struct rdtgroup *rdtgrp; - struct rdt_domain *dom; struct rdt_resource *r; char *tok, *resname; int ret = 0; @@ -397,10 +396,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, goto out; } - list_for_each_entry(s, &resctrl_schema_all, list) { - list_for_each_entry(dom, &s->res->domains, list) - memset(dom->staged_config, 0, sizeof(dom->staged_config)); - } + rdt_staged_configs_clear(); while ((tok = strsep(&buf, "\n")) != NULL) { resname = strim(strsep(&tok, ":")); @@ -445,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, } out: + rdt_staged_configs_clear(); rdtgroup_kn_unlock(of->kn); cpus_read_unlock(); return ret ?: nbytes; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 8edecc5763d8..85ceaf9a31ac 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -555,5 +555,6 @@ void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); void __init thread_throttle_mode_init(void); void __init mbm_config_rftype_init(const char *config); +void rdt_staged_configs_clear(void); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ 
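The rdt_staged_configs_clear() helper declared above exists to bracket any sequence that stages schemata changes, so that an error path can never leak a partially staged configuration into a later request; rdtgroup_schemata_write() above follows exactly this pattern, and the rdtgroup.c hunk below adds the definition. A minimal sketch of the calling convention follows; the caller and stage_new_config() are hypothetical stand-ins for the real parse_line()/rdtgroup_init_cat() staging steps.

/*
 * Hypothetical caller showing the rdt_staged_configs_clear() convention:
 * clear on entry, stage, apply, then clear again on every exit path.
 * stage_new_config() is a stand-in, not a real resctrl function.
 */
static int apply_new_schemata(struct rdt_resource *r, struct rdtgroup *rdtgrp)
{
	int ret;

	lockdep_assert_held(&rdtgroup_mutex);	/* the helper asserts this too */

	rdt_staged_configs_clear();		/* start from a clean slate */
	ret = stage_new_config(r, rdtgrp);	/* hypothetical staging step */
	if (!ret)
		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
	rdt_staged_configs_clear();		/* nothing staged leaks out */
	return ret;
}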
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e2c1599d1b37..6ad33f355861 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...) va_end(ap); } +void rdt_staged_configs_clear(void) +{ + struct rdt_resource *r; + struct rdt_domain *dom; + + lockdep_assert_held(&rdtgroup_mutex); + + for_each_alloc_capable_rdt_resource(r) { + list_for_each_entry(dom, &r->domains, list) + memset(dom->staged_config, 0, sizeof(dom->staged_config)); + } +} + /* * Trivial allocator for CLOSIDs. Since h/w only supports a small number, * we can keep a bitmap of free CLOSIDs in a single integer. @@ -314,7 +327,7 @@ static void update_cpu_closid_rmid(void *info) * executing task might have its own closid selected. Just reuse * the context switch code. */ - resctrl_sched_in(); + resctrl_sched_in(current); } /* @@ -530,7 +543,7 @@ static void _update_task_closid_rmid(void *task) * Otherwise, the MSR is updated when the task is scheduled in. */ if (task == current) - resctrl_sched_in(); + resctrl_sched_in(task); } static void update_task_closid_rmid(struct task_struct *t) @@ -3107,7 +3120,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) { struct resctrl_schema *s; struct rdt_resource *r; - int ret; + int ret = 0; + + rdt_staged_configs_clear(); list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; @@ -3119,20 +3134,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) } else { ret = rdtgroup_init_cat(s, rdtgrp->closid); if (ret < 0) - return ret; + goto out; } ret = resctrl_arch_update_domains(r, rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Failed to initialize allocations\n"); - return ret; + goto out; } } rdtgrp->mode = RDT_MODE_SHAREABLE; - return 0; +out: + rdt_staged_configs_clear(); + return ret; } static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 714166cc25f2..0bab497c9436 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1118,21 +1118,20 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, zerofrom = offsetof(struct xregs_state, extended_state_area); /* - * The ptrace buffer is in non-compacted XSAVE format. In - * non-compacted format disabled features still occupy state space, - * but there is no state to copy from in the compacted - * init_fpstate. The gap tracking will zero these states. - */ - mask = fpstate->user_xfeatures; - - /* - * Dynamic features are not present in init_fpstate. When they are - * in an all zeros init state, remove those from 'mask' to zero - * those features in the user buffer instead of retrieving them - * from init_fpstate. + * This 'mask' indicates which states to copy from fpstate. + * Those extended states that are not present in fpstate are + * either disabled or initialized: + * + * In non-compacted format, disabled features still occupy + * state space but there is no state to copy from in the + * compacted init_fpstate. The gap tracking will zero these + * states. + * + * The extended features have an all zeroes init state. Thus, + * remove them from 'mask' to zero those features in the user + * buffer instead of retrieving them from init_fpstate. 
*/ - if (fpu_state_size_dynamic()) - mask &= (header.xfeatures | xinit->header.xcomp_bv); + mask = header.xfeatures; for_each_extended_xfeature(i, mask) { /* @@ -1151,9 +1150,8 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, pkru.pkru = pkru_val; membuf_write(&to, &pkru, sizeof(pkru)); } else { - copy_feature(header.xfeatures & BIT_ULL(i), &to, - __raw_xsave_addr(xsave, i), - __raw_xsave_addr(xinit, i), + membuf_write(&to, __raw_xsave_addr(xsave, i), xstate_sizes[i]); } /* diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 1265ad519249..fb4f1e01b64a 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -136,10 +136,12 @@ SYM_TYPED_FUNC_START(ftrace_stub) RET SYM_FUNC_END(ftrace_stub) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_TYPED_FUNC_START(ftrace_stub_graph) CALL_DEPTH_ACCOUNT RET SYM_FUNC_END(ftrace_stub_graph) +#endif #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 470c128759ea..708c87b88cc1 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(); + resctrl_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 4e34b3b68ebd..bb65a68b4b49 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -656,7 +656,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(); + resctrl_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 679026a640ef..3f664ab277c4 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned struct ghcb *ghcb; int ret; - if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) - return -ENODEV; - if (!fw_err) return -EINVAL; @@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned if (ret) goto e_put; - if (ghcb->save.sw_exit_info_2) { - /* Number of expected pages are returned in RBX */ - if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST && - ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN) - input->data_npages = ghcb_get_rbx(ghcb); + *fw_err = ghcb->save.sw_exit_info_2; + switch (*fw_err) { + case 0: + break; - *fw_err = ghcb->save.sw_exit_info_2; + case SNP_GUEST_REQ_ERR_BUSY: + ret = -EAGAIN; + break; + case SNP_GUEST_REQ_INVALID_LEN: + /* Number of expected pages are returned in RBX */ + if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { + input->data_npages = ghcb_get_rbx(ghcb); + ret = -ENOSPC; + break; + } + fallthrough; + default: ret = -EIO; + break; } e_put: diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index ef80d361b463..10622cf2b30f 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -33,8 +33,8 @@ static int __init iommu_init_noop(void) { return 0; } static void iommu_shutdown_noop(void) { } bool __init bool_x86_init_noop(void) { return false; } void x86_op_int_noop(int cpu) { } -static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; } -static __init void get_rtc_noop(struct timespec64 *now) { } static
__initconst const struct of_device_id of_cmos_match[] = { { .compatible = "motorola,mc146818" }, diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 8e578311ca9d..89ca7f4c1464 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -46,7 +46,6 @@ config KVM select KVM_XFER_TO_GUEST_WORK select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_VFIO - select SRCU select INTERVAL_TREE select HAVE_KVM_PM_NOTIFIER if PM select KVM_GENERIC_HARDWARE_ENABLING diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 042dee556125..995eb5054360 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -368,9 +368,39 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) mask_after = e->fields.mask; if (mask_before != mask_after) kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); - if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG - && ioapic->irr & (1 << index)) - ioapic_service(ioapic, index, false); + if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG && + ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) { + /* + * Pending status in irr may be outdated: the IRQ line may have + * already been deasserted by a device while the IRQ was masked. + * This occurs, for instance, if the interrupt is handled in a + * Linux guest as a oneshot interrupt (IRQF_ONESHOT). In this + * case the guest acknowledges the interrupt to the device in + * its threaded irq handler, i.e. after the EOI but before + * unmasking, so at the time of unmasking the IRQ line is + * already down but our pending irr bit is still set. In such + * cases, injecting this pending interrupt to the guest is + * buggy: the guest will receive an extra unwanted interrupt. + * + * So we need to check here if the IRQ is actually still pending. + * As we are generally not able to probe the IRQ line status + * directly, we do it through irqfd resampler. Namely, we clear + * the pending status and notify the resampler that this interrupt + * is done, without actually injecting it into the guest. If the + * IRQ line is actually already deasserted, we are done. If it is + * still asserted, a new interrupt will be shortly triggered + * through irqfd and injected into the guest. + * + * If, however, it's not possible to resample (no irqfd resampler + * registered for this irq), then unconditionally inject this + * pending interrupt into the guest, so the guest will not miss + * an interrupt, although may get an extra unwanted interrupt. 
+ */ + if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index)) + ioapic->irr &= ~(1 << index); + else + ioapic_service(ioapic, index, false); + } if (e->fields.delivery_mode == APIC_DM_FIXED) { struct kvm_lapic_irq irq; diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h index 287e98ef9df3..6272dabec02d 100644 --- a/arch/x86/kvm/kvm_onhyperv.h +++ b/arch/x86/kvm/kvm_onhyperv.h @@ -12,6 +12,11 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm, int hv_remote_flush_tlb(struct kvm *kvm); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp); #else /* !CONFIG_HYPERV */ +static inline int hv_remote_flush_tlb(struct kvm *kvm) +{ + return -EOPNOTSUPP; +} + static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp) { } diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index ca684979e90d..cfc8ab773025 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -27,19 +27,38 @@ #include "irq.h" #include "svm.h" -/* AVIC GATAG is encoded using VM and VCPU IDs */ -#define AVIC_VCPU_ID_BITS 8 -#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1) +/* + * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID, + * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry + * if an interrupt can't be delivered, e.g. because the vCPU isn't running. + * + * For the vCPU ID, use however many bits are currently allowed for the max + * guest physical APIC ID (limited by the size of the physical ID table), and + * use whatever bits remain to assign arbitrary AVIC IDs to VMs. Note, the + * size of the GATag is defined by hardware (32 bits), but is an opaque value + * as far as hardware is concerned. + */ +#define AVIC_VCPU_ID_MASK AVIC_PHYSICAL_MAX_INDEX_MASK -#define AVIC_VM_ID_BITS 24 -#define AVIC_VM_ID_NR (1 << AVIC_VM_ID_BITS) -#define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1) +#define AVIC_VM_ID_SHIFT HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK) +#define AVIC_VM_ID_MASK (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT) -#define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \ - (y & AVIC_VCPU_ID_MASK)) -#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK) +#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK) #define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK) +#define __AVIC_GATAG(vm_id, vcpu_id) ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \ + ((vcpu_id) & AVIC_VCPU_ID_MASK)) +#define AVIC_GATAG(vm_id, vcpu_id) \ +({ \ + u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_id); \ + \ + WARN_ON_ONCE(AVIC_GATAG_TO_VCPUID(ga_tag) != (vcpu_id)); \ + WARN_ON_ONCE(AVIC_GATAG_TO_VMID(ga_tag) != (vm_id)); \ + ga_tag; \ +}) + +static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u); + static bool force_avic; module_param_unsafe(force_avic, bool, 0444); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 252e7f37e4e2..f25bc3cbb250 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3729,7 +3729,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); } -static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) +static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3753,6 +3753,37 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) svm->current_vmcb->asid_generation--; } +static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) +{ + hpa_t root_tdp = vcpu->arch.mmu->root.hpa; 
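
The avic.c change above is easier to see with concrete numbers: the vCPU ID keeps exactly as many GATag bits as the physical ID table allows, the VM ID takes whatever remains of the 32-bit tag, and the new WARN_ON_ONCE()/static_assert() guard the round trip. A userspace model of that packing (AVIC_PHYSICAL_MAX_INDEX_MASK is hard-coded to a 9-bit value here purely for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define AVIC_PHYSICAL_MAX_INDEX_MASK	0x1ffu	/* illustrative 9-bit mask */
#define AVIC_VCPU_ID_MASK		AVIC_PHYSICAL_MAX_INDEX_MASK
#define AVIC_VM_ID_SHIFT		9	/* popcount of the mask */
#define AVIC_VM_ID_MASK			(0xffffffffu >> AVIC_VM_ID_SHIFT)

static uint32_t avic_gatag(uint32_t vm_id, uint32_t vcpu_id)
{
	return ((vm_id & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) |
	       (vcpu_id & AVIC_VCPU_ID_MASK);
}

int main(void)
{
	uint32_t tag = avic_gatag(0x123456, 0x1ff);

	/* decode must invert encode, as the new WARN_ON_ONCE()s insist */
	assert(((tag >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK) == 0x123456);
	assert((tag & AVIC_VCPU_ID_MASK) == 0x1ff);

	/* all-ones inputs must fill the 32-bit tag, as the static_assert checks */
	assert(avic_gatag(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == 0xffffffffu);

	printf("GATag 0x%08x round-trips\n", tag);
	return 0;
}

The svm.c TLB-flush hunk resumes below.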
+ + /* + * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly + * flush the NPT mappings via hypercall as flushing the ASID only + * affects virtual to physical mappings, it does not invalidate guest + * physical to host physical mappings. + */ + if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp)) + hyperv_flush_guest_mapping(root_tdp); + + svm_flush_tlb_asid(vcpu); +} + +static void svm_flush_tlb_all(struct kvm_vcpu *vcpu) +{ + /* + * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB + * flushes should be routed to hv_remote_flush_tlb() without requesting + * a "regular" remote flush. Reaching this point means either there's + * a KVM bug or a prior hv_remote_flush_tlb() call failed, both of + * which might be fatal to the guest. Yell, but try to recover. + */ + if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu))) + hv_remote_flush_tlb(vcpu->kvm); + + svm_flush_tlb_asid(vcpu); +} + static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4745,10 +4776,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .set_rflags = svm_set_rflags, .get_if_flag = svm_get_if_flag, - .flush_tlb_all = svm_flush_tlb_current, + .flush_tlb_all = svm_flush_tlb_all, .flush_tlb_current = svm_flush_tlb_current, .flush_tlb_gva = svm_flush_tlb_gva, - .flush_tlb_guest = svm_flush_tlb_current, + .flush_tlb_guest = svm_flush_tlb_asid, .vcpu_pre_run = svm_vcpu_pre_run, .vcpu_run = svm_vcpu_run, diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h index cff838f15db5..786d46d73a8e 100644 --- a/arch/x86/kvm/svm/svm_onhyperv.h +++ b/arch/x86/kvm/svm/svm_onhyperv.h @@ -6,6 +6,8 @@ #ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__ #define __ARCH_X86_KVM_SVM_ONHYPERV_H__ +#include <asm/mshyperv.h> + #if IS_ENABLED(CONFIG_HYPERV) #include "kvm_onhyperv.h" @@ -15,6 +17,14 @@ static struct kvm_x86_ops svm_x86_ops; int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu); +static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu) +{ + struct hv_vmcb_enlightenments *hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments; + + return ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB && + !!hve->hv_enlightenments_control.enlightened_npt_tlb; +} + static inline void svm_hv_init_vmcb(struct vmcb *vmcb) { struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments; @@ -80,6 +90,11 @@ static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu) } #else +static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu) +{ + return false; +} + static inline void svm_hv_init_vmcb(struct vmcb *vmcb) { } diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 7c4f5ca405c7..768487611db7 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2903,7 +2903,7 @@ static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { - bool ia32e; + bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || @@ -2923,12 +2923,6 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, vmcs12->host_ia32_perf_global_ctrl))) return -EINVAL; -#ifdef CONFIG_X86_64 - ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); -#else - ia32e = false; -#endif - if (ia32e) { if (CC(!(vmcs12->host_cr4 & 
X86_CR4_PAE))) return -EINVAL; @@ -3022,7 +3016,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, enum vm_entry_failure_code *entry_failure_code) { - bool ia32e; + bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); *entry_failure_code = ENTRY_FAIL_DEFAULT; @@ -3048,6 +3042,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, vmcs12->guest_ia32_perf_global_ctrl))) return -EINVAL; + if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) + return -EINVAL; + + if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || + CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) + return -EINVAL; + /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: @@ -3059,7 +3060,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, */ if (to_vmx(vcpu)->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { - ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || CC(((vmcs12->guest_cr0 & X86_CR0_PG) && @@ -3868,7 +3868,12 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu) exit_qual = 0; } - if (ex->has_error_code) { + /* + * Unlike AMD's Paged Real Mode, which reports an error code on #PF + * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the + * "has error code" flags on VM-Exit if the CPU is in Real Mode. + */ + if (ex->has_error_code && is_protmode(vcpu)) { /* * Intel CPUs do not generate error codes with bits 31:16 set, * and more importantly VMX disallows setting bits 31:16 in the diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index f550540ed54e..631fd7da2bc3 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -262,7 +262,7 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL) * eIBRS has its own protection against poisoned RSB, so it doesn't * need the RSB filling sequence. But it does need to be enabled, and a * single call to retire, before the first unbalanced RET. - */ + */ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\ X86_FEATURE_RSB_VMEXIT_LITE @@ -311,7 +311,7 @@ SYM_FUNC_END(vmx_do_nmi_irqoff) * vmread_error_trampoline - Trampoline from inline asm to vmread_error() * @field: VMCS field encoding that failed * @fault: %true if the VMREAD faulted, %false if it failed - + * * Save and restore volatile registers across a call to vmread_error(). Note, * all parameters are passed on the stack. */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index bcac3efcde41..d2d6e1b6c788 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -874,7 +874,7 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) */ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; - else { + else { int mask = 0, match = 0; if (enable_ept && (eb & (1u << PF_VECTOR))) { @@ -1282,7 +1282,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) } } - if (vmx->nested.need_vmcs12_to_shadow_sync) + if (vmx->nested.need_vmcs12_to_shadow_sync) nested_sync_vmcs12_to_shadow(vcpu); if (vmx->guest_state_loaded) @@ -5049,10 +5049,10 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) if (to_vmx(vcpu)->nested.nested_run_pending) return -EBUSY; - /* - * An IRQ must not be injected into L2 if it's supposed to VM-Exit, - * e.g. 
if the IRQ arrived asynchronously after checking nested events. - */ + /* + * An IRQ must not be injected into L2 if it's supposed to VM-Exit, + * e.g. if the IRQ arrived asynchronously after checking nested events. + */ if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) return -EBUSY; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7713420abab0..3d852ce84920 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4432,6 +4432,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VAPIC: case KVM_CAP_ENABLE_CAP: case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: + case KVM_CAP_IRQFD_RESAMPLE: r = 1; break; case KVM_CAP_EXIT_HYPERCALL: @@ -8903,6 +8904,8 @@ restart: } if (ctxt->have_exception) { + WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write); + vcpu->mmio_needed = false; r = 1; inject_emulated_exception(vcpu); } else if (vcpu->arch.pio.count) { @@ -9906,13 +9909,20 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu) static void kvm_inject_exception(struct kvm_vcpu *vcpu) { + /* + * Suppress the error code if the vCPU is in Real Mode, as Real Mode + * exceptions don't report error codes. The presence of an error code + * is carried with the exception and only stripped when the exception + * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do + * report an error code despite the CPU being in Real Mode. + */ + vcpu->arch.exception.has_error_code &= is_protmode(vcpu); + trace_kvm_inj_exception(vcpu->arch.exception.vector, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.injected); - if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) - vcpu->arch.exception.error_code = false; static_call(kvm_x86_inject_exception)(vcpu); } diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 4f1a40a86534..01932af64193 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -71,6 +71,6 @@ ifneq ($(CONFIG_GENERIC_CSUM),y) endif lib-y += clear_page_64.o copy_page_64.o lib-y += memmove_64.o memset_64.o - lib-y += copy_user_64.o + lib-y += copy_user_64.o copy_user_uncached_64.o lib-y += cmpxchg16b_emu.o endif diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index ecbfb4dd3b01..f74a3e704a1c 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -57,134 +57,85 @@ EXPORT_SYMBOL_GPL(clear_page_erms) * Input: * rdi destination * rcx count + * rax is zero * * Output: * rcx: uncleared bytes or 0 if successful. */ -SYM_FUNC_START(clear_user_original) - /* - * Copy only the lower 32 bits of size as that is enough to handle the rest bytes, - * i.e., no need for a 'q' suffix and thus a REX prefix. - */ - mov %ecx,%eax - shr $3,%rcx - jz .Lrest_bytes +SYM_FUNC_START(rep_stos_alternative) + cmpq $64,%rcx + jae .Lunrolled - # do the qwords first - .p2align 4 -.Lqwords: - movq $0,(%rdi) - lea 8(%rdi),%rdi - dec %rcx - jnz .Lqwords + cmp $8,%ecx + jae .Lword -.Lrest_bytes: - and $7, %eax - jz .Lexit + testl %ecx,%ecx + je .Lexit - # now do the rest bytes -.Lbytes: - movb $0,(%rdi) +.Lclear_user_tail: +0: movb %al,(%rdi) inc %rdi - dec %eax - jnz .Lbytes - + dec %rcx + jnz .Lclear_user_tail .Lexit: - /* - * %rax still needs to be cleared in the exception case because this function is called - * from inline asm and the compiler expects %rax to be zero when exiting the inline asm, - * in case it might reuse it somewhere. 
- */ - xor %eax,%eax - RET - -.Lqwords_exception: - # convert remaining qwords back into bytes to return to caller - shl $3, %rcx - and $7, %eax - add %rax,%rcx - jmp .Lexit - -.Lbytes_exception: - mov %eax,%ecx - jmp .Lexit - - _ASM_EXTABLE_UA(.Lqwords, .Lqwords_exception) - _ASM_EXTABLE_UA(.Lbytes, .Lbytes_exception) -SYM_FUNC_END(clear_user_original) -EXPORT_SYMBOL(clear_user_original) - -/* - * Alternative clear user-space when CPU feature X86_FEATURE_REP_GOOD is - * present. - * Input: - * rdi destination - * rcx count - * - * Output: - * rcx: uncleared bytes or 0 if successful. - */ -SYM_FUNC_START(clear_user_rep_good) - # call the original thing for less than a cacheline - cmp $64, %rcx - jb clear_user_original - -.Lprep: - # copy lower 32-bits for rest bytes - mov %ecx, %edx - shr $3, %rcx - jz .Lrep_good_rest_bytes - -.Lrep_good_qwords: - rep stosq - -.Lrep_good_rest_bytes: - and $7, %edx - jz .Lrep_good_exit - -.Lrep_good_bytes: - mov %edx, %ecx - rep stosb - -.Lrep_good_exit: - # see .Lexit comment above - xor %eax, %eax RET -.Lrep_good_qwords_exception: - # convert remaining qwords back into bytes to return to caller - shl $3, %rcx - and $7, %edx - add %rdx, %rcx - jmp .Lrep_good_exit + _ASM_EXTABLE_UA( 0b, .Lexit) - _ASM_EXTABLE_UA(.Lrep_good_qwords, .Lrep_good_qwords_exception) - _ASM_EXTABLE_UA(.Lrep_good_bytes, .Lrep_good_exit) -SYM_FUNC_END(clear_user_rep_good) -EXPORT_SYMBOL(clear_user_rep_good) +.Lword: +1: movq %rax,(%rdi) + addq $8,%rdi + sub $8,%ecx + je .Lexit + cmp $8,%ecx + jae .Lword + jmp .Lclear_user_tail -/* - * Alternative clear user-space when CPU feature X86_FEATURE_ERMS is present. - * Input: - * rdi destination - * rcx count - * - * Output: - * rcx: uncleared bytes or 0 if successful. - * - */ -SYM_FUNC_START(clear_user_erms) - # call the original thing for less than a cacheline - cmp $64, %rcx - jb clear_user_original - -.Lerms_bytes: - rep stosb - -.Lerms_exit: - xorl %eax,%eax + .p2align 4 +.Lunrolled: +10: movq %rax,(%rdi) +11: movq %rax,8(%rdi) +12: movq %rax,16(%rdi) +13: movq %rax,24(%rdi) +14: movq %rax,32(%rdi) +15: movq %rax,40(%rdi) +16: movq %rax,48(%rdi) +17: movq %rax,56(%rdi) + addq $64,%rdi + subq $64,%rcx + cmpq $64,%rcx + jae .Lunrolled + cmpl $8,%ecx + jae .Lword + testl %ecx,%ecx + jne .Lclear_user_tail RET - _ASM_EXTABLE_UA(.Lerms_bytes, .Lerms_exit) -SYM_FUNC_END(clear_user_erms) -EXPORT_SYMBOL(clear_user_erms) + /* + * If we take an exception on any of the + * word stores, we know that %rcx isn't zero, + * so we can just go to the tail clearing to + * get the exact count. + * + * The unrolled case might end up clearing + * some bytes twice. Don't care. + * + * We could use the value in %rdi to avoid + * a second fault on the exact count case, + * but do we really care? No. + * + * Finally, we could try to align %rdi at the + * top of the unrolling. But unaligned stores + * just aren't that common or expensive. 
+ */ + _ASM_EXTABLE_UA( 1b, .Lclear_user_tail) + _ASM_EXTABLE_UA(10b, .Lclear_user_tail) + _ASM_EXTABLE_UA(11b, .Lclear_user_tail) + _ASM_EXTABLE_UA(12b, .Lclear_user_tail) + _ASM_EXTABLE_UA(13b, .Lclear_user_tail) + _ASM_EXTABLE_UA(14b, .Lclear_user_tail) + _ASM_EXTABLE_UA(15b, .Lclear_user_tail) + _ASM_EXTABLE_UA(16b, .Lclear_user_tail) + _ASM_EXTABLE_UA(17b, .Lclear_user_tail) +SYM_FUNC_END(rep_stos_alternative) +EXPORT_SYMBOL(rep_stos_alternative) diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 9dec1b38a98f..4fc5c2de2de4 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -7,404 +7,108 @@ */ #include <linux/linkage.h> -#include <asm/current.h> -#include <asm/asm-offsets.h> -#include <asm/thread_info.h> -#include <asm/cpufeatures.h> -#include <asm/alternative.h> #include <asm/asm.h> -#include <asm/smap.h> #include <asm/export.h> -#include <asm/trapnr.h> - -.macro ALIGN_DESTINATION - /* check for bad alignment of destination */ - movl %edi,%ecx - andl $7,%ecx - jz 102f /* already aligned */ - subl $8,%ecx - negl %ecx - subl %ecx,%edx -100: movb (%rsi),%al -101: movb %al,(%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz 100b -102: - - _ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align) - _ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align) -.endm /* - * copy_user_generic_unrolled - memory copy with exception handling. - * This version is for CPUs like P4 that don't have efficient micro - * code for rep movsq - * - * Input: - * rdi destination - * rsi source - * rdx count - * - * Output: - * eax uncopied bytes or 0 if successful. - */ -SYM_FUNC_START(copy_user_generic_unrolled) - ASM_STAC - cmpl $8,%edx - jb .Lcopy_user_short_string_bytes - ALIGN_DESTINATION - movl %edx,%ecx - andl $63,%edx - shrl $6,%ecx - jz copy_user_short_string -1: movq (%rsi),%r8 -2: movq 1*8(%rsi),%r9 -3: movq 2*8(%rsi),%r10 -4: movq 3*8(%rsi),%r11 -5: movq %r8,(%rdi) -6: movq %r9,1*8(%rdi) -7: movq %r10,2*8(%rdi) -8: movq %r11,3*8(%rdi) -9: movq 4*8(%rsi),%r8 -10: movq 5*8(%rsi),%r9 -11: movq 6*8(%rsi),%r10 -12: movq 7*8(%rsi),%r11 -13: movq %r8,4*8(%rdi) -14: movq %r9,5*8(%rdi) -15: movq %r10,6*8(%rdi) -16: movq %r11,7*8(%rdi) - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi - decl %ecx - jnz 1b - jmp copy_user_short_string - -30: shll $6,%ecx - addl %ecx,%edx - jmp .Lcopy_user_handle_tail - - _ASM_EXTABLE_CPY(1b, 30b) - _ASM_EXTABLE_CPY(2b, 30b) - _ASM_EXTABLE_CPY(3b, 30b) - _ASM_EXTABLE_CPY(4b, 30b) - _ASM_EXTABLE_CPY(5b, 30b) - _ASM_EXTABLE_CPY(6b, 30b) - _ASM_EXTABLE_CPY(7b, 30b) - _ASM_EXTABLE_CPY(8b, 30b) - _ASM_EXTABLE_CPY(9b, 30b) - _ASM_EXTABLE_CPY(10b, 30b) - _ASM_EXTABLE_CPY(11b, 30b) - _ASM_EXTABLE_CPY(12b, 30b) - _ASM_EXTABLE_CPY(13b, 30b) - _ASM_EXTABLE_CPY(14b, 30b) - _ASM_EXTABLE_CPY(15b, 30b) - _ASM_EXTABLE_CPY(16b, 30b) -SYM_FUNC_END(copy_user_generic_unrolled) -EXPORT_SYMBOL(copy_user_generic_unrolled) - -/* Some CPUs run faster using the string copy instructions. - * This is also a lot simpler. Use them when possible. - * - * Only 4GB of copy is supported. This shouldn't be a problem - * because the kernel normally only writes from/to page sized chunks - * even if user space passed a longer buffer. - * And more would be dangerous because both Intel and AMD have - * errata with rep movsq > 4GB. If someone feels the need to fix - * this please consider this. + * rep_movs_alternative - memory copy with exception handling. 
+ * This version is for CPUs that don't have FSRM (Fast Short Rep Movs) * * Input: * rdi destination * rsi source - * rdx count + * rcx count * * Output: - * eax uncopied bytes or 0 if successful. - */ -SYM_FUNC_START(copy_user_generic_string) - ASM_STAC - cmpl $8,%edx - jb 2f /* less than 8 bytes, go to byte copy loop */ - ALIGN_DESTINATION - movl %edx,%ecx - shrl $3,%ecx - andl $7,%edx -1: rep movsq -2: movl %edx,%ecx -3: rep movsb - xorl %eax,%eax - ASM_CLAC - RET - -11: leal (%rdx,%rcx,8),%ecx -12: movl %ecx,%edx /* ecx is zerorest also */ - jmp .Lcopy_user_handle_tail - - _ASM_EXTABLE_CPY(1b, 11b) - _ASM_EXTABLE_CPY(3b, 12b) -SYM_FUNC_END(copy_user_generic_string) -EXPORT_SYMBOL(copy_user_generic_string) - -/* - * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. - * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. - * - * Input: - * rdi destination - * rsi source - * rdx count + * rcx uncopied bytes or 0 if successful. * - * Output: - * eax uncopied bytes or 0 if successful. + * NOTE! The calling convention is very intentionally the same as + * for 'rep movs', so that we can rewrite the function call with + * just a plain 'rep movs' on machines that have FSRM. But to make + * it simpler for us, we can clobber rsi/rdi and rax/r8-r11 freely. */ -SYM_FUNC_START(copy_user_enhanced_fast_string) - ASM_STAC - /* CPUs without FSRM should avoid rep movsb for short copies */ - ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM - movl %edx,%ecx -1: rep movsb - xorl %eax,%eax - ASM_CLAC +SYM_FUNC_START(rep_movs_alternative) + cmpq $64,%rcx + jae .Lunrolled + + cmp $8,%ecx + jae .Lword + + testl %ecx,%ecx + je .Lexit + +.Lcopy_user_tail: +0: movb (%rsi),%al +1: movb %al,(%rdi) + inc %rdi + inc %rsi + dec %rcx + jne .Lcopy_user_tail +.Lexit: RET -12: movl %ecx,%edx /* ecx is zerorest also */ - jmp .Lcopy_user_handle_tail - - _ASM_EXTABLE_CPY(1b, 12b) -SYM_FUNC_END(copy_user_enhanced_fast_string) -EXPORT_SYMBOL(copy_user_enhanced_fast_string) - -/* - * Try to copy last bytes and clear the rest if needed. - * Since protection fault in copy_from/to_user is not a normal situation, - * it is not necessary to optimize tail handling. - * Don't try to copy the tail if machine check happened - * - * Input: - * eax trap number written by ex_handler_copy() - * rdi destination - * rsi source - * rdx count - * - * Output: - * eax uncopied bytes or 0 if successful. 
- */ -SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) - cmp $X86_TRAP_MC,%eax - je 3f - - movl %edx,%ecx -1: rep movsb -2: mov %ecx,%eax - ASM_CLAC + _ASM_EXTABLE_UA( 0b, .Lexit) + _ASM_EXTABLE_UA( 1b, .Lexit) + + .p2align 4 +.Lword: +2: movq (%rsi),%rax +3: movq %rax,(%rdi) + addq $8,%rsi + addq $8,%rdi + sub $8,%ecx + je .Lexit + cmp $8,%ecx + jae .Lword + jmp .Lcopy_user_tail + + _ASM_EXTABLE_UA( 2b, .Lcopy_user_tail) + _ASM_EXTABLE_UA( 3b, .Lcopy_user_tail) + + .p2align 4 +.Lunrolled: +10: movq (%rsi),%r8 +11: movq 8(%rsi),%r9 +12: movq 16(%rsi),%r10 +13: movq 24(%rsi),%r11 +14: movq %r8,(%rdi) +15: movq %r9,8(%rdi) +16: movq %r10,16(%rdi) +17: movq %r11,24(%rdi) +20: movq 32(%rsi),%r8 +21: movq 40(%rsi),%r9 +22: movq 48(%rsi),%r10 +23: movq 56(%rsi),%r11 +24: movq %r8,32(%rdi) +25: movq %r9,40(%rdi) +26: movq %r10,48(%rdi) +27: movq %r11,56(%rdi) + addq $64,%rsi + addq $64,%rdi + subq $64,%rcx + cmpq $64,%rcx + jae .Lunrolled + cmpl $8,%ecx + jae .Lword + testl %ecx,%ecx + jne .Lcopy_user_tail RET -3: - movl %edx,%eax - ASM_CLAC - RET - - _ASM_EXTABLE_CPY(1b, 2b) - -.Lcopy_user_handle_align: - addl %ecx,%edx /* ecx is zerorest also */ - jmp .Lcopy_user_handle_tail - -SYM_CODE_END(.Lcopy_user_handle_tail) - -/* - * Finish memcpy of less than 64 bytes. #AC should already be set. - * - * Input: - * rdi destination - * rsi source - * rdx count (< 64) - * - * Output: - * eax uncopied bytes or 0 if successful. - */ -SYM_CODE_START_LOCAL(copy_user_short_string) - movl %edx,%ecx - andl $7,%edx - shrl $3,%ecx - jz .Lcopy_user_short_string_bytes -18: movq (%rsi),%r8 -19: movq %r8,(%rdi) - leaq 8(%rsi),%rsi - leaq 8(%rdi),%rdi - decl %ecx - jnz 18b -.Lcopy_user_short_string_bytes: - andl %edx,%edx - jz 23f - movl %edx,%ecx -21: movb (%rsi),%al -22: movb %al,(%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz 21b -23: xor %eax,%eax - ASM_CLAC - RET - -40: leal (%rdx,%rcx,8),%edx - jmp 60f -50: movl %ecx,%edx /* ecx is zerorest also */ -60: jmp .Lcopy_user_handle_tail - - _ASM_EXTABLE_CPY(18b, 40b) - _ASM_EXTABLE_CPY(19b, 40b) - _ASM_EXTABLE_CPY(21b, 50b) - _ASM_EXTABLE_CPY(22b, 50b) -SYM_CODE_END(copy_user_short_string) - -/* - * copy_user_nocache - Uncached memory copy with exception handling - * This will force destination out of cache for more performance. - * - * Note: Cached memory copy is used when destination or size is not - * naturally aligned. That is: - * - Require 8-byte alignment when size is 8 bytes or larger. - * - Require 4-byte alignment when size is 4 bytes. 
- */ -SYM_FUNC_START(__copy_user_nocache) - ASM_STAC - - /* If size is less than 8 bytes, go to 4-byte copy */ - cmpl $8,%edx - jb .L_4b_nocache_copy_entry - - /* If destination is not 8-byte aligned, "cache" copy to align it */ - ALIGN_DESTINATION - - /* Set 4x8-byte copy count and remainder */ - movl %edx,%ecx - andl $63,%edx - shrl $6,%ecx - jz .L_8b_nocache_copy_entry /* jump if count is 0 */ - - /* Perform 4x8-byte nocache loop-copy */ -.L_4x8b_nocache_copy_loop: -1: movq (%rsi),%r8 -2: movq 1*8(%rsi),%r9 -3: movq 2*8(%rsi),%r10 -4: movq 3*8(%rsi),%r11 -5: movnti %r8,(%rdi) -6: movnti %r9,1*8(%rdi) -7: movnti %r10,2*8(%rdi) -8: movnti %r11,3*8(%rdi) -9: movq 4*8(%rsi),%r8 -10: movq 5*8(%rsi),%r9 -11: movq 6*8(%rsi),%r10 -12: movq 7*8(%rsi),%r11 -13: movnti %r8,4*8(%rdi) -14: movnti %r9,5*8(%rdi) -15: movnti %r10,6*8(%rdi) -16: movnti %r11,7*8(%rdi) - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi - decl %ecx - jnz .L_4x8b_nocache_copy_loop - - /* Set 8-byte copy count and remainder */ -.L_8b_nocache_copy_entry: - movl %edx,%ecx - andl $7,%edx - shrl $3,%ecx - jz .L_4b_nocache_copy_entry /* jump if count is 0 */ - - /* Perform 8-byte nocache loop-copy */ -.L_8b_nocache_copy_loop: -20: movq (%rsi),%r8 -21: movnti %r8,(%rdi) - leaq 8(%rsi),%rsi - leaq 8(%rdi),%rdi - decl %ecx - jnz .L_8b_nocache_copy_loop - - /* If no byte left, we're done */ -.L_4b_nocache_copy_entry: - andl %edx,%edx - jz .L_finish_copy - - /* If destination is not 4-byte aligned, go to byte copy: */ - movl %edi,%ecx - andl $3,%ecx - jnz .L_1b_cache_copy_entry - - /* Set 4-byte copy count (1 or 0) and remainder */ - movl %edx,%ecx - andl $3,%edx - shrl $2,%ecx - jz .L_1b_cache_copy_entry /* jump if count is 0 */ - - /* Perform 4-byte nocache copy: */ -30: movl (%rsi),%r8d -31: movnti %r8d,(%rdi) - leaq 4(%rsi),%rsi - leaq 4(%rdi),%rdi - - /* If no bytes left, we're done: */ - andl %edx,%edx - jz .L_finish_copy - - /* Perform byte "cache" loop-copy for the remainder */ -.L_1b_cache_copy_entry: - movl %edx,%ecx -.L_1b_cache_copy_loop: -40: movb (%rsi),%al -41: movb %al,(%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz .L_1b_cache_copy_loop - - /* Finished copying; fence the prior stores */ -.L_finish_copy: - xorl %eax,%eax - ASM_CLAC - sfence - RET - -.L_fixup_4x8b_copy: - shll $6,%ecx - addl %ecx,%edx - jmp .L_fixup_handle_tail -.L_fixup_8b_copy: - lea (%rdx,%rcx,8),%rdx - jmp .L_fixup_handle_tail -.L_fixup_4b_copy: - lea (%rdx,%rcx,4),%rdx - jmp .L_fixup_handle_tail -.L_fixup_1b_copy: - movl %ecx,%edx -.L_fixup_handle_tail: - sfence - jmp .Lcopy_user_handle_tail - - _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy) - _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy) - _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy) - _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy) - _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy) - _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy) -SYM_FUNC_END(__copy_user_nocache) 
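
The NOTE in rep_movs_alternative's header above is the heart of this rewrite: count travels in %rcx both in and out, and the pointers in %rsi/%rdi, exactly the 'rep movs' register contract, so the alternatives machinery can patch the call site into a bare 'rep movsb' on FSRM hardware. The caller side lives in the uaccess headers rather than in this diff; a sketch of how it plugs together, modeled on the companion uaccess_64.h change from the same series (treat the exact names and constraint details as illustrative):

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/smap.h>
#include <asm/asm.h>

static __always_inline unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * On FSRM parts the ALTERNATIVE leaves a plain 'rep movsb' at the
	 * call site; everywhere else rep_movs_alternative is called with
	 * the identical register contract.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative",
			    ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax", "r8", "r9", "r10", "r11");
	clac();
	return len;	/* bytes not copied; 0 on success */
}

This is also why the function is free to clobber only %rax and %r8-%r11: those are precisely the registers the inline-asm caller declares dead.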
-EXPORT_SYMBOL(__copy_user_nocache) + _ASM_EXTABLE_UA(10b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(11b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(12b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(13b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(14b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(15b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(16b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(17b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(20b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(21b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(22b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(23b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(24b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(25b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(26b, .Lcopy_user_tail) + _ASM_EXTABLE_UA(27b, .Lcopy_user_tail) +SYM_FUNC_END(rep_movs_alternative) +EXPORT_SYMBOL(rep_movs_alternative) diff --git a/arch/x86/lib/copy_user_uncached_64.S b/arch/x86/lib/copy_user_uncached_64.S new file mode 100644 index 000000000000..5c5f38d32672 --- /dev/null +++ b/arch/x86/lib/copy_user_uncached_64.S @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2023 Linus Torvalds <torvalds@linux-foundation.org> + */ + +#include <linux/linkage.h> +#include <asm/asm.h> +#include <asm/export.h> + +/* + * copy_user_nocache - Uncached memory copy with exception handling + * + * This copies from user space into kernel space, but the kernel + * space accesses can take a machine check exception, so they too + * need exception handling. + * + * Note: only 32-bit and 64-bit stores have non-temporal versions, + * and we only use aligned versions. Any unaligned parts at the + * start or end of the copy will be done using normal cached stores. + * + * Input: + * rdi destination + * rsi source + * edx count + * + * Output: + * rax uncopied bytes or 0 if successful. + */ +SYM_FUNC_START(__copy_user_nocache) + /* If destination is not 8-byte aligned, we'll have to align it */ + testb $7,%dil + jne .Lalign + +.Lis_aligned: + cmp $64,%edx + jb .Lquadwords + + .p2align 4,0x90 +.Lunrolled: +10: movq (%rsi),%r8 +11: movq 8(%rsi),%r9 +12: movq 16(%rsi),%r10 +13: movq 24(%rsi),%r11 +20: movnti %r8,(%rdi) +21: movnti %r9,8(%rdi) +22: movnti %r10,16(%rdi) +23: movnti %r11,24(%rdi) +30: movq 32(%rsi),%r8 +31: movq 40(%rsi),%r9 +32: movq 48(%rsi),%r10 +33: movq 56(%rsi),%r11 +40: movnti %r8,32(%rdi) +41: movnti %r9,40(%rdi) +42: movnti %r10,48(%rdi) +43: movnti %r11,56(%rdi) + + addq $64,%rsi + addq $64,%rdi + sub $64,%edx + cmp $64,%edx + jae .Lunrolled +/* + * First set of user mode loads have been done + * without any stores, so if they fail, we can + * just try the non-unrolled loop. + */ +_ASM_EXTABLE_UA(10b, .Lquadwords) +_ASM_EXTABLE_UA(11b, .Lquadwords) +_ASM_EXTABLE_UA(12b, .Lquadwords) +_ASM_EXTABLE_UA(13b, .Lquadwords) +/* + * The second set of user mode loads have been + * done with 32 bytes stored to the destination, + * so we need to take that into account before + * falling back to the unrolled loop. + */ +_ASM_EXTABLE_UA(30b, .Lfixup32) +_ASM_EXTABLE_UA(31b, .Lfixup32) +_ASM_EXTABLE_UA(32b, .Lfixup32) +_ASM_EXTABLE_UA(33b, .Lfixup32) +/* + * An exception on a write means that we're + * done, but we need to update the count + * depending on where in the unrolled loop + * we were.
+ */ +_ASM_EXTABLE_UA(20b, .Ldone0) +_ASM_EXTABLE_UA(21b, .Ldone8) +_ASM_EXTABLE_UA(22b, .Ldone16) +_ASM_EXTABLE_UA(23b, .Ldone24) +_ASM_EXTABLE_UA(40b, .Ldone32) +_ASM_EXTABLE_UA(41b, .Ldone40) +_ASM_EXTABLE_UA(42b, .Ldone48) +_ASM_EXTABLE_UA(43b, .Ldone56) + +.Lquadwords: + cmp $8,%edx + jb .Llong +50: movq (%rsi),%rax +51: movnti %rax,(%rdi) + addq $8,%rsi + addq $8,%rdi + sub $8,%edx + jmp .Lquadwords + +/* + * If we fail on the last full quadword, we will + * not try to do any byte-wise cached accesses. + * We will try to do one more 4-byte uncached + * one, though. + */ +_ASM_EXTABLE_UA(50b, .Llast4) +_ASM_EXTABLE_UA(51b, .Ldone0) + +.Llong: + test $4,%dl + je .Lword +60: movl (%rsi),%eax +61: movnti %eax,(%rdi) + addq $4,%rsi + addq $4,%rdi + sub $4,%edx +.Lword: + sfence + test $2,%dl + je .Lbyte +70: movw (%rsi),%ax +71: movw %ax,(%rdi) + addq $2,%rsi + addq $2,%rdi + sub $2,%edx +.Lbyte: + test $1,%dl + je .Ldone +80: movb (%rsi),%al +81: movb %al,(%rdi) + dec %edx +.Ldone: + mov %edx,%eax + RET + +/* + * If we fail on the last four bytes, we won't + * bother with any fixups. It's dead, Jim. Note + * that there's no need for 'sfence' for any + * of this, since the exception will have been + * serializing. + */ +_ASM_EXTABLE_UA(60b, .Ldone) +_ASM_EXTABLE_UA(61b, .Ldone) +_ASM_EXTABLE_UA(70b, .Ldone) +_ASM_EXTABLE_UA(71b, .Ldone) +_ASM_EXTABLE_UA(80b, .Ldone) +_ASM_EXTABLE_UA(81b, .Ldone) + +/* + * This is the "head needs aligning" case when + * the destination isn't 8-byte aligned. The + * 4-byte case can be done uncached, but any + * smaller alignment is done with regular stores. + */ +.Lalign: + test $1,%dil + je .Lalign_word + test %edx,%edx + je .Ldone +90: movb (%rsi),%al +91: movb %al,(%rdi) + inc %rsi + inc %rdi + dec %edx +.Lalign_word: + test $2,%dil + je .Lalign_long + cmp $2,%edx + jb .Lbyte +92: movw (%rsi),%ax +93: movw %ax,(%rdi) + addq $2,%rsi + addq $2,%rdi + sub $2,%edx +.Lalign_long: + test $4,%dil + je .Lis_aligned + cmp $4,%edx + jb .Lword +94: movl (%rsi),%eax +95: movnti %eax,(%rdi) + addq $4,%rsi + addq $4,%rdi + sub $4,%edx + jmp .Lis_aligned + +/* + * If we fail on the initial alignment accesses, + * we're all done. Again, no point in trying to + * do byte-by-byte probing if the 4-byte load + * fails - we're not doing any uncached accesses + * any more. + */ +_ASM_EXTABLE_UA(90b, .Ldone) +_ASM_EXTABLE_UA(91b, .Ldone) +_ASM_EXTABLE_UA(92b, .Ldone) +_ASM_EXTABLE_UA(93b, .Ldone) +_ASM_EXTABLE_UA(94b, .Ldone) +_ASM_EXTABLE_UA(95b, .Ldone) + +/* + * Exception table fixups for faults in the middle + */ +.Ldone56: sub $8,%edx +.Ldone48: sub $8,%edx +.Ldone40: sub $8,%edx +.Ldone32: sub $8,%edx +.Ldone24: sub $8,%edx +.Ldone16: sub $8,%edx +.Ldone8: sub $8,%edx +.Ldone0: + mov %edx,%eax + RET + +.Lfixup32: + addq $32,%rsi + addq $32,%rdi + sub $32,%edx + jmp .Lquadwords + +.Llast4: +52: movl (%rsi),%eax +53: movnti %eax,(%rdi) + sfence + sub $4,%edx + mov %edx,%eax + RET +_ASM_EXTABLE_UA(52b, .Ldone0) +_ASM_EXTABLE_UA(53b, .Ldone0) + +SYM_FUNC_END(__copy_user_nocache) +EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index a64017602010..8f95fb267caa 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -11,13 +11,6 @@ .section .noinstr.text, "ax" /* - * We build a jump to memcpy_orig by default which gets NOPped out on - * the majority of x86 CPUs which set REP_GOOD.
In addition, CPUs which - * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs - * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. - */ - -/* * memcpy - Copy a memory block. * * Input: @@ -27,17 +20,21 @@ * * Output: * rax original destination + * + * The FSRM alternative should be done inline (avoiding the call and + * the disgusting return handling), but that would require some help + * from the compiler for better calling conventions. + * + * The 'rep movsb' itself is small enough to replace the call, but the + * two register moves blow up the code. And one of them is "needed" + * only for the return value that is the same as the source input, + * which the compiler could/should do much better anyway. */ SYM_TYPED_FUNC_START(__memcpy) - ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ - "jmp memcpy_erms", X86_FEATURE_ERMS + ALTERNATIVE "jmp memcpy_orig", "", X86_FEATURE_FSRM movq %rdi, %rax movq %rdx, %rcx - shrq $3, %rcx - andl $7, %edx - rep movsq - movl %edx, %ecx rep movsb RET SYM_FUNC_END(__memcpy) @@ -46,17 +43,6 @@ EXPORT_SYMBOL(__memcpy) SYM_FUNC_ALIAS(memcpy, __memcpy) EXPORT_SYMBOL(memcpy) -/* - * memcpy_erms() - enhanced fast string memcpy. This is faster and - * simpler than memcpy. Use memcpy_erms when possible. - */ -SYM_FUNC_START_LOCAL(memcpy_erms) - movq %rdi, %rax - movq %rdx, %rcx - rep movsb - RET -SYM_FUNC_END(memcpy_erms) - SYM_FUNC_START_LOCAL(memcpy_orig) movq %rdi, %rax diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 6143b1a6fa2c..7c59a704c458 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -18,27 +18,22 @@ * rdx count (bytes) * * rax original destination + * + * The FSRS alternative should be done inline (avoiding the call and + * the disgusting return handling), but that would require some help + * from the compiler for better calling conventions. + * + * The 'rep stosb' itself is small enough to replace the call, but all + * the register moves blow up the code. And two of them are "needed" + * only for the return value that is the same as the source input, + * which the compiler could/should do much better anyway. */ SYM_FUNC_START(__memset) - /* - * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended - * to use it when possible. If not available, use fast string instructions. - * - * Otherwise, use original memset function. - */ - ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \ - "jmp memset_erms", X86_FEATURE_ERMS + ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS movq %rdi,%r9 + movb %sil,%al movq %rdx,%rcx - andl $7,%edx - shrq $3,%rcx - /* expand byte value */ - movzbl %sil,%esi - movabs $0x0101010101010101,%rax - imulq %rsi,%rax - rep stosq - movl %edx,%ecx rep stosb movq %r9,%rax RET @@ -48,26 +43,6 @@ EXPORT_SYMBOL(__memset) SYM_FUNC_ALIAS(memset, __memset) EXPORT_SYMBOL(memset) -/* - * ISO C memset - set a memory block to a byte value. This function uses - * enhanced rep stosb to override the fast string function. - * The code is simpler and shorter than the fast string function as well. 
- * - * rdi destination - * rsi value (char) - * rdx count (bytes) - * - * rax original destination - */ -SYM_FUNC_START_LOCAL(memset_erms) - movq %rdi,%r9 - movb %sil,%al - movq %rdx,%rcx - rep stosb - movq %r9,%rax - RET -SYM_FUNC_END(memset_erms) - SYM_FUNC_START_LOCAL(memset_orig) movq %rdi,%r10 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 6c1f8ac5e721..c3a5bbc0b41e 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -45,7 +45,11 @@ EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); long __copy_user_flushcache(void *dst, const void __user *src, unsigned size) { unsigned long flushed, dest = (unsigned long) dst; - long rc = __copy_user_nocache(dst, src, size, 0); + long rc; + + stac(); + rc = __copy_user_nocache(dst, src, size); + clac(); /* * __copy_user_nocache() uses non-temporal stores for the bulk diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 7316a8224259..e91500a80963 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -10,6 +10,7 @@ #include <asm/fixmap.h> #include <asm/desc.h> #include <asm/kasan.h> +#include <asm/setup.h> static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage); @@ -29,6 +30,12 @@ static __init void init_cea_offsets(void) unsigned int max_cea; unsigned int i, j; + if (!kaslr_enabled()) { + for_each_possible_cpu(i) + per_cpu(_cea_offset, i) = i; + return; + } + max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE; /* O(sodding terrible) */ diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index 88cccd65029d..c6efcf559d88 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp) cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32)); - cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)); + if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0) + return; if (!strncmp(buffer, cmdline_on, sizeof(buffer))) sme_me_mask = me_mask; diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 615a76d70019..bf5161dcf89e 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -7,6 +7,7 @@ #include <linux/dmi.h> #include <linux/pci.h> #include <linux/vgaarb.h> +#include <asm/amd_nb.h> #include <asm/hpet.h> #include <asm/pci_x86.h> @@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma); #endif + +#ifdef CONFIG_AMD_NB + +#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008 +#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L + +static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev) +{ + u32 data; + + if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) { + data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK; + if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data)) + pci_err(dev, "Failed to write data 0x%x\n", data); + } else { + pci_err(dev, "Failed to read data\n"); + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0); +#endif diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 17f09dc26381..82fec66d46d2 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -69,8 +69,7 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS) CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE) 
CFLAGS_string.o += $(PURGATORY_CFLAGS) -AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2 +asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x)) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 3c5b52fbe4a7..a9ec8c9f5c5d 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o -obj-$(CONFIG_XEN_PV_DOM0) += vga.o +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index bb59cc6ddb2d..093b78c8bbec 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1390,7 +1390,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si) x86_platform.set_legacy_features = xen_dom0_set_legacy_features; - xen_init_vga(info, xen_start_info->console.dom0.info_size); + xen_init_vga(info, xen_start_info->console.dom0.info_size, + &boot_params.screen_info); xen_start_info->console.domU.mfn = 0; xen_start_info->console.domU.evtchn = 0; diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index bcae606bbc5c..ada3868c02c2 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params) x86_init.oem.banner = xen_banner; xen_efi_init(boot_params); + + if (xen_initial_domain()) { + struct xen_platform_op op = { + .cmd = XENPF_get_dom0_console, + }; + int ret = HYPERVISOR_platform_op(&op); + + if (ret > 0) + xen_init_vga(&op.u.dom0_console, + min(ret * sizeof(char), + sizeof(op.u.dom0_console)), + &boot_params->screen_info); + } } void __init mem_map_via_hcall(struct boot_params *boot_params_p) diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 1d597364b49d..b74ac2562cfb 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -20,6 +20,7 @@ #include <asm/pvclock.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> +#include <asm/xen/cpuid.h> #include <xen/events.h> #include <xen/features.h> @@ -503,11 +504,7 @@ static int __init xen_tsc_safe_clocksource(void) /* Leaf 4, sub-leaf 0 (0x40000x03) */ cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx); - /* tsc_mode = no_emulate (2) */ - if (ebx != 2) - return 0; - - return 1; + return ebx == XEN_CPUID_TSC_MODE_NEVER_EMULATE; } static void __init xen_time_init(void) diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c index 14ea32e734d5..d97adab8420f 100644 --- a/arch/x86/xen/vga.c +++ b/arch/x86/xen/vga.c @@ -9,10 +9,9 @@ #include "xen-ops.h" -void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) +void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size, + struct screen_info *screen_info) { - struct screen_info *screen_info = &boot_params.screen_info; - /* This is drawn from a dump from vgacon:startup in * standard Linux. 
*/ screen_info->orig_video_mode = 3; diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9a8bb972193d..a10903785a33 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu) struct dom0_vga_console_info; -#ifdef CONFIG_XEN_PV_DOM0 -void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); +#ifdef CONFIG_XEN_DOM0 +void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size, + struct screen_info *); #else static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, - size_t size) + size_t size, struct screen_info *si) { } #endif diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index cd98366a9b23..f0a7d1c2641e 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -539,7 +539,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { - size_t len; + size_t len, off = 0; if (!sp) sp = stack_pointer(task); @@ -548,9 +548,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE); printk("%sStack:\n", loglvl); - print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE, - STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, - sp, len, false); + while (off < len) { + u8 line[STACK_DUMP_LINE_SIZE]; + size_t line_len = len - off > STACK_DUMP_LINE_SIZE ? + STACK_DUMP_LINE_SIZE : len - off; + + __memcpy(line, (u8 *)sp + off, line_len); + print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE, + STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, + line, line_len, false); + off += STACK_DUMP_LINE_SIZE; + } show_trace(task, sp, loglvl); }
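
Finally, the xtensa show_stack() change above stops feeding the live stack pointer straight into print_hex_dump(): each output line is first copied into a small local buffer (via the uninstrumented __memcpy visible in the hunk) and the copy is dumped instead. A standalone model of the same chunking, with printf standing in for print_hex_dump() and buffer sizes chosen purely for illustration:

#include <stdio.h>
#include <string.h>

#define STACK_DUMP_LINE_SIZE	32
#define STACK_DUMP_ENTRY_SIZE	4

static void dump(const unsigned char *sp, size_t len)
{
	size_t off = 0;

	while (off < len) {
		unsigned char line[STACK_DUMP_LINE_SIZE];
		size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
			STACK_DUMP_LINE_SIZE : len - off;

		memcpy(line, sp + off, line_len);	/* __memcpy in the kernel */
		/* assumes line_len is a multiple of the 4-byte entry size */
		for (size_t i = 0; i < line_len; i += STACK_DUMP_ENTRY_SIZE)
			printf("%02x%02x%02x%02x ", line[i], line[i + 1],
			       line[i + 2], line[i + 3]);
		putchar('\n');
		off += STACK_DUMP_LINE_SIZE;
	}
}

int main(void)
{
	unsigned char stack[72];

	for (size_t i = 0; i < sizeof(stack); i++)
		stack[i] = (unsigned char)i;
	dump(stack, sizeof(stack));	/* 32 + 32 + 8 bytes, three lines */
	return 0;
}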